/*
 * Copyright (C) 2009 Apple Inc. All rights reserved.
 * Copyright (C) 2010 Patrick Gansterer <paroga@paroga.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "config.h"

#if ENABLE(JIT)
#if USE(JSVALUE32_64)
#include "JIT.h"

#include "JITInlineMethods.h"
#include "JITStubCall.h"
#include "JSArray.h"
#include "JSCell.h"
#include "JSFunction.h"
#include "JSPropertyNameIterator.h"
#include "LinkBuffer.h"

namespace JSC {

void JIT::privateCompileCTIMachineTrampolines(RefPtr<ExecutablePool>* executablePool, JSGlobalData* globalData, TrampolineStructure* trampolines)
{
#if ENABLE(JIT_OPTIMIZE_MOD)
    Label softModBegin = align();
    softModulo();
#endif
#if ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS)
    // (1) This function provides fast property access for string length
    Label stringLengthBegin = align();

    // regT0 holds payload, regT1 holds tag

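    // In the JSVALUE32_64 representation a JSValue is a 32-bit tag plus a 32-bit
    // payload; CellTag means the payload is a pointer to a GC cell. Comparing the
    // cell's vtable pointer against jsStringVPtr is the JIT's cheap way of asking
    // "is this cell a JSString?" without loading its Structure.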
    Jump string_failureCases1 = branch32(NotEqual, regT1, Imm32(JSValue::CellTag));
    Jump string_failureCases2 = branchPtr(NotEqual, Address(regT0), ImmPtr(m_globalData->jsStringVPtr));

    // Checks out okay! - get the length from the UString.
    load32(Address(regT0, OBJECT_OFFSETOF(JSString, m_length)), regT2);

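    // m_length is an unsigned 32-bit count, but the result is boxed as an
    // Int32Tag value, so lengths above INT_MAX must take the slow case.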
    Jump string_failureCases3 = branch32(Above, regT2, Imm32(INT_MAX));
    move(regT2, regT0);
    move(Imm32(JSValue::Int32Tag), regT1);

    ret();
#endif

    JumpList callLinkFailures;
    // (2) Trampolines for the slow cases of op_call / op_call_eval / op_construct.
#if ENABLE(JIT_OPTIMIZE_CALL)
    // VirtualCallLink Trampoline
    // regT0 holds callee, regT1 holds argCount. regT2 will hold the FunctionExecutable.
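    // This thunk is the initial target of a linked call site. The
    // cti_vm_lazyLinkCall stub invoked below compiles the callee if necessary,
    // patches the call site to jump to the callee directly next time, and returns
    // the entry point; a null return means compilation failed.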
    Label virtualCallLinkBegin = align();
    compileOpCallInitializeCallFrame();
    preserveReturnAddressAfterCall(regT3);
    emitPutToCallFrameHeader(regT3, RegisterFile::ReturnPC);
    restoreArgumentReference();
    Call callLazyLinkCall = call();
    callLinkFailures.append(branchTestPtr(Zero, regT0));
    restoreReturnAddressBeforeReturn(regT3);
    emitGetFromCallFrameHeader32(RegisterFile::ArgumentCount, regT1);
    jump(regT0);

    // VirtualConstructLink Trampoline
    // regT0 holds callee, regT1 holds argCount. regT2 will hold the FunctionExecutable.
    Label virtualConstructLinkBegin = align();
    compileOpCallInitializeCallFrame();
    preserveReturnAddressAfterCall(regT3);
    emitPutToCallFrameHeader(regT3, RegisterFile::ReturnPC);
    restoreArgumentReference();
    Call callLazyLinkConstruct = call();
    restoreReturnAddressBeforeReturn(regT3);
    callLinkFailures.append(branchTestPtr(Zero, regT0));
    emitGetFromCallFrameHeader32(RegisterFile::ArgumentCount, regT1);
    jump(regT0);

#endif // ENABLE(JIT_OPTIMIZE_CALL)

    // VirtualCall Trampoline
    // regT0 holds callee, regT1 holds argCount. regT2 will hold the FunctionExecutable.
    Label virtualCallBegin = align();
    compileOpCallInitializeCallFrame();

    loadPtr(Address(regT0, OBJECT_OFFSETOF(JSFunction, m_executable)), regT2);

    Jump hasCodeBlock3 = branch32(GreaterThanOrEqual, Address(regT2, OBJECT_OFFSETOF(FunctionExecutable, m_numParametersForCall)), Imm32(0));
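    // m_numParametersForCall doubles as a "compiled yet?" flag: it stays negative
    // until code has been generated for call semantics, so a negative value
    // routes through cti_op_call_jitCompile before entering the callee.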
    preserveReturnAddressAfterCall(regT3);
    restoreArgumentReference();
    Call callCompileCall = call();
    callLinkFailures.append(branchTestPtr(Zero, regT0));
    emitGetFromCallFrameHeader32(RegisterFile::ArgumentCount, regT1);
    restoreReturnAddressBeforeReturn(regT3);
    loadPtr(Address(regT0, OBJECT_OFFSETOF(JSFunction, m_executable)), regT2);
    hasCodeBlock3.link(this);

    loadPtr(Address(regT2, OBJECT_OFFSETOF(FunctionExecutable, m_jitCodeForCallWithArityCheck)), regT0);
    jump(regT0);

    // VirtualConstruct Trampoline
    // regT0 holds callee, regT1 holds argCount. regT2 will hold the FunctionExecutable.
    Label virtualConstructBegin = align();
    compileOpCallInitializeCallFrame();

    loadPtr(Address(regT0, OBJECT_OFFSETOF(JSFunction, m_executable)), regT2);

    Jump hasCodeBlock4 = branch32(GreaterThanOrEqual, Address(regT2, OBJECT_OFFSETOF(FunctionExecutable, m_numParametersForConstruct)), Imm32(0));
    preserveReturnAddressAfterCall(regT3);
    restoreArgumentReference();
    Call callCompileConstruct = call();
    callLinkFailures.append(branchTestPtr(Zero, regT0));
    emitGetFromCallFrameHeader32(RegisterFile::ArgumentCount, regT1);
    restoreReturnAddressBeforeReturn(regT3);
    loadPtr(Address(regT0, OBJECT_OFFSETOF(JSFunction, m_executable)), regT2);
    hasCodeBlock4.link(this);

    loadPtr(Address(regT2, OBJECT_OFFSETOF(FunctionExecutable, m_jitCodeForConstructWithArityCheck)), regT0);
    jump(regT0);

    // If the parser fails we want to be able to keep going, so we handle this as
    // a parse failure.
    callLinkFailures.link(this);
    emitGetFromCallFrameHeaderPtr(RegisterFile::ReturnPC, regT1);
    emitGetFromCallFrameHeaderPtr(RegisterFile::CallerFrame, callFrameRegister);
    restoreReturnAddressBeforeReturn(regT1);
    move(ImmPtr(&globalData->exceptionLocation), regT2);
    storePtr(regT1, regT2);
    poke(callFrameRegister, 1 + OBJECT_OFFSETOF(struct JITStackFrame, callFrame) / sizeof(void*));
    poke(ImmPtr(FunctionPtr(ctiVMThrowTrampoline).value()));
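    // With ctiVMThrowTrampoline poked into the return-address slot, the ret()
    // below "returns" into the throw trampoline, which unwinds to the nearest
    // exception handler.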
    ret();

    // NativeCall Trampoline
    Label nativeCallThunk = privateCompileCTINativeCall(globalData);
    Label nativeConstructThunk = privateCompileCTINativeCall(globalData, true);

#if ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS)
    Call string_failureCases1Call = makeTailRecursiveCall(string_failureCases1);
    Call string_failureCases2Call = makeTailRecursiveCall(string_failureCases2);
    Call string_failureCases3Call = makeTailRecursiveCall(string_failureCases3);
#endif

    // All trampolines constructed! Copy the code, link up calls, and set the pointers on the Machine object.
    LinkBuffer patchBuffer(this, m_globalData->executableAllocator.poolForSize(m_assembler.size()));
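    // The LinkBuffer copies the assembled code into executable memory; the Call
    // records made above remain unresolved until they are link()ed below and the
    // buffer is finalized.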

#if ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS)
    patchBuffer.link(string_failureCases1Call, FunctionPtr(cti_op_get_by_id_string_fail));
    patchBuffer.link(string_failureCases2Call, FunctionPtr(cti_op_get_by_id_string_fail));
    patchBuffer.link(string_failureCases3Call, FunctionPtr(cti_op_get_by_id_string_fail));
#endif
#if ENABLE(JIT_OPTIMIZE_CALL)
    patchBuffer.link(callLazyLinkCall, FunctionPtr(cti_vm_lazyLinkCall));
    patchBuffer.link(callLazyLinkConstruct, FunctionPtr(cti_vm_lazyLinkConstruct));
#endif
    patchBuffer.link(callCompileCall, FunctionPtr(cti_op_call_jitCompile));
    patchBuffer.link(callCompileConstruct, FunctionPtr(cti_op_construct_jitCompile));

    CodeRef finalCode = patchBuffer.finalizeCode();
    *executablePool = finalCode.m_executablePool;

    trampolines->ctiVirtualCall = trampolineAt(finalCode, virtualCallBegin);
    trampolines->ctiVirtualConstruct = trampolineAt(finalCode, virtualConstructBegin);
    trampolines->ctiNativeCall = trampolineAt(finalCode, nativeCallThunk);
    trampolines->ctiNativeConstruct = trampolineAt(finalCode, nativeConstructThunk);
#if ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS)
    trampolines->ctiStringLengthTrampoline = trampolineAt(finalCode, stringLengthBegin);
#endif
#if ENABLE(JIT_OPTIMIZE_CALL)
    trampolines->ctiVirtualCallLink = trampolineAt(finalCode, virtualCallLinkBegin);
    trampolines->ctiVirtualConstructLink = trampolineAt(finalCode, virtualConstructLinkBegin);
#endif
#if ENABLE(JIT_OPTIMIZE_MOD)
    trampolines->ctiSoftModulo = trampolineAt(finalCode, softModBegin);
#endif
}

JIT::Label JIT::privateCompileCTINativeCall(JSGlobalData* globalData, bool isConstruct)
{
    int executableOffsetToFunction = isConstruct ? OBJECT_OFFSETOF(NativeExecutable, m_constructor) : OBJECT_OFFSETOF(NativeExecutable, m_function);

    Label nativeCallThunk = align();

#if CPU(X86)
    // Load caller frame's scope chain into this callframe so that whatever we call can
    // get to its global data.
    emitGetFromCallFrameHeaderPtr(RegisterFile::CallerFrame, regT0);
    emitGetFromCallFrameHeaderPtr(RegisterFile::ScopeChain, regT1, regT0);
    emitPutToCallFrameHeader(regT1, RegisterFile::ScopeChain);

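    // On x86 the caller's call instruction left the return address on top of the
    // stack; peek() reads it so it can be recorded in the callframe for the
    // benefit of stack walking.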
    peek(regT1);
    emitPutToCallFrameHeader(regT1, RegisterFile::ReturnPC);

    // Calling convention:      f(ecx, edx, ...);
    // Host function signature: f(ExecState*);
    move(callFrameRegister, X86Registers::ecx);

    subPtr(Imm32(16 - sizeof(void*)), stackPointerRegister); // Align stack after call.

    // call the function
    emitGetFromCallFrameHeaderPtr(RegisterFile::Callee, regT1);
    loadPtr(Address(regT1, OBJECT_OFFSETOF(JSFunction, m_executable)), regT1);
    move(regT0, callFrameRegister); // Eagerly restore caller frame register to avoid loading from stack.
    call(Address(regT1, executableOffsetToFunction));

    addPtr(Imm32(16 - sizeof(void*)), stackPointerRegister);

#elif CPU(ARM)
    // Load caller frame's scope chain into this callframe so that whatever we call can
    // get to its global data.
    emitGetFromCallFrameHeaderPtr(RegisterFile::CallerFrame, regT2);
    emitGetFromCallFrameHeaderPtr(RegisterFile::ScopeChain, regT1, regT2);
    emitPutToCallFrameHeader(regT1, RegisterFile::ScopeChain);

    preserveReturnAddressAfterCall(regT3); // Callee preserved
    emitPutToCallFrameHeader(regT3, RegisterFile::ReturnPC);

    // Calling convention:      f(r0 == regT0, r1 == regT1, ...);
    // Host function signature: f(ExecState*);
    move(callFrameRegister, ARMRegisters::r0);

    // call the function
    emitGetFromCallFrameHeaderPtr(RegisterFile::Callee, ARMRegisters::r1);
    move(regT2, callFrameRegister); // Eagerly restore caller frame register to avoid loading from stack.
    loadPtr(Address(ARMRegisters::r1, OBJECT_OFFSETOF(JSFunction, m_executable)), regT2);
    call(Address(regT2, executableOffsetToFunction));

    restoreReturnAddressBeforeReturn(regT3);

#elif ENABLE(JIT_OPTIMIZE_NATIVE_CALL)
#error "JIT_OPTIMIZE_NATIVE_CALL not yet supported on this platform."
#else
    UNUSED_PARAM(executableOffsetToFunction);
    breakpoint();
#endif // CPU(X86)

    // Check for an exception
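    // A pending exception is signalled by globalData->exception holding a
    // non-empty value, so testing just its tag word against EmptyValueTag
    // suffices.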
    Jump sawException = branch32(NotEqual, AbsoluteAddress(reinterpret_cast<char*>(&globalData->exception) + OBJECT_OFFSETOF(JSValue, u.asBits.tag)), Imm32(JSValue::EmptyValueTag));

    // Return.
    ret();

    // Handle an exception
    sawException.link(this);

    // Grab the return address.
    preserveReturnAddressAfterCall(regT1);

    move(ImmPtr(&globalData->exceptionLocation), regT2);
    storePtr(regT1, regT2);
    poke(callFrameRegister, OBJECT_OFFSETOF(struct JITStackFrame, callFrame) / sizeof(void*));

    // Set the return address.
    move(ImmPtr(FunctionPtr(ctiVMThrowTrampoline).value()), regT1);
    restoreReturnAddressBeforeReturn(regT1);

    ret();

    return nativeCallThunk;
}

JIT::CodePtr JIT::privateCompileCTINativeCall(PassRefPtr<ExecutablePool> executablePool, JSGlobalData* globalData, NativeFunction func)
{
    Call nativeCall;
    Label nativeCallThunk = align();

#if CPU(X86)
    // Load caller frame's scope chain into this callframe so that whatever we call can
    // get to its global data.
    emitGetFromCallFrameHeaderPtr(RegisterFile::CallerFrame, regT0);
    emitGetFromCallFrameHeaderPtr(RegisterFile::ScopeChain, regT1, regT0);
    emitPutToCallFrameHeader(regT1, RegisterFile::ScopeChain);

    peek(regT1);
    emitPutToCallFrameHeader(regT1, RegisterFile::ReturnPC);

    // Calling convention:      f(ecx, edx, ...);
    // Host function signature: f(ExecState*);
    move(callFrameRegister, X86Registers::ecx);

    subPtr(Imm32(16 - sizeof(void*)), stackPointerRegister); // Align stack after call.

    move(regT0, callFrameRegister); // Eagerly restore caller frame register to avoid loading from stack.

    // call the function
    nativeCall = call();

    addPtr(Imm32(16 - sizeof(void*)), stackPointerRegister);

#elif CPU(ARM)
    // Load caller frame's scope chain into this callframe so that whatever we call can
    // get to its global data.
    emitGetFromCallFrameHeaderPtr(RegisterFile::CallerFrame, regT2);
    emitGetFromCallFrameHeaderPtr(RegisterFile::ScopeChain, regT1, regT2);
    emitPutToCallFrameHeader(regT1, RegisterFile::ScopeChain);

    preserveReturnAddressAfterCall(regT3); // Callee preserved
    emitPutToCallFrameHeader(regT3, RegisterFile::ReturnPC);

    // Calling convention:      f(r0 == regT0, r1 == regT1, ...);
    // Host function signature: f(ExecState*);
    move(callFrameRegister, ARMRegisters::r0);

    emitGetFromCallFrameHeaderPtr(RegisterFile::Callee, ARMRegisters::r1);
    move(regT2, callFrameRegister); // Eagerly restore caller frame register to avoid loading from stack.
    loadPtr(Address(ARMRegisters::r1, OBJECT_OFFSETOF(JSFunction, m_executable)), regT2);

    // call the function
    nativeCall = call();

    restoreReturnAddressBeforeReturn(regT3);

#elif ENABLE(JIT_OPTIMIZE_NATIVE_CALL)
#error "JIT_OPTIMIZE_NATIVE_CALL not yet supported on this platform."
#else
    breakpoint();
#endif // CPU(X86)

    // Check for an exception
    Jump sawException = branch32(NotEqual, AbsoluteAddress(reinterpret_cast<char*>(&globalData->exception) + OBJECT_OFFSETOF(JSValue, u.asBits.tag)), Imm32(JSValue::EmptyValueTag));

    // Return.
    ret();

    // Handle an exception
    sawException.link(this);

    // Grab the return address.
    preserveReturnAddressAfterCall(regT1);

    move(ImmPtr(&globalData->exceptionLocation), regT2);
    storePtr(regT1, regT2);
    poke(callFrameRegister, OBJECT_OFFSETOF(struct JITStackFrame, callFrame) / sizeof(void*));

    // Set the return address.
    move(ImmPtr(FunctionPtr(ctiVMThrowTrampoline).value()), regT1);
    restoreReturnAddressBeforeReturn(regT1);

    ret();

    // All trampolines constructed! Copy the code, link up calls, and set the pointers on the Machine object.
    LinkBuffer patchBuffer(this, executablePool);

    patchBuffer.link(nativeCall, FunctionPtr(func));

    CodeRef finalCode = patchBuffer.finalizeCode();
    return trampolineAt(finalCode, nativeCallThunk);
}

void JIT::emit_op_mov(Instruction* currentInstruction)
{
    unsigned dst = currentInstruction[1].u.operand;
    unsigned src = currentInstruction[2].u.operand;

    if (m_codeBlock->isConstantRegisterIndex(src))
        emitStore(dst, getConstantOperand(src));
    else {
        emitLoad(src, regT1, regT0);
        emitStore(dst, regT1, regT0);
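        // map() records that regT1/regT0 hold dst's tag and payload at the next
        // bytecode offset, letting the following opcode reuse them instead of
        // reloading from the register file.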
        map(m_bytecodeOffset + OPCODE_LENGTH(op_mov), dst, regT1, regT0);
    }
}

void JIT::emit_op_end(Instruction* currentInstruction)
{
    if (m_codeBlock->needsFullScopeChain())
        JITStubCall(this, cti_op_end).call();
    ASSERT(returnValueRegister != callFrameRegister);
    emitLoad(currentInstruction[1].u.operand, regT1, regT0);
    restoreReturnAddressBeforeReturn(Address(callFrameRegister, RegisterFile::ReturnPC * static_cast<int>(sizeof(Register))));
    ret();
}

void JIT::emit_op_jmp(Instruction* currentInstruction)
{
    unsigned target = currentInstruction[1].u.operand;
    addJump(jump(), target);
}

void JIT::emit_op_loop_if_lesseq(Instruction* currentInstruction)
{
    unsigned op1 = currentInstruction[1].u.operand;
    unsigned op2 = currentInstruction[2].u.operand;
    unsigned target = currentInstruction[3].u.operand;

    emitTimeoutCheck();

    if (isOperandConstantImmediateInt(op1)) {
        emitLoad(op2, regT1, regT0);
        addSlowCase(branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag)));
        addJump(branch32(GreaterThanOrEqual, regT0, Imm32(getConstantOperand(op1).asInt32())), target);
        return;
    }

    if (isOperandConstantImmediateInt(op2)) {
        emitLoad(op1, regT1, regT0);
        addSlowCase(branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag)));
        addJump(branch32(LessThanOrEqual, regT0, Imm32(getConstantOperand(op2).asInt32())), target);
        return;
    }

    emitLoad2(op1, regT1, regT0, op2, regT3, regT2);
    addSlowCase(branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag)));
    addSlowCase(branch32(NotEqual, regT3, Imm32(JSValue::Int32Tag)));
    addJump(branch32(LessThanOrEqual, regT0, regT2), target);
}

void JIT::emitSlow_op_loop_if_lesseq(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    unsigned op1 = currentInstruction[1].u.operand;
    unsigned op2 = currentInstruction[2].u.operand;
    unsigned target = currentInstruction[3].u.operand;

    if (!isOperandConstantImmediateInt(op1) && !isOperandConstantImmediateInt(op2))
        linkSlowCase(iter); // int32 check
    linkSlowCase(iter); // int32 check

    JITStubCall stubCall(this, cti_op_loop_if_lesseq);
    stubCall.addArgument(op1);
    stubCall.addArgument(op2);
    stubCall.call();
    emitJumpSlowToHot(branchTest32(NonZero, regT0), target);
}

void JIT::emit_op_new_object(Instruction* currentInstruction)
{
    JITStubCall(this, cti_op_new_object).call(currentInstruction[1].u.operand);
}

void JIT::emit_op_instanceof(Instruction* currentInstruction)
{
    unsigned dst = currentInstruction[1].u.operand;
    unsigned value = currentInstruction[2].u.operand;
    unsigned baseVal = currentInstruction[3].u.operand;
    unsigned proto = currentInstruction[4].u.operand;

    // Load the operands into registers.
    // We use regT0 for baseVal since we will be done with this first, and we can then use it for the result.
    emitLoadPayload(value, regT2);
    emitLoadPayload(baseVal, regT0);
    emitLoadPayload(proto, regT1);

    // Check that value, baseVal, and proto are cells.
    emitJumpSlowCaseIfNotJSCell(value);
    emitJumpSlowCaseIfNotJSCell(baseVal);
    emitJumpSlowCaseIfNotJSCell(proto);

    // Check that baseVal 'ImplementsDefaultHasInstance'.
    loadPtr(Address(regT0, OBJECT_OFFSETOF(JSCell, m_structure)), regT0);
    addSlowCase(branchTest8(Zero, Address(regT0, OBJECT_OFFSETOF(Structure, m_typeInfo.m_flags)), Imm32(ImplementsDefaultHasInstance)));

    // Optimistically load the result true, and start looping.
    // Initially, regT1 still contains proto and regT2 still contains value.
    // As we loop regT2 will be updated with its prototype, recursively walking the prototype chain.
    move(Imm32(JSValue::TrueTag), regT0);
    Label loop(this);

    // Load the prototype of the cell in regT2. If this is equal to regT1 - WIN!
    // Otherwise, check if we've hit null - if we have then drop out of the loop, if not go again.
    loadPtr(Address(regT2, OBJECT_OFFSETOF(JSCell, m_structure)), regT2);
    load32(Address(regT2, OBJECT_OFFSETOF(Structure, m_prototype) + OBJECT_OFFSETOF(JSValue, u.asBits.payload)), regT2);
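    // The prototype slot always holds either a cell or null, and null's payload
    // word is zero, so loading only the payload lets the NonZero test below
    // double as the end-of-chain check.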
    Jump isInstance = branchPtr(Equal, regT2, regT1);
    branchTest32(NonZero, regT2).linkTo(loop, this);

    // We get here either by dropping out of the loop, or if value was not an Object. Result is false.
    move(Imm32(JSValue::FalseTag), regT0);

    // isInstance jumps right down to here, to skip setting the result to false (it has already set true).
    isInstance.link(this);
    emitStoreBool(dst, regT0);
}

void JIT::emitSlow_op_instanceof(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    unsigned dst = currentInstruction[1].u.operand;
    unsigned value = currentInstruction[2].u.operand;
    unsigned baseVal = currentInstruction[3].u.operand;
    unsigned proto = currentInstruction[4].u.operand;

    linkSlowCaseIfNotJSCell(iter, value);
    linkSlowCaseIfNotJSCell(iter, baseVal);
    linkSlowCaseIfNotJSCell(iter, proto);
    linkSlowCase(iter);

    JITStubCall stubCall(this, cti_op_instanceof);
    stubCall.addArgument(value);
    stubCall.addArgument(baseVal);
    stubCall.addArgument(proto);
    stubCall.call(dst);
}

void JIT::emit_op_new_func(Instruction* currentInstruction)
{
    JITStubCall stubCall(this, cti_op_new_func);
    stubCall.addArgument(ImmPtr(m_codeBlock->functionDecl(currentInstruction[2].u.operand)));
    stubCall.call(currentInstruction[1].u.operand);
}

void JIT::emit_op_get_global_var(Instruction* currentInstruction)
{
    int dst = currentInstruction[1].u.operand;
    JSGlobalObject* globalObject = static_cast<JSGlobalObject*>(currentInstruction[2].u.jsCell);
    ASSERT(globalObject->isGlobalObject());
    int index = currentInstruction[3].u.operand;

    loadPtr(&globalObject->d()->registers, regT2);

    emitLoad(index, regT1, regT0, regT2);
    emitStore(dst, regT1, regT0);
    map(m_bytecodeOffset + OPCODE_LENGTH(op_get_global_var), dst, regT1, regT0);
}

void JIT::emit_op_put_global_var(Instruction* currentInstruction)
{
    JSGlobalObject* globalObject = static_cast<JSGlobalObject*>(currentInstruction[1].u.jsCell);
    ASSERT(globalObject->isGlobalObject());
    int index = currentInstruction[2].u.operand;
    int value = currentInstruction[3].u.operand;

    emitLoad(value, regT1, regT0);

    loadPtr(&globalObject->d()->registers, regT2);
    emitStore(index, regT1, regT0, regT2);
    map(m_bytecodeOffset + OPCODE_LENGTH(op_put_global_var), value, regT1, regT0);
}

void JIT::emit_op_get_scoped_var(Instruction* currentInstruction)
{
    int dst = currentInstruction[1].u.operand;
    int index = currentInstruction[2].u.operand;
    int skip = currentInstruction[3].u.operand;

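    // Walk "skip" links up the static scope chain, then index into the resolved
    // activation's register array.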
    emitGetFromCallFrameHeaderPtr(RegisterFile::ScopeChain, regT2);
    while (skip--)
        loadPtr(Address(regT2, OBJECT_OFFSETOF(ScopeChainNode, next)), regT2);

    loadPtr(Address(regT2, OBJECT_OFFSETOF(ScopeChainNode, object)), regT2);
    loadPtr(Address(regT2, OBJECT_OFFSETOF(JSVariableObject, d)), regT2);
    loadPtr(Address(regT2, OBJECT_OFFSETOF(JSVariableObject::JSVariableObjectData, registers)), regT2);

    emitLoad(index, regT1, regT0, regT2);
    emitStore(dst, regT1, regT0);
    map(m_bytecodeOffset + OPCODE_LENGTH(op_get_scoped_var), dst, regT1, regT0);
}

void JIT::emit_op_put_scoped_var(Instruction* currentInstruction)
{
    int index = currentInstruction[1].u.operand;
    int skip = currentInstruction[2].u.operand;
    int value = currentInstruction[3].u.operand;

    emitLoad(value, regT1, regT0);

    emitGetFromCallFrameHeaderPtr(RegisterFile::ScopeChain, regT2);
    while (skip--)
        loadPtr(Address(regT2, OBJECT_OFFSETOF(ScopeChainNode, next)), regT2);

    loadPtr(Address(regT2, OBJECT_OFFSETOF(ScopeChainNode, object)), regT2);
    loadPtr(Address(regT2, OBJECT_OFFSETOF(JSVariableObject, d)), regT2);
    loadPtr(Address(regT2, OBJECT_OFFSETOF(JSVariableObject::JSVariableObjectData, registers)), regT2);

    emitStore(index, regT1, regT0, regT2);
    map(m_bytecodeOffset + OPCODE_LENGTH(op_put_scoped_var), value, regT1, regT0);
}

void JIT::emit_op_tear_off_activation(Instruction* currentInstruction)
{
    JITStubCall stubCall(this, cti_op_tear_off_activation);
    stubCall.addArgument(currentInstruction[1].u.operand);
    stubCall.addArgument(unmodifiedArgumentsRegister(currentInstruction[2].u.operand));
    stubCall.call();
}

void JIT::emit_op_tear_off_arguments(Instruction* currentInstruction)
{
    int dst = currentInstruction[1].u.operand;

    Jump argsNotCreated = branch32(Equal, tagFor(unmodifiedArgumentsRegister(dst)), Imm32(JSValue::EmptyValueTag));
    JITStubCall stubCall(this, cti_op_tear_off_arguments);
    stubCall.addArgument(unmodifiedArgumentsRegister(dst));
    stubCall.call();
    argsNotCreated.link(this);
}

void JIT::emit_op_new_array(Instruction* currentInstruction)
{
    JITStubCall stubCall(this, cti_op_new_array);
    stubCall.addArgument(Imm32(currentInstruction[2].u.operand));
    stubCall.addArgument(Imm32(currentInstruction[3].u.operand));
    stubCall.call(currentInstruction[1].u.operand);
}

void JIT::emit_op_resolve(Instruction* currentInstruction)
{
    JITStubCall stubCall(this, cti_op_resolve);
    stubCall.addArgument(ImmPtr(&m_codeBlock->identifier(currentInstruction[2].u.operand)));
    stubCall.call(currentInstruction[1].u.operand);
}

void JIT::emit_op_to_primitive(Instruction* currentInstruction)
{
    int dst = currentInstruction[1].u.operand;
    int src = currentInstruction[2].u.operand;

    emitLoad(src, regT1, regT0);

    Jump isImm = branch32(NotEqual, regT1, Imm32(JSValue::CellTag));
    addSlowCase(branchPtr(NotEqual, Address(regT0), ImmPtr(m_globalData->jsStringVPtr)));
    isImm.link(this);

    if (dst != src)
        emitStore(dst, regT1, regT0);
    map(m_bytecodeOffset + OPCODE_LENGTH(op_to_primitive), dst, regT1, regT0);
}

void JIT::emitSlow_op_to_primitive(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    int dst = currentInstruction[1].u.operand;

    linkSlowCase(iter);

    JITStubCall stubCall(this, cti_op_to_primitive);
    stubCall.addArgument(regT1, regT0);
    stubCall.call(dst);
}

void JIT::emit_op_strcat(Instruction* currentInstruction)
{
    JITStubCall stubCall(this, cti_op_strcat);
    stubCall.addArgument(Imm32(currentInstruction[2].u.operand));
    stubCall.addArgument(Imm32(currentInstruction[3].u.operand));
    stubCall.call(currentInstruction[1].u.operand);
}

void JIT::emit_op_resolve_base(Instruction* currentInstruction)
{
    JITStubCall stubCall(this, cti_op_resolve_base);
    stubCall.addArgument(ImmPtr(&m_codeBlock->identifier(currentInstruction[2].u.operand)));
    stubCall.call(currentInstruction[1].u.operand);
}

void JIT::emit_op_resolve_skip(Instruction* currentInstruction)
{
    JITStubCall stubCall(this, cti_op_resolve_skip);
    stubCall.addArgument(ImmPtr(&m_codeBlock->identifier(currentInstruction[2].u.operand)));
    stubCall.addArgument(Imm32(currentInstruction[3].u.operand));
    stubCall.call(currentInstruction[1].u.operand);
}

void JIT::emit_op_resolve_global(Instruction* currentInstruction, bool dynamic)
{
    // FIXME: Optimize to use patching instead of so many memory accesses.

    unsigned dst = currentInstruction[1].u.operand;
    void* globalObject = currentInstruction[2].u.jsCell;

    unsigned currentIndex = m_globalResolveInfoIndex++;
    void* structureAddress = &(m_codeBlock->globalResolveInfo(currentIndex).structure);
    void* offsetAddr = &(m_codeBlock->globalResolveInfo(currentIndex).offset);

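    // Each resolve_global site owns a GlobalResolveInfo slot; the slow path
    // caches the global object's Structure and the property's storage offset
    // there, so subsequent executions are just two loads and a compare.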
    // Verify structure.
    move(ImmPtr(globalObject), regT0);
    loadPtr(structureAddress, regT1);
    addSlowCase(branchPtr(NotEqual, regT1, Address(regT0, OBJECT_OFFSETOF(JSCell, m_structure))));

    // Load property.
    loadPtr(Address(regT0, OBJECT_OFFSETOF(JSGlobalObject, m_externalStorage)), regT2);
    load32(offsetAddr, regT3);
    load32(BaseIndex(regT2, regT3, TimesEight, OBJECT_OFFSETOF(JSValue, u.asBits.payload)), regT0); // payload
    load32(BaseIndex(regT2, regT3, TimesEight, OBJECT_OFFSETOF(JSValue, u.asBits.tag)), regT1); // tag
    emitStore(dst, regT1, regT0);
    map(m_bytecodeOffset + (dynamic ? OPCODE_LENGTH(op_resolve_global_dynamic) : OPCODE_LENGTH(op_resolve_global)), dst, regT1, regT0);
}

void JIT::emitSlow_op_resolve_global(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    unsigned dst = currentInstruction[1].u.operand;
    void* globalObject = currentInstruction[2].u.jsCell;
    Identifier* ident = &m_codeBlock->identifier(currentInstruction[3].u.operand);

    unsigned currentIndex = m_globalResolveInfoIndex++;

    linkSlowCase(iter);
    JITStubCall stubCall(this, cti_op_resolve_global);
    stubCall.addArgument(ImmPtr(globalObject));
    stubCall.addArgument(ImmPtr(ident));
    stubCall.addArgument(Imm32(currentIndex));
    stubCall.call(dst);
}

void JIT::emit_op_not(Instruction* currentInstruction)
{
    unsigned dst = currentInstruction[1].u.operand;
    unsigned src = currentInstruction[2].u.operand;

    emitLoadTag(src, regT0);

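    // TrueTag and FalseTag differ only in their low bit, so xor-ing with FalseTag
    // yields 0 or 1 exactly when the operand was a boolean (enforced by the ~1
    // test below), and the second xor produces the inverted boolean's tag.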
    xor32(Imm32(JSValue::FalseTag), regT0);
    addSlowCase(branchTest32(NonZero, regT0, Imm32(~1)));
    xor32(Imm32(JSValue::TrueTag), regT0);

    emitStoreBool(dst, regT0, (dst == src));
}

void JIT::emitSlow_op_not(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    unsigned dst = currentInstruction[1].u.operand;
    unsigned src = currentInstruction[2].u.operand;

    linkSlowCase(iter);

    JITStubCall stubCall(this, cti_op_not);
    stubCall.addArgument(src);
    stubCall.call(dst);
}

void JIT::emit_op_jfalse(Instruction* currentInstruction)
{
    unsigned cond = currentInstruction[1].u.operand;
    unsigned target = currentInstruction[2].u.operand;

    emitLoad(cond, regT1, regT0);

    Jump isTrue = branch32(Equal, regT1, Imm32(JSValue::TrueTag));
    addJump(branch32(Equal, regT1, Imm32(JSValue::FalseTag)), target);

    Jump isNotInteger = branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag));
    Jump isTrue2 = branch32(NotEqual, regT0, Imm32(0));
    addJump(jump(), target);

    if (supportsFloatingPoint()) {
        isNotInteger.link(this);

        addSlowCase(branch32(Above, regT1, Imm32(JSValue::LowestTag)));

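        // Anything still here has a tag at or below LowestTag, i.e. the high half
        // of a double, so the boolean test reduces to a comparison against 0.0
        // (with NaN also counting as false).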
        zeroDouble(fpRegT0);
        emitLoadDouble(cond, fpRegT1);
        addJump(branchDouble(DoubleEqualOrUnordered, fpRegT0, fpRegT1), target);
    } else
        addSlowCase(isNotInteger);

    isTrue.link(this);
    isTrue2.link(this);
}

void JIT::emitSlow_op_jfalse(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    unsigned cond = currentInstruction[1].u.operand;
    unsigned target = currentInstruction[2].u.operand;

    linkSlowCase(iter);
    JITStubCall stubCall(this, cti_op_jtrue);
    stubCall.addArgument(cond);
    stubCall.call();
    emitJumpSlowToHot(branchTest32(Zero, regT0), target); // Inverted.
}

void JIT::emit_op_jtrue(Instruction* currentInstruction)
{
    unsigned cond = currentInstruction[1].u.operand;
    unsigned target = currentInstruction[2].u.operand;

    emitLoad(cond, regT1, regT0);

    Jump isFalse = branch32(Equal, regT1, Imm32(JSValue::FalseTag));
    addJump(branch32(Equal, regT1, Imm32(JSValue::TrueTag)), target);

    Jump isNotInteger = branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag));
    Jump isFalse2 = branch32(Equal, regT0, Imm32(0));
    addJump(jump(), target);

    if (supportsFloatingPoint()) {
        isNotInteger.link(this);

        addSlowCase(branch32(Above, regT1, Imm32(JSValue::LowestTag)));

        zeroDouble(fpRegT0);
        emitLoadDouble(cond, fpRegT1);
        addJump(branchDouble(DoubleNotEqual, fpRegT0, fpRegT1), target);
    } else
        addSlowCase(isNotInteger);

    isFalse.link(this);
    isFalse2.link(this);
}

void JIT::emitSlow_op_jtrue(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    unsigned cond = currentInstruction[1].u.operand;
    unsigned target = currentInstruction[2].u.operand;

    linkSlowCase(iter);
    JITStubCall stubCall(this, cti_op_jtrue);
    stubCall.addArgument(cond);
    stubCall.call();
    emitJumpSlowToHot(branchTest32(NonZero, regT0), target);
}

void JIT::emit_op_jeq_null(Instruction* currentInstruction)
{
    unsigned src = currentInstruction[1].u.operand;
    unsigned target = currentInstruction[2].u.operand;

    emitLoad(src, regT1, regT0);

    Jump isImmediate = branch32(NotEqual, regT1, Imm32(JSValue::CellTag));

    // First, handle JSCell cases - check MasqueradesAsUndefined bit on the structure.
    loadPtr(Address(regT0, OBJECT_OFFSETOF(JSCell, m_structure)), regT2);
    addJump(branchTest8(NonZero, Address(regT2, OBJECT_OFFSETOF(Structure, m_typeInfo.m_flags)), Imm32(MasqueradesAsUndefined)), target);

    Jump wasNotImmediate = jump();

    // Now handle the immediate cases - undefined & null
    isImmediate.link(this);

    set32(Equal, regT1, Imm32(JSValue::NullTag), regT2);
    set32(Equal, regT1, Imm32(JSValue::UndefinedTag), regT1);
    or32(regT2, regT1);

    addJump(branchTest32(NonZero, regT1), target);

    wasNotImmediate.link(this);
}

void JIT::emit_op_jneq_null(Instruction* currentInstruction)
{
    unsigned src = currentInstruction[1].u.operand;
    unsigned target = currentInstruction[2].u.operand;

    emitLoad(src, regT1, regT0);

    Jump isImmediate = branch32(NotEqual, regT1, Imm32(JSValue::CellTag));

    // First, handle JSCell cases - check MasqueradesAsUndefined bit on the structure.
    loadPtr(Address(regT0, OBJECT_OFFSETOF(JSCell, m_structure)), regT2);
    addJump(branchTest8(Zero, Address(regT2, OBJECT_OFFSETOF(Structure, m_typeInfo.m_flags)), Imm32(MasqueradesAsUndefined)), target);

    Jump wasNotImmediate = jump();

    // Now handle the immediate cases - undefined & null
    isImmediate.link(this);

    set32(Equal, regT1, Imm32(JSValue::NullTag), regT2);
    set32(Equal, regT1, Imm32(JSValue::UndefinedTag), regT1);
    or32(regT2, regT1);

    addJump(branchTest32(Zero, regT1), target);

    wasNotImmediate.link(this);
}

void JIT::emit_op_jneq_ptr(Instruction* currentInstruction)
{
    unsigned src = currentInstruction[1].u.operand;
    JSCell* ptr = currentInstruction[2].u.jsCell;
    unsigned target = currentInstruction[3].u.operand;

    emitLoad(src, regT1, regT0);
    addJump(branch32(NotEqual, regT1, Imm32(JSValue::CellTag)), target);
    addJump(branchPtr(NotEqual, regT0, ImmPtr(ptr)), target);
}

void JIT::emit_op_jsr(Instruction* currentInstruction)
{
    int retAddrDst = currentInstruction[1].u.operand;
    int target = currentInstruction[2].u.operand;
    DataLabelPtr storeLocation = storePtrWithPatch(ImmPtr(0), Address(callFrameRegister, sizeof(Register) * retAddrDst));
    addJump(jump(), target);
    m_jsrSites.append(JSRInfo(storeLocation, label()));
}

void JIT::emit_op_sret(Instruction* currentInstruction)
{
    jump(Address(callFrameRegister, sizeof(Register) * currentInstruction[1].u.operand));
}

void JIT::emit_op_eq(Instruction* currentInstruction)
{
    unsigned dst = currentInstruction[1].u.operand;
    unsigned src1 = currentInstruction[2].u.operand;
    unsigned src2 = currentInstruction[3].u.operand;

    emitLoad2(src1, regT1, regT0, src2, regT3, regT2);
    addSlowCase(branch32(NotEqual, regT1, regT3));
    addSlowCase(branch32(Equal, regT1, Imm32(JSValue::CellTag)));
    addSlowCase(branch32(Below, regT1, Imm32(JSValue::LowestTag)));

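    // With identical non-cell, non-double tags (int32, boolean, undefined or
    // null), equality reduces to comparing the raw payload words.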
    set8(Equal, regT0, regT2, regT0);
    or32(Imm32(JSValue::FalseTag), regT0);

    emitStoreBool(dst, regT0);
}

void JIT::emitSlow_op_eq(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    unsigned dst = currentInstruction[1].u.operand;
    unsigned op1 = currentInstruction[2].u.operand;
    unsigned op2 = currentInstruction[3].u.operand;

    JumpList storeResult;
    JumpList genericCase;

    genericCase.append(getSlowCase(iter)); // tags not equal

    linkSlowCase(iter); // tags equal and JSCell
    genericCase.append(branchPtr(NotEqual, Address(regT0), ImmPtr(m_globalData->jsStringVPtr)));
    genericCase.append(branchPtr(NotEqual, Address(regT2), ImmPtr(m_globalData->jsStringVPtr)));

    // String case.
    JITStubCall stubCallEqStrings(this, cti_op_eq_strings);
    stubCallEqStrings.addArgument(regT0);
    stubCallEqStrings.addArgument(regT2);
    stubCallEqStrings.call();
    storeResult.append(jump());

    // Generic case.
    genericCase.append(getSlowCase(iter)); // doubles
    genericCase.link(this);
    JITStubCall stubCallEq(this, cti_op_eq);
    stubCallEq.addArgument(op1);
    stubCallEq.addArgument(op2);
    stubCallEq.call(regT0);

    storeResult.link(this);
    or32(Imm32(JSValue::FalseTag), regT0);
    emitStoreBool(dst, regT0);
}

void JIT::emit_op_neq(Instruction* currentInstruction)
{
    unsigned dst = currentInstruction[1].u.operand;
    unsigned src1 = currentInstruction[2].u.operand;
    unsigned src2 = currentInstruction[3].u.operand;

    emitLoad2(src1, regT1, regT0, src2, regT3, regT2);
    addSlowCase(branch32(NotEqual, regT1, regT3));
    addSlowCase(branch32(Equal, regT1, Imm32(JSValue::CellTag)));
    addSlowCase(branch32(Below, regT1, Imm32(JSValue::LowestTag)));

    set8(NotEqual, regT0, regT2, regT0);
    or32(Imm32(JSValue::FalseTag), regT0);

    emitStoreBool(dst, regT0);
}

void JIT::emitSlow_op_neq(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    unsigned dst = currentInstruction[1].u.operand;

    JumpList storeResult;
    JumpList genericCase;

    genericCase.append(getSlowCase(iter)); // tags not equal

    linkSlowCase(iter); // tags equal and JSCell
    genericCase.append(branchPtr(NotEqual, Address(regT0), ImmPtr(m_globalData->jsStringVPtr)));
    genericCase.append(branchPtr(NotEqual, Address(regT2), ImmPtr(m_globalData->jsStringVPtr)));

    // String case.
    JITStubCall stubCallEqStrings(this, cti_op_eq_strings);
    stubCallEqStrings.addArgument(regT0);
    stubCallEqStrings.addArgument(regT2);
    stubCallEqStrings.call(regT0);
    storeResult.append(jump());

    // Generic case.
    genericCase.append(getSlowCase(iter)); // doubles
    genericCase.link(this);
    JITStubCall stubCallEq(this, cti_op_eq);
    stubCallEq.addArgument(regT1, regT0);
    stubCallEq.addArgument(regT3, regT2);
    stubCallEq.call(regT0);

    storeResult.link(this);
    xor32(Imm32(0x1), regT0);
    or32(Imm32(JSValue::FalseTag), regT0);
    emitStoreBool(dst, regT0);
}

void JIT::compileOpStrictEq(Instruction* currentInstruction, CompileOpStrictEqType type)
{
    unsigned dst = currentInstruction[1].u.operand;
    unsigned src1 = currentInstruction[2].u.operand;
    unsigned src2 = currentInstruction[3].u.operand;

    emitLoadTag(src1, regT0);
    emitLoadTag(src2, regT1);

    // Jump to a slow case if either operand is double, or if both operands are
    // cells and/or Int32s.
    move(regT0, regT2);
    and32(regT1, regT2);
    addSlowCase(branch32(Below, regT2, Imm32(JSValue::LowestTag)));
    addSlowCase(branch32(AboveOrEqual, regT2, Imm32(JSValue::CellTag)));

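    // AND-ing the two tags folds both checks into one word: the result drops
    // below LowestTag if either tag was the high half of a double, and (roughly)
    // stays at or above CellTag only when both operands are cells and/or Int32s.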
    if (type == OpStrictEq)
        set8(Equal, regT0, regT1, regT0);
    else
        set8(NotEqual, regT0, regT1, regT0);

    or32(Imm32(JSValue::FalseTag), regT0);

    emitStoreBool(dst, regT0);
}

void JIT::emit_op_stricteq(Instruction* currentInstruction)
{
    compileOpStrictEq(currentInstruction, OpStrictEq);
}

void JIT::emitSlow_op_stricteq(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    unsigned dst = currentInstruction[1].u.operand;
    unsigned src1 = currentInstruction[2].u.operand;
    unsigned src2 = currentInstruction[3].u.operand;

    linkSlowCase(iter);
    linkSlowCase(iter);

    JITStubCall stubCall(this, cti_op_stricteq);
    stubCall.addArgument(src1);
    stubCall.addArgument(src2);
    stubCall.call(dst);
}

void JIT::emit_op_nstricteq(Instruction* currentInstruction)
{
    compileOpStrictEq(currentInstruction, OpNStrictEq);
}

void JIT::emitSlow_op_nstricteq(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    unsigned dst = currentInstruction[1].u.operand;
    unsigned src1 = currentInstruction[2].u.operand;
    unsigned src2 = currentInstruction[3].u.operand;

    linkSlowCase(iter);
    linkSlowCase(iter);

    JITStubCall stubCall(this, cti_op_nstricteq);
    stubCall.addArgument(src1);
    stubCall.addArgument(src2);
    stubCall.call(dst);
}

void JIT::emit_op_eq_null(Instruction* currentInstruction)
{
    unsigned dst = currentInstruction[1].u.operand;
    unsigned src = currentInstruction[2].u.operand;

    emitLoad(src, regT1, regT0);
    Jump isImmediate = branch32(NotEqual, regT1, Imm32(JSValue::CellTag));

    loadPtr(Address(regT0, OBJECT_OFFSETOF(JSCell, m_structure)), regT1);
    setTest8(NonZero, Address(regT1, OBJECT_OFFSETOF(Structure, m_typeInfo.m_flags)), Imm32(MasqueradesAsUndefined), regT1);

    Jump wasNotImmediate = jump();

    isImmediate.link(this);

    set8(Equal, regT1, Imm32(JSValue::NullTag), regT2);
    set8(Equal, regT1, Imm32(JSValue::UndefinedTag), regT1);
    or32(regT2, regT1);

    wasNotImmediate.link(this);

    or32(Imm32(JSValue::FalseTag), regT1);

    emitStoreBool(dst, regT1);
}

void JIT::emit_op_neq_null(Instruction* currentInstruction)
{
    unsigned dst = currentInstruction[1].u.operand;
    unsigned src = currentInstruction[2].u.operand;

    emitLoad(src, regT1, regT0);
    Jump isImmediate = branch32(NotEqual, regT1, Imm32(JSValue::CellTag));

    loadPtr(Address(regT0, OBJECT_OFFSETOF(JSCell, m_structure)), regT1);
    setTest8(Zero, Address(regT1, OBJECT_OFFSETOF(Structure, m_typeInfo.m_flags)), Imm32(MasqueradesAsUndefined), regT1);

    Jump wasNotImmediate = jump();

    isImmediate.link(this);

    set8(NotEqual, regT1, Imm32(JSValue::NullTag), regT2);
    set8(NotEqual, regT1, Imm32(JSValue::UndefinedTag), regT1);
    and32(regT2, regT1);

    wasNotImmediate.link(this);

    or32(Imm32(JSValue::FalseTag), regT1);

    emitStoreBool(dst, regT1);
}

void JIT::emit_op_resolve_with_base(Instruction* currentInstruction)
{
    JITStubCall stubCall(this, cti_op_resolve_with_base);
    stubCall.addArgument(ImmPtr(&m_codeBlock->identifier(currentInstruction[3].u.operand)));
    stubCall.addArgument(Imm32(currentInstruction[1].u.operand));
    stubCall.call(currentInstruction[2].u.operand);
}

void JIT::emit_op_new_func_exp(Instruction* currentInstruction)
{
    JITStubCall stubCall(this, cti_op_new_func_exp);
    stubCall.addArgument(ImmPtr(m_codeBlock->functionExpr(currentInstruction[2].u.operand)));
    stubCall.call(currentInstruction[1].u.operand);
}

void JIT::emit_op_throw(Instruction* currentInstruction)
{
    unsigned exception = currentInstruction[1].u.operand;
    JITStubCall stubCall(this, cti_op_throw);
    stubCall.addArgument(exception);
    stubCall.call();

#ifndef NDEBUG
    // cti_op_throw always changes its return address,
    // so this point in the code should never be reached.
    breakpoint();
#endif
}

void JIT::emit_op_get_pnames(Instruction* currentInstruction)
{
    int dst = currentInstruction[1].u.operand;
    int base = currentInstruction[2].u.operand;
    int i = currentInstruction[3].u.operand;
    int size = currentInstruction[4].u.operand;
    int breakTarget = currentInstruction[5].u.operand;

    JumpList isNotObject;

    emitLoad(base, regT1, regT0);
    if (!m_codeBlock->isKnownNotImmediate(base))
        isNotObject.append(branch32(NotEqual, regT1, Imm32(JSValue::CellTag)));
    if (base != m_codeBlock->thisRegister()) {
        loadPtr(Address(regT0, OBJECT_OFFSETOF(JSCell, m_structure)), regT2);
        isNotObject.append(branch8(NotEqual, Address(regT2, OBJECT_OFFSETOF(Structure, m_typeInfo.m_type)), Imm32(ObjectType)));
    }

    // We could inline the case where you have a valid cache, but
    // this call doesn't seem to be hot.
    Label isObject(this);
    JITStubCall getPnamesStubCall(this, cti_op_get_pnames);
    getPnamesStubCall.addArgument(regT0);
    getPnamesStubCall.call(dst);
    load32(Address(regT0, OBJECT_OFFSETOF(JSPropertyNameIterator, m_jsStringsSize)), regT3);
    store32(Imm32(0), addressFor(i));
    store32(regT3, addressFor(size));
    Jump end = jump();

    isNotObject.link(this);
    addJump(branch32(Equal, regT1, Imm32(JSValue::NullTag)), breakTarget);
    addJump(branch32(Equal, regT1, Imm32(JSValue::UndefinedTag)), breakTarget);
    JITStubCall toObjectStubCall(this, cti_to_object);
    toObjectStubCall.addArgument(regT1, regT0);
    toObjectStubCall.call(base);
    jump().linkTo(isObject, this);

    end.link(this);
}

void JIT::emit_op_next_pname(Instruction* currentInstruction)
{
    int dst = currentInstruction[1].u.operand;
    int base = currentInstruction[2].u.operand;
    int i = currentInstruction[3].u.operand;
    int size = currentInstruction[4].u.operand;
    int it = currentInstruction[5].u.operand;
    int target = currentInstruction[6].u.operand;

    JumpList callHasProperty;

    Label begin(this);
    load32(addressFor(i), regT0);
    Jump end = branch32(Equal, regT0, addressFor(size));

    // Grab key @ i
    loadPtr(addressFor(it), regT1);
    loadPtr(Address(regT1, OBJECT_OFFSETOF(JSPropertyNameIterator, m_jsStrings)), regT2);
    load32(BaseIndex(regT2, regT0, TimesEight), regT2);
    store32(Imm32(JSValue::CellTag), tagFor(dst));
    store32(regT2, payloadFor(dst));

    // Increment i
    add32(Imm32(1), regT0);
    store32(regT0, addressFor(i));

    // Verify that i is valid:
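    // Fast path: if base's Structure and prototype chain still match the ones
    // the iterator cached when it was created, the cached name is guaranteed to
    // still be enumerable and the generic hasProperty query below can be skipped.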
    loadPtr(addressFor(base), regT0);

    // Test base's structure
    loadPtr(Address(regT0, OBJECT_OFFSETOF(JSCell, m_structure)), regT2);
    callHasProperty.append(branchPtr(NotEqual, regT2, Address(regT1, OBJECT_OFFSETOF(JSPropertyNameIterator, m_cachedStructure))));

    // Test base's prototype chain
    loadPtr(Address(regT1, OBJECT_OFFSETOF(JSPropertyNameIterator, m_cachedPrototypeChain)), regT3);
    loadPtr(Address(regT3, OBJECT_OFFSETOF(StructureChain, m_vector)), regT3);
    addJump(branchTestPtr(Zero, Address(regT3)), target);

    Label checkPrototype(this);
    callHasProperty.append(branch32(Equal, Address(regT2, OBJECT_OFFSETOF(Structure, m_prototype) + OBJECT_OFFSETOF(JSValue, u.asBits.tag)), Imm32(JSValue::NullTag)));
    loadPtr(Address(regT2, OBJECT_OFFSETOF(Structure, m_prototype) + OBJECT_OFFSETOF(JSValue, u.asBits.payload)), regT2);
    loadPtr(Address(regT2, OBJECT_OFFSETOF(JSCell, m_structure)), regT2);
    callHasProperty.append(branchPtr(NotEqual, regT2, Address(regT3)));
    addPtr(Imm32(sizeof(Structure*)), regT3);
    branchTestPtr(NonZero, Address(regT3)).linkTo(checkPrototype, this);

    // Continue loop.
    addJump(jump(), target);

    // Slow case: Ask the object if i is valid.
    callHasProperty.link(this);
    loadPtr(addressFor(dst), regT1);
    JITStubCall stubCall(this, cti_has_property);
    stubCall.addArgument(regT0);
    stubCall.addArgument(regT1);
    stubCall.call();

    // Test for valid key.
    addJump(branchTest32(NonZero, regT0), target);
    jump().linkTo(begin, this);

    // End of loop.
    end.link(this);
}

void JIT::emit_op_push_scope(Instruction* currentInstruction)
{
    JITStubCall stubCall(this, cti_op_push_scope);
    stubCall.addArgument(currentInstruction[1].u.operand);
    stubCall.call(currentInstruction[1].u.operand);
}

void JIT::emit_op_pop_scope(Instruction*)
{
    JITStubCall(this, cti_op_pop_scope).call();
}

void JIT::emit_op_to_jsnumber(Instruction* currentInstruction)
{
    int dst = currentInstruction[1].u.operand;
    int src = currentInstruction[2].u.operand;

    emitLoad(src, regT1, regT0);

    Jump isInt32 = branch32(Equal, regT1, Imm32(JSValue::Int32Tag));
    addSlowCase(branch32(AboveOrEqual, regT1, Imm32(JSValue::EmptyValueTag)));
    isInt32.link(this);

    if (src != dst)
        emitStore(dst, regT1, regT0);
    map(m_bytecodeOffset + OPCODE_LENGTH(op_to_jsnumber), dst, regT1, regT0);
}

void JIT::emitSlow_op_to_jsnumber(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    int dst = currentInstruction[1].u.operand;

    linkSlowCase(iter);

    JITStubCall stubCall(this, cti_op_to_jsnumber);
    stubCall.addArgument(regT1, regT0);
    stubCall.call(dst);
}

void JIT::emit_op_push_new_scope(Instruction* currentInstruction)
{
    JITStubCall stubCall(this, cti_op_push_new_scope);
    stubCall.addArgument(ImmPtr(&m_codeBlock->identifier(currentInstruction[2].u.operand)));
    stubCall.addArgument(currentInstruction[3].u.operand);
    stubCall.call(currentInstruction[1].u.operand);
}

void JIT::emit_op_catch(Instruction* currentInstruction)
{
    unsigned exception = currentInstruction[1].u.operand;

    // This opcode only executes after a return from cti_op_throw.

    // cti_op_throw may have taken us to a call frame further up the stack; reload
    // the call frame pointer to adjust.
    peek(callFrameRegister, OBJECT_OFFSETOF(struct JITStackFrame, callFrame) / sizeof(void*));

    // Now store the exception returned by cti_op_throw.
    emitStore(exception, regT1, regT0);
    map(m_bytecodeOffset + OPCODE_LENGTH(op_catch), exception, regT1, regT0);
}

void JIT::emit_op_jmp_scopes(Instruction* currentInstruction)
{
    JITStubCall stubCall(this, cti_op_jmp_scopes);
    stubCall.addArgument(Imm32(currentInstruction[1].u.operand));
    stubCall.call();
    addJump(jump(), currentInstruction[2].u.operand);
}

void JIT::emit_op_switch_imm(Instruction* currentInstruction)
{
    unsigned tableIndex = currentInstruction[1].u.operand;
    unsigned defaultOffset = currentInstruction[2].u.operand;
    unsigned scrutinee = currentInstruction[3].u.operand;

    // create jump table for switch destinations, track this switch statement.
    SimpleJumpTable* jumpTable = &m_codeBlock->immediateSwitchJumpTable(tableIndex);
    m_switches.append(SwitchRecord(jumpTable, m_bytecodeOffset, defaultOffset, SwitchRecord::Immediate));
    jumpTable->ctiOffsets.grow(jumpTable->branchOffsets.size());

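    // cti_op_switch_imm maps the scrutinee to the native-code address of the
    // matching case (or of the default handler) via the jump table; the computed
    // jump below then lands directly on it.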
|
1331 JITStubCall stubCall(this, cti_op_switch_imm); |
|
1332 stubCall.addArgument(scrutinee); |
|
1333 stubCall.addArgument(Imm32(tableIndex)); |
|
1334 stubCall.call(); |
|
1335 jump(regT0); |
|
1336 } |
|
1337 |
|
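// op_switch_char: identical to op_switch_imm, but the jump table is keyed on
// single-character string scrutinees.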
void JIT::emit_op_switch_char(Instruction* currentInstruction)
{
    unsigned tableIndex = currentInstruction[1].u.operand;
    unsigned defaultOffset = currentInstruction[2].u.operand;
    unsigned scrutinee = currentInstruction[3].u.operand;

    // Create the jump table for switch destinations, and track this switch statement.
    SimpleJumpTable* jumpTable = &m_codeBlock->characterSwitchJumpTable(tableIndex);
    m_switches.append(SwitchRecord(jumpTable, m_bytecodeOffset, defaultOffset, SwitchRecord::Character));
    jumpTable->ctiOffsets.grow(jumpTable->branchOffsets.size());

    JITStubCall stubCall(this, cti_op_switch_char);
    stubCall.addArgument(scrutinee);
    stubCall.addArgument(Imm32(tableIndex));
    stubCall.call();
    jump(regT0);
}
|
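// op_switch_string: same shape again, keyed on full string values; the
// StringJumpTable manages its own code-offset storage, so no grow() is
// needed here.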
void JIT::emit_op_switch_string(Instruction* currentInstruction)
{
    unsigned tableIndex = currentInstruction[1].u.operand;
    unsigned defaultOffset = currentInstruction[2].u.operand;
    unsigned scrutinee = currentInstruction[3].u.operand;

    // Create the jump table for switch destinations, and track this switch statement.
    StringJumpTable* jumpTable = &m_codeBlock->stringSwitchJumpTable(tableIndex);
    m_switches.append(SwitchRecord(jumpTable, m_bytecodeOffset, defaultOffset));

    JITStubCall stubCall(this, cti_op_switch_string);
    stubCall.addArgument(scrutinee);
    stubCall.addArgument(Imm32(tableIndex));
    stubCall.call();
    jump(regT0);
}
|
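// op_new_error builds a new error object of the given type from a constant
// message; the current bytecode offset is passed along, presumably so the
// error can be attributed to a source location.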
void JIT::emit_op_new_error(Instruction* currentInstruction)
{
    unsigned dst = currentInstruction[1].u.operand;
    unsigned type = currentInstruction[2].u.operand;
    unsigned message = currentInstruction[3].u.operand;

    JITStubCall stubCall(this, cti_op_new_error);
    stubCall.addArgument(Imm32(type));
    stubCall.addArgument(m_codeBlock->getConstant(message));
    stubCall.addArgument(Imm32(m_bytecodeOffset));
    stubCall.call(dst);
}
|
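// op_debug notifies the debugger. With DEBUG_WITH_BREAKPOINT enabled we plant
// a breakpoint instruction instead of calling out to the stub.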
void JIT::emit_op_debug(Instruction* currentInstruction)
{
#if ENABLE(DEBUG_WITH_BREAKPOINT)
    UNUSED_PARAM(currentInstruction);
    breakpoint();
#else
    JITStubCall stubCall(this, cti_op_debug);
    stubCall.addArgument(Imm32(currentInstruction[1].u.operand));
    stubCall.addArgument(Imm32(currentInstruction[2].u.operand));
    stubCall.addArgument(Imm32(currentInstruction[3].u.operand));
    stubCall.call();
#endif
}
|
void JIT::emit_op_enter(Instruction*)
{
    // Even though JIT code doesn't use them, we initialize our constant
    // registers to zap stale pointers, to avoid unnecessarily prolonging
    // object lifetime and increasing GC pressure.
    for (int i = 0; i < m_codeBlock->m_numVars; ++i)
        emitStore(i, jsUndefined());
}
|
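// op_enter_with_activation does everything op_enter does, then additionally
// allocates the activation object for functions that need one.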
void JIT::emit_op_enter_with_activation(Instruction* currentInstruction)
{
    emit_op_enter(currentInstruction);

    JITStubCall(this, cti_op_push_activation).call(currentInstruction[1].u.operand);
}
|
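// op_create_arguments lazily materializes the arguments object: a non-empty
// tag means it already exists. A function whose only parameter is 'this'
// (m_numParameters == 1) can use the cheaper no-params stub. The result goes
// into both the named and the unmodified arguments registers.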
void JIT::emit_op_create_arguments(Instruction* currentInstruction)
{
    unsigned dst = currentInstruction[1].u.operand;

    Jump argsCreated = branch32(NotEqual, tagFor(dst), Imm32(JSValue::EmptyValueTag));

    if (m_codeBlock->m_numParameters == 1)
        JITStubCall(this, cti_op_create_arguments_no_params).call();
    else
        JITStubCall(this, cti_op_create_arguments).call();

    emitStore(dst, regT1, regT0);
    emitStore(unmodifiedArgumentsRegister(dst), regT1, regT0);

    argsCreated.link(this);
}
|
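// op_init_arguments stores the empty value into both arguments registers so
// that op_create_arguments can tell the object has not been created yet.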
void JIT::emit_op_init_arguments(Instruction* currentInstruction)
{
    unsigned dst = currentInstruction[1].u.operand;

    emitStore(dst, JSValue());
    emitStore(unmodifiedArgumentsRegister(dst), JSValue());
}
|
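// op_get_callee loads the callee from the call frame header and stores it
// into dst as a cell.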
void JIT::emit_op_get_callee(Instruction* currentInstruction)
{
    int dst = currentInstruction[1].u.operand;
    emitGetFromCallFrameHeaderPtr(RegisterFile::Callee, regT0);
    emitStoreCell(dst, regT0);
}
|
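// op_create_this allocates the 'this' object for a construct call, based on
// the prototype in the second operand; the allocation itself happens in the
// stub.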
void JIT::emit_op_create_this(Instruction* currentInstruction)
{
    unsigned protoRegister = currentInstruction[2].u.operand;
    emitLoad(protoRegister, regT1, regT0);
    JITStubCall stubCall(this, cti_op_create_this);
    stubCall.addArgument(regT1, regT0);
    stubCall.call(currentInstruction[1].u.operand);
}
|
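// op_convert_this ensures 'this' is an object: non-cells take the slow case,
// as do cells whose structures carry the NeedsThisConversion flag (strings,
// for example).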
void JIT::emit_op_convert_this(Instruction* currentInstruction)
{
    unsigned thisRegister = currentInstruction[1].u.operand;

    emitLoad(thisRegister, regT1, regT0);

    addSlowCase(branch32(NotEqual, regT1, Imm32(JSValue::CellTag)));

    loadPtr(Address(regT0, OBJECT_OFFSETOF(JSCell, m_structure)), regT2);
    addSlowCase(branchTest8(NonZero, Address(regT2, OBJECT_OFFSETOF(Structure, m_typeInfo.m_flags)), Imm32(NeedsThisConversion)));

    map(m_bytecodeOffset + OPCODE_LENGTH(op_convert_this), thisRegister, regT1, regT0);
}
|
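// Both checks above can fail, so link two slow cases before falling back to
// the stub conversion.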
void JIT::emitSlow_op_convert_this(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    unsigned thisRegister = currentInstruction[1].u.operand;

    linkSlowCase(iter);
    linkSlowCase(iter);

    JITStubCall stubCall(this, cti_op_convert_this);
    stubCall.addArgument(regT1, regT0);
    stubCall.call(thisRegister);
}
|
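// op_profile_will_call reads the enabled-profiler reference off the JIT stack
// frame and skips the stub call entirely when no profiler is installed.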
void JIT::emit_op_profile_will_call(Instruction* currentInstruction)
{
    peek(regT2, OBJECT_OFFSETOF(JITStackFrame, enabledProfilerReference) / sizeof(void*));
    Jump noProfiler = branchTestPtr(Zero, Address(regT2));

    JITStubCall stubCall(this, cti_op_profile_will_call);
    stubCall.addArgument(currentInstruction[1].u.operand);
    stubCall.call();
    noProfiler.link(this);
}
|
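// op_profile_did_call is the mirror image of the above, fired after the call
// returns.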
void JIT::emit_op_profile_did_call(Instruction* currentInstruction)
{
    peek(regT2, OBJECT_OFFSETOF(JITStackFrame, enabledProfilerReference) / sizeof(void*));
    Jump noProfiler = branchTestPtr(Zero, Address(regT2));

    JITStubCall stubCall(this, cti_op_profile_did_call);
    stubCall.addArgument(currentInstruction[1].u.operand);
    stubCall.call();
    noProfiler.link(this);
}
|
} // namespace JSC

#endif // USE(JSVALUE32_64)
#endif // ENABLE(JIT)