author | Dremov Kirill (Nokia-D-MSW/Tampere) <kirill.dremov@nokia.com> |
Tue, 06 Jul 2010 15:10:48 +0300 | |
changeset 30 | 5dc02b23752f |
parent 3 | 41300fa6a67c |
permissions | -rw-r--r-- |
0 | 1 |
/* |
2 |
* Copyright (C) 2008 Apple Inc. All rights reserved. |
|
3 |
* |
|
4 |
* Redistribution and use in source and binary forms, with or without |
|
5 |
* modification, are permitted provided that the following conditions |
|
6 |
* are met: |
|
7 |
* 1. Redistributions of source code must retain the above copyright |
|
8 |
* notice, this list of conditions and the following disclaimer. |
|
9 |
* 2. Redistributions in binary form must reproduce the above copyright |
|
10 |
* notice, this list of conditions and the following disclaimer in the |
|
11 |
* documentation and/or other materials provided with the distribution. |
|
12 |
* |
|
13 |
* THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY |
|
14 |
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE |
|
15 |
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR |
|
16 |
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR |
|
17 |
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, |
|
18 |
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, |
|
19 |
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR |
|
20 |
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY |
|
21 |
* OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
|
22 |
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE |
|
23 |
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
|
24 |
*/ |
|
25 |
||
26 |
#include "config.h" |
|
27 |
#include "JIT.h" |
|
28 |
||
29 |
#if ENABLE(JIT) |
|
30 |
||
31 |
#include "CodeBlock.h" |
|
32 |
#include "JITInlineMethods.h" |
|
33 |
#include "JITStubCall.h" |
|
34 |
#include "JSArray.h" |
|
35 |
#include "JSFunction.h" |
|
36 |
#include "Interpreter.h" |
|
37 |
#include "ResultType.h" |
|
38 |
#include "SamplingTool.h" |
|
39 |
||
40 |
#ifndef NDEBUG |
|
41 |
#include <stdio.h> |
|
42 |
#endif |
|
43 |
||
44 |
using namespace std; |
|
45 |
||
46 |
namespace JSC { |
|
47 |
||
48 |
#if USE(JSVALUE32_64) |
|
49 |
||
50 |
// Emits code that fills in the callee-visible slots of the (already rolled)
// call frame: ArgumentCount, OptionalCalleeArguments (cleared to the empty
// JSValue), Callee, and ScopeChain.
// On entry to the emitted code: regT0 holds the callee, regT1 holds argCount.
void JIT::compileOpCallInitializeCallFrame()
{
    // regT0 holds callee, regT1 holds argCount
    store32(regT1, Address(callFrameRegister, RegisterFile::ArgumentCount * static_cast<int>(sizeof(Register))));

    // Fetch the callee's scope chain node; regT1 is free to clobber now that
    // argCount has been stored.
    loadPtr(Address(regT0, OBJECT_OFFSETOF(JSFunction, m_data) + OBJECT_OFFSETOF(ScopeChain, m_node)), regT1); // scopeChain

    emitStore(static_cast<unsigned>(RegisterFile::OptionalCalleeArguments), JSValue());
    storePtr(regT0, Address(callFrameRegister, RegisterFile::Callee * static_cast<int>(sizeof(Register)))); // callee
    storePtr(regT1, Address(callFrameRegister, RegisterFile::ScopeChain * static_cast<int>(sizeof(Register)))); // scopeChain
}
|
61 |
||
62 |
// Marshals the operands of an op_call into the JIT stub argument area:
// the callee value (tag in regT1, payload in regT0) at slot 0, then the
// constant registerOffset and argCount taken from the instruction stream.
void JIT::compileOpCallSetupArgs(Instruction* instruction)
{
    int argCount = instruction[3].u.operand;
    int registerOffset = instruction[4].u.operand;

    emitPutJITStubArg(regT1, regT0, 0);
    emitPutJITStubArgConstant(registerOffset, 1);
    emitPutJITStubArgConstant(argCount, 2);
}
|
71 |
||
72 |
// Marshals the operands of an op_construct into the JIT stub argument area.
// Same as compileOpCallSetupArgs plus the prototype value (loaded from its
// virtual register via regT2/regT3) and the 'this' register index.
void JIT::compileOpConstructSetupArgs(Instruction* instruction)
{
    int argCount = instruction[3].u.operand;
    int registerOffset = instruction[4].u.operand;
    int proto = instruction[5].u.operand;
    int thisRegister = instruction[6].u.operand;

    emitPutJITStubArg(regT1, regT0, 0);
    emitPutJITStubArgConstant(registerOffset, 1);
    emitPutJITStubArgConstant(argCount, 2);
    emitPutJITStubArgFromVirtualRegister(proto, 3, regT2, regT3);
    emitPutJITStubArgConstant(thisRegister, 4);
}
|
85 |
||
86 |
// Marshals stub arguments for a varargs call. Unlike the non-varargs variant,
// registerOffset and argCount are runtime values (computed by the caller into
// regT3 and regT2 respectively) rather than instruction-stream constants.
void JIT::compileOpCallVarargsSetupArgs(Instruction*)
{
    emitPutJITStubArg(regT1, regT0, 0);
    emitPutJITStubArg(regT3, 1); // registerOffset
    emitPutJITStubArg(regT2, 2); // argCount
}
|
92 |
||
93 |
// Emits the hot path for op_call_varargs (JSVALUE32_64). Loads the callee and
// the runtime argument count, guards that the callee is a JSFunction (slow
// cases handle everything else), rolls the call frame by the runtime offset,
// and calls through the ctiVirtualCall trampoline.
void JIT::compileOpCallVarargs(Instruction* instruction)
{
    int dst = instruction[1].u.operand;
    int callee = instruction[2].u.operand;
    int argCountRegister = instruction[3].u.operand;
    int registerOffset = instruction[4].u.operand;

    emitLoad(callee, regT1, regT0);
    emitLoadPayload(argCountRegister, regT2); // argCount
    addPtr(Imm32(registerOffset), regT2, regT3); // registerOffset

    compileOpCallVarargsSetupArgs(instruction);

    // Guard: callee must be a cell whose vptr identifies it as a JSFunction.
    emitJumpSlowCaseIfNotJSCell(callee, regT1);
    addSlowCase(branchPtr(NotEqual, Address(regT0), ImmPtr(m_globalData->jsFunctionVPtr)));

    // Speculatively roll the callframe, assuming argCount will match the arity.
    mul32(Imm32(sizeof(Register)), regT3, regT3);
    addPtr(callFrameRegister, regT3);
    storePtr(callFrameRegister, Address(regT3, RegisterFile::CallerFrame * static_cast<int>(sizeof(Register))));
    move(regT3, callFrameRegister);

    move(regT2, regT1); // argCount

    emitNakedCall(m_globalData->jitStubs.ctiVirtualCall());

    emitStore(dst, regT1, regT0);

    sampleCodeBlock(m_codeBlock);
}
|
123 |
||
124 |
// Emits the slow path for op_call_varargs (JSVALUE32_64): taken when the
// callee is not a cell or not a JSFunction. Falls back to the
// cti_op_call_NotJSFunction stub.
void JIT::compileOpCallVarargsSlowCase(Instruction* instruction, Vector<SlowCaseEntry>::iterator& iter)
{
    int dst = instruction[1].u.operand;
    int callee = instruction[2].u.operand;

    // Link the two slow-case jumps planted by compileOpCallVarargs.
    linkSlowCaseIfNotJSCell(iter, callee);
    linkSlowCase(iter);

    JITStubCall stubCall(this, cti_op_call_NotJSFunction);
    stubCall.call(dst); // In the interpreter, the callee puts the return value in dst.

    // Record that dst is live in regT1:regT0 at the next bytecode index, so
    // redundant reloads can be elided.
    map(m_bytecodeIndex + OPCODE_LENGTH(op_call_varargs), dst, regT1, regT0);
    sampleCodeBlock(m_codeBlock);
}
|
138 |
||
139 |
// Emits op_ret (JSVALUE32_64): loads the return value into regT1:regT0,
// restores the caller's frame and return address, and returns.
void JIT::emit_op_ret(Instruction* currentInstruction)
{
    unsigned dst = currentInstruction[1].u.operand;

    // We could JIT generate the deref, only calling out to C when the refcount hits zero.
    if (m_codeBlock->needsFullScopeChain())
        JITStubCall(this, cti_op_ret_scopeChain).call();

    emitLoad(dst, regT1, regT0);
    emitGetFromCallFrameHeaderPtr(RegisterFile::ReturnPC, regT2);
    emitGetFromCallFrameHeaderPtr(RegisterFile::CallerFrame, callFrameRegister);

    restoreReturnAddressBeforeReturn(regT2);
    ret();
}
|
154 |
||
155 |
// Emits op_construct_verify (JSVALUE32_64): checks that the value produced by
// a constructor call is an object (cell tag + ObjectType in its structure's
// type info). Two slow cases are planted; the slow path substitutes the
// original 'this' value instead.
void JIT::emit_op_construct_verify(Instruction* currentInstruction)
{
    unsigned dst = currentInstruction[1].u.operand;

    emitLoad(dst, regT1, regT0);
    addSlowCase(branch32(NotEqual, regT1, Imm32(JSValue::CellTag)));
    loadPtr(Address(regT0, OBJECT_OFFSETOF(JSCell, m_structure)), regT2);
    addSlowCase(branch8(NotEqual, Address(regT2, OBJECT_OFFSETOF(Structure, m_typeInfo) + OBJECT_OFFSETOF(TypeInfo, m_type)), Imm32(ObjectType)));
}
164 |
||
165 |
// Slow path for op_construct_verify: the constructor returned a non-object,
// so per the spec the original 'this' (src) becomes the construct result.
void JIT::emitSlow_op_construct_verify(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    unsigned dst = currentInstruction[1].u.operand;
    unsigned src = currentInstruction[2].u.operand;

    // Link both slow cases planted by emit_op_construct_verify.
    linkSlowCase(iter);
    linkSlowCase(iter);
    emitLoad(src, regT1, regT0);
    emitStore(dst, regT1, regT0);
}
|
175 |
||
176 |
// Slow-path dispatcher for op_call; consumes the next call-link-info slot.
void JIT::emitSlow_op_call(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    compileOpCallSlowCase(currentInstruction, iter, m_callLinkInfoIndex++, op_call);
}
|
180 |
||
181 |
// Slow-path dispatcher for op_call_eval; consumes the next call-link-info slot.
void JIT::emitSlow_op_call_eval(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    compileOpCallSlowCase(currentInstruction, iter, m_callLinkInfoIndex++, op_call_eval);
}
|
185 |
||
186 |
// Slow-path dispatcher for op_call_varargs (no call-link-info slot: varargs
// calls are never linked).
void JIT::emitSlow_op_call_varargs(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    compileOpCallVarargsSlowCase(currentInstruction, iter);
}
|
190 |
||
191 |
// Slow-path dispatcher for op_construct; consumes the next call-link-info slot.
void JIT::emitSlow_op_construct(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    compileOpCallSlowCase(currentInstruction, iter, m_callLinkInfoIndex++, op_construct);
}
|
195 |
||
196 |
// Hot-path dispatcher for op_call; consumes the next call-link-info slot.
void JIT::emit_op_call(Instruction* currentInstruction)
{
    compileOpCall(op_call, currentInstruction, m_callLinkInfoIndex++);
}
|
200 |
||
201 |
// Hot-path dispatcher for op_call_eval; consumes the next call-link-info slot.
void JIT::emit_op_call_eval(Instruction* currentInstruction)
{
    compileOpCall(op_call_eval, currentInstruction, m_callLinkInfoIndex++);
}
|
205 |
||
206 |
// Emits op_load_varargs: calls the cti_op_load_varargs stub to spread the
// arguments object onto the register file, then records the resulting
// argument count.
void JIT::emit_op_load_varargs(Instruction* currentInstruction)
{
    int argCountDst = currentInstruction[1].u.operand;
    int argsOffset = currentInstruction[2].u.operand;

    JITStubCall stubCall(this, cti_op_load_varargs);
    stubCall.addArgument(Imm32(argsOffset));
    stubCall.call();
    // Stores a naked int32 in the register file.
    store32(returnValueRegister, Address(callFrameRegister, argCountDst * sizeof(Register)));
}
|
217 |
||
218 |
// Hot-path dispatcher for op_call_varargs.
void JIT::emit_op_call_varargs(Instruction* currentInstruction)
{
    compileOpCallVarargs(currentInstruction);
}
|
222 |
||
223 |
// Hot-path dispatcher for op_construct; consumes the next call-link-info slot.
void JIT::emit_op_construct(Instruction* currentInstruction)
{
    compileOpCall(op_construct, currentInstruction, m_callLinkInfoIndex++);
}
|
227 |
||
228 |
#if !ENABLE(JIT_OPTIMIZE_CALL) |
|
229 |
||
230 |
/* ------------------------------ BEGIN: !ENABLE(JIT_OPTIMIZE_CALL) ------------------------------ */ |
|
231 |
||
232 |
// Unlinked (JIT_OPTIMIZE_CALL disabled) hot path for op_call / op_call_eval /
// op_construct in the JSVALUE32_64 value representation. Every call goes
// through the ctiVirtualCall trampoline; no inline caching is performed, so
// the call-link-info index parameter is unused.
void JIT::compileOpCall(OpcodeID opcodeID, Instruction* instruction, unsigned)
{
    int dst = instruction[1].u.operand;
    int callee = instruction[2].u.operand;
    int argCount = instruction[3].u.operand;
    int registerOffset = instruction[4].u.operand;

    // op_call_eval first tries the eval stub; if it produced a value
    // (non-empty tag in regT1) we skip the ordinary call sequence.
    Jump wasEval;
    if (opcodeID == op_call_eval) {
        JITStubCall stubCall(this, cti_op_call_eval);
        stubCall.addArgument(callee);
        stubCall.addArgument(JIT::Imm32(registerOffset));
        stubCall.addArgument(JIT::Imm32(argCount));
        stubCall.call();
        wasEval = branch32(NotEqual, regT1, Imm32(JSValue::EmptyValueTag));
    }

    emitLoad(callee, regT1, regT0);

    if (opcodeID == op_call)
        compileOpCallSetupArgs(instruction);
    else if (opcodeID == op_construct)
        compileOpConstructSetupArgs(instruction);

    // Guard: callee must be a cell whose vptr identifies it as a JSFunction.
    emitJumpSlowCaseIfNotJSCell(callee, regT1);
    addSlowCase(branchPtr(NotEqual, Address(regT0), ImmPtr(m_globalData->jsFunctionVPtr)));

    // First, in the case of a construct, allocate the new object.
    if (opcodeID == op_construct) {
        JITStubCall(this, cti_op_construct_JSConstruct).call(registerOffset - RegisterFile::CallFrameHeaderSize - argCount);
        emitLoad(callee, regT1, regT0); // stub call may have clobbered the callee registers
    }

    // Speculatively roll the callframe, assuming argCount will match the arity.
    storePtr(callFrameRegister, Address(callFrameRegister, (RegisterFile::CallerFrame + registerOffset) * static_cast<int>(sizeof(Register))));
    addPtr(Imm32(registerOffset * static_cast<int>(sizeof(Register))), callFrameRegister);
    move(Imm32(argCount), regT1);

    emitNakedCall(m_globalData->jitStubs.ctiVirtualCall());

    if (opcodeID == op_call_eval)
        wasEval.link(this);

    emitStore(dst, regT1, regT0);

    sampleCodeBlock(m_codeBlock);
}
|
279 |
||
280 |
// Unlinked (JIT_OPTIMIZE_CALL disabled) slow path: the callee was not a
// JSFunction, so dispatch to the appropriate NotJSFunction/NotJSConstruct stub.
void JIT::compileOpCallSlowCase(Instruction* instruction, Vector<SlowCaseEntry>::iterator& iter, unsigned, OpcodeID opcodeID)
{
    int dst = instruction[1].u.operand;
    int callee = instruction[2].u.operand;

    // Link the two slow-case jumps planted on the hot path.
    linkSlowCaseIfNotJSCell(iter, callee);
    linkSlowCase(iter);

    JITStubCall stubCall(this, opcodeID == op_construct ? cti_op_construct_NotJSConstruct : cti_op_call_NotJSFunction);
    stubCall.call(dst); // In the interpreter, the callee puts the return value in dst.

    sampleCodeBlock(m_codeBlock);
}
|
293 |
||
294 |
#else // !ENABLE(JIT_OPTIMIZE_CALL) |
|
295 |
||
296 |
/* ------------------------------ BEGIN: ENABLE(JIT_OPTIMIZE_CALL) ------------------------------ */ |
|
297 |
||
298 |
// Linked (JIT_OPTIMIZE_CALL enabled) hot path for op_call / op_call_eval /
// op_construct in the JSVALUE32_64 value representation. Plants a patchable
// pointer comparison against a cached JSFunction so the call can later be
// linked directly to the callee; on a cache hit the stack frame is
// initialized inline and the callee entered with a direct (patched) call.
void JIT::compileOpCall(OpcodeID opcodeID, Instruction* instruction, unsigned callLinkInfoIndex)
{
    int dst = instruction[1].u.operand;
    int callee = instruction[2].u.operand;
    int argCount = instruction[3].u.operand;
    int registerOffset = instruction[4].u.operand;

    // op_call_eval first tries the eval stub; if it produced a value
    // (non-empty tag in regT1) we skip the ordinary call sequence.
    Jump wasEval;
    if (opcodeID == op_call_eval) {
        JITStubCall stubCall(this, cti_op_call_eval);
        stubCall.addArgument(callee);
        stubCall.addArgument(JIT::Imm32(registerOffset));
        stubCall.addArgument(JIT::Imm32(argCount));
        stubCall.call();
        wasEval = branch32(NotEqual, regT1, Imm32(JSValue::EmptyValueTag));
    }

    emitLoad(callee, regT1, regT0);

    DataLabelPtr addressOfLinkedFunctionCheck;

    // The compare-and-jump must stay contiguous so the link/unlink code can
    // patch it; the uninterrupted sequence forbids constant-pool flushes etc.
    BEGIN_UNINTERRUPTED_SEQUENCE(sequenceOpCall);

    Jump jumpToSlow = branchPtrWithPatch(NotEqual, regT0, addressOfLinkedFunctionCheck, ImmPtr(0));

    END_UNINTERRUPTED_SEQUENCE(sequenceOpCall);

    addSlowCase(jumpToSlow);
    // Use ASSERT_JIT_OFFSET (as the JSVALUE64 path does) so a mismatch reports
    // the actual and expected offsets rather than a bare assertion failure.
    ASSERT_JIT_OFFSET(differenceBetween(addressOfLinkedFunctionCheck, jumpToSlow), patchOffsetOpCallCompareToJump);
    m_callStructureStubCompilationInfo[callLinkInfoIndex].hotPathBegin = addressOfLinkedFunctionCheck;

    // The payload compared equal, but the value is only a JSFunction if it is
    // actually a cell.
    addSlowCase(branch32(NotEqual, regT1, Imm32(JSValue::CellTag)));

    // The following is the fast case, only used when a callee can be linked.

    // In the case of OpConstruct, call out to a cti_ function to create the new object.
    if (opcodeID == op_construct) {
        int proto = instruction[5].u.operand;
        int thisRegister = instruction[6].u.operand;

        JITStubCall stubCall(this, cti_op_construct_JSConstruct);
        stubCall.addArgument(regT1, regT0);
        stubCall.addArgument(Imm32(0)); // FIXME: Remove this unused JITStub argument.
        stubCall.addArgument(Imm32(0)); // FIXME: Remove this unused JITStub argument.
        stubCall.addArgument(proto);
        stubCall.call(thisRegister);

        emitLoad(callee, regT1, regT0); // stub call may have clobbered the callee registers
    }

    // Fast version of stack frame initialization, directly relative to edi.
    // Note that this omits to set up RegisterFile::CodeBlock, which is set in the callee
    emitStore(registerOffset + RegisterFile::OptionalCalleeArguments, JSValue());
    emitStore(registerOffset + RegisterFile::Callee, regT1, regT0);

    loadPtr(Address(regT0, OBJECT_OFFSETOF(JSFunction, m_data) + OBJECT_OFFSETOF(ScopeChain, m_node)), regT1); // newScopeChain
    store32(Imm32(argCount), Address(callFrameRegister, (registerOffset + RegisterFile::ArgumentCount) * static_cast<int>(sizeof(Register))));
    storePtr(callFrameRegister, Address(callFrameRegister, (registerOffset + RegisterFile::CallerFrame) * static_cast<int>(sizeof(Register))));
    storePtr(regT1, Address(callFrameRegister, (registerOffset + RegisterFile::ScopeChain) * static_cast<int>(sizeof(Register))));
    addPtr(Imm32(registerOffset * sizeof(Register)), callFrameRegister);

    // Call to the callee
    m_callStructureStubCompilationInfo[callLinkInfoIndex].hotPathOther = emitNakedCall();

    if (opcodeID == op_call_eval)
        wasEval.link(this);

    // Put the return value in dst. In the interpreter, op_ret does this.
    emitStore(dst, regT1, regT0);
    map(m_bytecodeIndex + opcodeLengths[opcodeID], dst, regT1, regT0);

    sampleCodeBlock(m_codeBlock);
}
|
371 |
||
372 |
// Linked (JIT_OPTIMIZE_CALL enabled) slow path for the JSVALUE32_64 call hot
// path above. Two outcomes: a JSFunction that simply was not linked yet goes
// through ctiVirtualCallLink (which links the hot path for next time); any
// other value falls through to the NotJSFunction/NotJSConstruct stub.
// Fixes: removed stray double semicolons after the two emitStore calls.
void JIT::compileOpCallSlowCase(Instruction* instruction, Vector<SlowCaseEntry>::iterator& iter, unsigned callLinkInfoIndex, OpcodeID opcodeID)
{
    int dst = instruction[1].u.operand;
    int callee = instruction[2].u.operand;
    int argCount = instruction[3].u.operand;
    int registerOffset = instruction[4].u.operand;

    // Link the two slow-case jumps planted on the hot path (patchable compare
    // miss, and non-cell tag).
    linkSlowCase(iter);
    linkSlowCase(iter);

    // The arguments have been set up on the hot path for op_call_eval
    if (opcodeID == op_call)
        compileOpCallSetupArgs(instruction);
    else if (opcodeID == op_construct)
        compileOpConstructSetupArgs(instruction);

    // Fast check for JS function.
    Jump callLinkFailNotObject = branch32(NotEqual, regT1, Imm32(JSValue::CellTag));
    Jump callLinkFailNotJSFunction = branchPtr(NotEqual, Address(regT0), ImmPtr(m_globalData->jsFunctionVPtr));

    // First, in the case of a construct, allocate the new object.
    if (opcodeID == op_construct) {
        JITStubCall(this, cti_op_construct_JSConstruct).call(registerOffset - RegisterFile::CallFrameHeaderSize - argCount);
        emitLoad(callee, regT1, regT0); // stub call may have clobbered the callee registers
    }

    // Speculatively roll the callframe, assuming argCount will match the arity.
    storePtr(callFrameRegister, Address(callFrameRegister, (RegisterFile::CallerFrame + registerOffset) * static_cast<int>(sizeof(Register))));
    addPtr(Imm32(registerOffset * static_cast<int>(sizeof(Register))), callFrameRegister);
    move(Imm32(argCount), regT1);

    m_callStructureStubCompilationInfo[callLinkInfoIndex].callReturnLocation = emitNakedCall(m_globalData->jitStubs.ctiVirtualCallLink());

    // Put the return value in dst.
    emitStore(dst, regT1, regT0);
    sampleCodeBlock(m_codeBlock);

    // If not, we need an extra case in the if below!
    ASSERT(OPCODE_LENGTH(op_call) == OPCODE_LENGTH(op_call_eval));

    // Done! - return back to the hot path.
    if (opcodeID == op_construct)
        emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_construct));
    else
        emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_call));

    // This handles host functions
    callLinkFailNotObject.link(this);
    callLinkFailNotJSFunction.link(this);
    JITStubCall(this, opcodeID == op_construct ? cti_op_construct_NotJSConstruct : cti_op_call_NotJSFunction).call();

    emitStore(dst, regT1, regT0);
    sampleCodeBlock(m_codeBlock);
}
|
426 |
||
427 |
/* ------------------------------ END: !ENABLE / ENABLE(JIT_OPTIMIZE_CALL) ------------------------------ */ |
|
428 |
||
429 |
#endif // !ENABLE(JIT_OPTIMIZE_CALL) |
|
430 |
||
431 |
#else // USE(JSVALUE32_64) |
|
432 |
||
433 |
// JSVALUE64 variant: fills in the callee-visible slots of the (already
// rolled) call frame. On entry to the emitted code: regT0 holds the callee,
// regT1 holds argCount.
void JIT::compileOpCallInitializeCallFrame()
{
    store32(regT1, Address(callFrameRegister, RegisterFile::ArgumentCount * static_cast<int>(sizeof(Register))));

    // Fetch the callee's scope chain node; regT1 is free now argCount is stored.
    loadPtr(Address(regT0, OBJECT_OFFSETOF(JSFunction, m_data) + OBJECT_OFFSETOF(ScopeChain, m_node)), regT1); // newScopeChain

    storePtr(ImmPtr(JSValue::encode(JSValue())), Address(callFrameRegister, RegisterFile::OptionalCalleeArguments * static_cast<int>(sizeof(Register))));
    storePtr(regT0, Address(callFrameRegister, RegisterFile::Callee * static_cast<int>(sizeof(Register))));
    storePtr(regT1, Address(callFrameRegister, RegisterFile::ScopeChain * static_cast<int>(sizeof(Register))));
}
|
443 |
||
444 |
// JSVALUE64 variant: marshals op_call operands (callee pointer in regT0, plus
// constant argCount / registerOffset) into the JIT stub argument area.
void JIT::compileOpCallSetupArgs(Instruction* instruction)
{
    int argCount = instruction[3].u.operand;
    int registerOffset = instruction[4].u.operand;

    // ecx holds func
    emitPutJITStubArg(regT0, 0);
    emitPutJITStubArgConstant(argCount, 2);
    emitPutJITStubArgConstant(registerOffset, 1);
}
|
454 |
||
455 |
// JSVALUE64 variant: marshals varargs call stub arguments. argCount arrives
// in regT1 at runtime; the effective registerOffset is computed into regT2.
void JIT::compileOpCallVarargsSetupArgs(Instruction* instruction)
{
    int registerOffset = instruction[4].u.operand;

    // ecx holds func
    emitPutJITStubArg(regT0, 0);
    emitPutJITStubArg(regT1, 2); // argCount (runtime value)
    addPtr(Imm32(registerOffset), regT1, regT2);
    emitPutJITStubArg(regT2, 1); // registerOffset + argCount
}
|
465 |
||
466 |
// JSVALUE64 variant: marshals op_construct operands, including the prototype
// value (via regT2) and the 'this' register index.
void JIT::compileOpConstructSetupArgs(Instruction* instruction)
{
    int argCount = instruction[3].u.operand;
    int registerOffset = instruction[4].u.operand;
    int proto = instruction[5].u.operand;
    int thisRegister = instruction[6].u.operand;

    // ecx holds func
    emitPutJITStubArg(regT0, 0);
    emitPutJITStubArgConstant(registerOffset, 1);
    emitPutJITStubArgConstant(argCount, 2);
    emitPutJITStubArgFromVirtualRegister(proto, 3, regT2);
    emitPutJITStubArgConstant(thisRegister, 4);
}
|
480 |
||
481 |
// Emits the hot path for op_call_varargs (JSVALUE64): guards that the callee
// is a JSFunction, rolls the call frame by the runtime offset, and calls
// through the ctiVirtualCall trampoline.
void JIT::compileOpCallVarargs(Instruction* instruction)
{
    int dst = instruction[1].u.operand;
    int callee = instruction[2].u.operand;
    int argCountRegister = instruction[3].u.operand;

    emitGetVirtualRegister(argCountRegister, regT1);
    emitGetVirtualRegister(callee, regT0);
    compileOpCallVarargsSetupArgs(instruction); // leaves registerOffset+argCount (in registers) in regT2

    // Check for JSFunctions.
    emitJumpSlowCaseIfNotJSCell(regT0);
    addSlowCase(branchPtr(NotEqual, Address(regT0), ImmPtr(m_globalData->jsFunctionVPtr)));

    // Speculatively roll the callframe, assuming argCount will match the arity.
    mul32(Imm32(sizeof(Register)), regT2, regT2);
    intptr_t offset = (intptr_t)sizeof(Register) * (intptr_t)RegisterFile::CallerFrame;
    addPtr(Imm32((int32_t)offset), regT2, regT3);
    addPtr(callFrameRegister, regT3);
    storePtr(callFrameRegister, regT3);
    addPtr(regT2, callFrameRegister);
    emitNakedCall(m_globalData->jitStubs.ctiVirtualCall());

    // Put the return value in dst. In the interpreter, op_ret does this.
    emitPutVirtualRegister(dst);

    sampleCodeBlock(m_codeBlock);
}
|
509 |
||
510 |
// Slow path for op_call_varargs (JSVALUE64): the callee was not a JSFunction;
// fall back to the cti_op_call_NotJSFunction stub.
void JIT::compileOpCallVarargsSlowCase(Instruction* instruction, Vector<SlowCaseEntry>::iterator& iter)
{
    int dst = instruction[1].u.operand;

    // Link the two slow-case jumps planted by compileOpCallVarargs.
    linkSlowCase(iter);
    linkSlowCase(iter);
    JITStubCall stubCall(this, cti_op_call_NotJSFunction);
    stubCall.call(dst); // In the interpreter, the callee puts the return value in dst.

    sampleCodeBlock(m_codeBlock);
}
|
521 |
||
522 |
#if !ENABLE(JIT_OPTIMIZE_CALL) |
|
523 |
||
524 |
/* ------------------------------ BEGIN: !ENABLE(JIT_OPTIMIZE_CALL) ------------------------------ */ |
|
525 |
||
526 |
// Unlinked (JIT_OPTIMIZE_CALL disabled) hot path for op_call / op_call_eval /
// op_construct in the JSVALUE64 value representation. Every call goes through
// the ctiVirtualCall trampoline; the call-link-info index parameter is unused.
void JIT::compileOpCall(OpcodeID opcodeID, Instruction* instruction, unsigned)
{
    int dst = instruction[1].u.operand;
    int callee = instruction[2].u.operand;
    int argCount = instruction[3].u.operand;
    int registerOffset = instruction[4].u.operand;

    // Handle eval
    Jump wasEval;
    if (opcodeID == op_call_eval) {
        JITStubCall stubCall(this, cti_op_call_eval);
        stubCall.addArgument(callee, regT0);
        stubCall.addArgument(JIT::Imm32(registerOffset));
        stubCall.addArgument(JIT::Imm32(argCount));
        stubCall.call();
        // An empty JSValue result means "was not an eval" — fall through to a
        // normal call; otherwise skip straight to storing the result.
        wasEval = branchPtr(NotEqual, regT0, ImmPtr(JSValue::encode(JSValue())));
    }

    emitGetVirtualRegister(callee, regT0);
    // The arguments have been set up on the hot path for op_call_eval
    if (opcodeID == op_call)
        compileOpCallSetupArgs(instruction);
    else if (opcodeID == op_construct)
        compileOpConstructSetupArgs(instruction);

    // Check for JSFunctions.
    emitJumpSlowCaseIfNotJSCell(regT0);
    addSlowCase(branchPtr(NotEqual, Address(regT0), ImmPtr(m_globalData->jsFunctionVPtr)));

    // First, in the case of a construct, allocate the new object.
    if (opcodeID == op_construct) {
        JITStubCall(this, cti_op_construct_JSConstruct).call(registerOffset - RegisterFile::CallFrameHeaderSize - argCount);
        emitGetVirtualRegister(callee, regT0); // stub call may have clobbered regT0
    }

    // Speculatively roll the callframe, assuming argCount will match the arity.
    storePtr(callFrameRegister, Address(callFrameRegister, (RegisterFile::CallerFrame + registerOffset) * static_cast<int>(sizeof(Register))));
    addPtr(Imm32(registerOffset * static_cast<int>(sizeof(Register))), callFrameRegister);
    move(Imm32(argCount), regT1);

    emitNakedCall(m_globalData->jitStubs.ctiVirtualCall());

    if (opcodeID == op_call_eval)
        wasEval.link(this);

    // Put the return value in dst. In the interpreter, op_ret does this.
    emitPutVirtualRegister(dst);

    sampleCodeBlock(m_codeBlock);
}
|
576 |
||
577 |
// Unlinked (JIT_OPTIMIZE_CALL disabled) slow path, JSVALUE64: the callee was
// not a JSFunction, so dispatch to the appropriate stub.
void JIT::compileOpCallSlowCase(Instruction* instruction, Vector<SlowCaseEntry>::iterator& iter, unsigned, OpcodeID opcodeID)
{
    int dst = instruction[1].u.operand;

    // Link the two slow-case jumps planted on the hot path.
    linkSlowCase(iter);
    linkSlowCase(iter);
    JITStubCall stubCall(this, opcodeID == op_construct ? cti_op_construct_NotJSConstruct : cti_op_call_NotJSFunction);
    stubCall.call(dst); // In the interpreter, the callee puts the return value in dst.

    sampleCodeBlock(m_codeBlock);
}
|
588 |
||
589 |
#else // !ENABLE(JIT_OPTIMIZE_CALL) |
|
590 |
||
591 |
/* ------------------------------ BEGIN: ENABLE(JIT_OPTIMIZE_CALL) ------------------------------ */ |
|
592 |
||
593 |
// Linked (JIT_OPTIMIZE_CALL enabled) hot path for op_call / op_call_eval /
// op_construct in the JSVALUE64 value representation. Plants a patchable
// pointer comparison against a cached JSFunction; on a cache hit the stack
// frame is initialized inline and the callee entered via a direct call.
void JIT::compileOpCall(OpcodeID opcodeID, Instruction* instruction, unsigned callLinkInfoIndex)
{
    int dst = instruction[1].u.operand;
    int callee = instruction[2].u.operand;
    int argCount = instruction[3].u.operand;
    int registerOffset = instruction[4].u.operand;

    // Handle eval
    Jump wasEval;
    if (opcodeID == op_call_eval) {
        JITStubCall stubCall(this, cti_op_call_eval);
        stubCall.addArgument(callee, regT0);
        stubCall.addArgument(JIT::Imm32(registerOffset));
        stubCall.addArgument(JIT::Imm32(argCount));
        stubCall.call();
        // Empty JSValue => not an eval; fall through to a normal call.
        wasEval = branchPtr(NotEqual, regT0, ImmPtr(JSValue::encode(JSValue())));
    }

    // This plants a check for a cached JSFunction value, so we can plant a fast link to the callee.
    // This deliberately leaves the callee in ecx, used when setting up the stack frame below
    emitGetVirtualRegister(callee, regT0);
    DataLabelPtr addressOfLinkedFunctionCheck;

    // The compare-and-jump must stay contiguous so the link/unlink code can
    // patch it; the uninterrupted sequence forbids constant-pool flushes etc.
    BEGIN_UNINTERRUPTED_SEQUENCE(sequenceOpCall);

    Jump jumpToSlow = branchPtrWithPatch(NotEqual, regT0, addressOfLinkedFunctionCheck, ImmPtr(JSValue::encode(JSValue())));

    END_UNINTERRUPTED_SEQUENCE(sequenceOpCall);

    addSlowCase(jumpToSlow);
    ASSERT_JIT_OFFSET(differenceBetween(addressOfLinkedFunctionCheck, jumpToSlow), patchOffsetOpCallCompareToJump);
    m_callStructureStubCompilationInfo[callLinkInfoIndex].hotPathBegin = addressOfLinkedFunctionCheck;

    // The following is the fast case, only used when a callee can be linked.

    // In the case of OpConstruct, call out to a cti_ function to create the new object.
    if (opcodeID == op_construct) {
        int proto = instruction[5].u.operand;
        int thisRegister = instruction[6].u.operand;

        emitPutJITStubArg(regT0, 0);
        emitPutJITStubArgFromVirtualRegister(proto, 3, regT2);
        JITStubCall stubCall(this, cti_op_construct_JSConstruct);
        stubCall.call(thisRegister);
        emitGetVirtualRegister(callee, regT0); // stub call may have clobbered regT0
    }

    // Fast version of stack frame initialization, directly relative to edi.
    // Note that this omits to set up RegisterFile::CodeBlock, which is set in the callee
    storePtr(ImmPtr(JSValue::encode(JSValue())), Address(callFrameRegister, (registerOffset + RegisterFile::OptionalCalleeArguments) * static_cast<int>(sizeof(Register))));
    storePtr(regT0, Address(callFrameRegister, (registerOffset + RegisterFile::Callee) * static_cast<int>(sizeof(Register))));
    loadPtr(Address(regT0, OBJECT_OFFSETOF(JSFunction, m_data) + OBJECT_OFFSETOF(ScopeChain, m_node)), regT1); // newScopeChain
    store32(Imm32(argCount), Address(callFrameRegister, (registerOffset + RegisterFile::ArgumentCount) * static_cast<int>(sizeof(Register))));
    storePtr(callFrameRegister, Address(callFrameRegister, (registerOffset + RegisterFile::CallerFrame) * static_cast<int>(sizeof(Register))));
    storePtr(regT1, Address(callFrameRegister, (registerOffset + RegisterFile::ScopeChain) * static_cast<int>(sizeof(Register))));
    addPtr(Imm32(registerOffset * sizeof(Register)), callFrameRegister);

    // Call to the callee
    m_callStructureStubCompilationInfo[callLinkInfoIndex].hotPathOther = emitNakedCall();

    if (opcodeID == op_call_eval)
        wasEval.link(this);

    // Put the return value in dst. In the interpreter, op_ret does this.
    emitPutVirtualRegister(dst);

    sampleCodeBlock(m_codeBlock);
}
|
661 |
||
662 |
// Linked (JIT_OPTIMIZE_CALL enabled) slow path, JSVALUE64. A JSFunction that
// simply was not linked yet goes through ctiVirtualCallLink (which links the
// hot path for next time); any other value falls through to the
// NotJSFunction/NotJSConstruct stub.
void JIT::compileOpCallSlowCase(Instruction* instruction, Vector<SlowCaseEntry>::iterator& iter, unsigned callLinkInfoIndex, OpcodeID opcodeID)
{
    int dst = instruction[1].u.operand;
    int callee = instruction[2].u.operand;
    int argCount = instruction[3].u.operand;
    int registerOffset = instruction[4].u.operand;

    // Link the single slow-case jump (the patchable compare miss) planted on
    // the hot path.
    linkSlowCase(iter);

    // The arguments have been set up on the hot path for op_call_eval
    if (opcodeID == op_call)
        compileOpCallSetupArgs(instruction);
    else if (opcodeID == op_construct)
        compileOpConstructSetupArgs(instruction);

    // Fast check for JS function.
    Jump callLinkFailNotObject = emitJumpIfNotJSCell(regT0);
    Jump callLinkFailNotJSFunction = branchPtr(NotEqual, Address(regT0), ImmPtr(m_globalData->jsFunctionVPtr));

    // First, in the case of a construct, allocate the new object.
    if (opcodeID == op_construct) {
        JITStubCall(this, cti_op_construct_JSConstruct).call(registerOffset - RegisterFile::CallFrameHeaderSize - argCount);
        emitGetVirtualRegister(callee, regT0); // stub call may have clobbered regT0
    }

    // Speculatively roll the callframe, assuming argCount will match the arity.
    storePtr(callFrameRegister, Address(callFrameRegister, (RegisterFile::CallerFrame + registerOffset) * static_cast<int>(sizeof(Register))));
    addPtr(Imm32(registerOffset * static_cast<int>(sizeof(Register))), callFrameRegister);
    move(Imm32(argCount), regT1);

    // Preserve the callee for the link stub; regT0 may be needed by the trampoline.
    move(regT0, regT2);

    m_callStructureStubCompilationInfo[callLinkInfoIndex].callReturnLocation = emitNakedCall(m_globalData->jitStubs.ctiVirtualCallLink());

    // Put the return value in dst.
    emitPutVirtualRegister(dst);
    sampleCodeBlock(m_codeBlock);

    // If not, we need an extra case in the if below!
    ASSERT(OPCODE_LENGTH(op_call) == OPCODE_LENGTH(op_call_eval));

    // Done! - return back to the hot path.
    if (opcodeID == op_construct)
        emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_construct));
    else
        emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_call));

    // This handles host functions
    callLinkFailNotObject.link(this);
    callLinkFailNotJSFunction.link(this);
    JITStubCall(this, opcodeID == op_construct ? cti_op_construct_NotJSConstruct : cti_op_call_NotJSFunction).call();

    emitPutVirtualRegister(dst);
    sampleCodeBlock(m_codeBlock);
}
|
717 |
||
718 |
/* ------------------------------ END: !ENABLE / ENABLE(JIT_OPTIMIZE_CALL) ------------------------------ */ |
|
719 |
||
720 |
#endif // !ENABLE(JIT_OPTIMIZE_CALL) |
|
721 |
||
722 |
#endif // USE(JSVALUE32_64) |
|
723 |
||
724 |
} // namespace JSC |
|
725 |
||
726 |
#endif // ENABLE(JIT) |