/*
 * Copyright (C) 2008 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef JIT_h
#define JIT_h

#include <wtf/Platform.h>

#if ENABLE(JIT)

// We've run into some problems where changing the size of the class JIT leads to
// performance fluctuations. Try forcing alignment in an attempt to stabilize this.
#if COMPILER(GCC)
#define JIT_CLASS_ALIGNMENT __attribute__ ((aligned (32)))
#else
#define JIT_CLASS_ALIGNMENT
#endif
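// (JIT_CLASS_ALIGNMENT is applied at the closing brace of the class JIT
// definition at the bottom of this file: "} JIT_CLASS_ALIGNMENT;".)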

#include "CodeBlock.h"
#include "Interpreter.h"
#include "JITCode.h"
#include "JITStubs.h"
#include "Opcode.h"
#include "RegisterFile.h"
#include "MacroAssembler.h"
#include "Profiler.h"
#include <bytecode/SamplingTool.h>
#include <wtf/AlwaysInline.h>
#include <wtf/Vector.h>

namespace JSC {

    class CodeBlock;
    class JIT;
    class JSPropertyNameIterator;
    class Interpreter;
    class Register;
    class RegisterFile;
    class ScopeChainNode;
    class StructureChain;

    struct CallLinkInfo;
    struct Instruction;
    struct OperandTypes;
    struct PolymorphicAccessStructureList;
    struct SimpleJumpTable;
    struct StringJumpTable;
    struct StructureStubInfo;

    struct CallRecord {
        MacroAssembler::Call from;
        unsigned bytecodeIndex;
        void* to;

        CallRecord()
        {
        }

        CallRecord(MacroAssembler::Call from, unsigned bytecodeIndex, void* to = 0)
            : from(from)
            , bytecodeIndex(bytecodeIndex)
            , to(to)
        {
        }
    };

    struct JumpTable {
        MacroAssembler::Jump from;
        unsigned toBytecodeIndex;

        JumpTable(MacroAssembler::Jump f, unsigned t)
            : from(f)
            , toBytecodeIndex(t)
        {
        }
    };

    struct SlowCaseEntry {
        MacroAssembler::Jump from;
        unsigned to;
        unsigned hint;

        SlowCaseEntry(MacroAssembler::Jump f, unsigned t, unsigned h = 0)
            : from(f)
            , to(t)
            , hint(h)
        {
        }
    };

    struct SwitchRecord {
        enum Type {
            Immediate,
            Character,
            String
        };

        Type type;

        union {
            SimpleJumpTable* simpleJumpTable;
            StringJumpTable* stringJumpTable;
        } jumpTable;

        unsigned bytecodeIndex;
        unsigned defaultOffset;

        SwitchRecord(SimpleJumpTable* jumpTable, unsigned bytecodeIndex, unsigned defaultOffset, Type type)
            : type(type)
            , bytecodeIndex(bytecodeIndex)
            , defaultOffset(defaultOffset)
        {
            this->jumpTable.simpleJumpTable = jumpTable;
        }

        SwitchRecord(StringJumpTable* jumpTable, unsigned bytecodeIndex, unsigned defaultOffset)
            : type(String)
            , bytecodeIndex(bytecodeIndex)
            , defaultOffset(defaultOffset)
        {
            this->jumpTable.stringJumpTable = jumpTable;
        }
    };

    struct PropertyStubCompilationInfo {
        MacroAssembler::Call callReturnLocation;
        MacroAssembler::Label hotPathBegin;
    };

    struct StructureStubCompilationInfo {
        MacroAssembler::DataLabelPtr hotPathBegin;
        MacroAssembler::Call hotPathOther;
        MacroAssembler::Call callReturnLocation;
    };

    struct MethodCallCompilationInfo {
        MethodCallCompilationInfo(unsigned propertyAccessIndex)
            : propertyAccessIndex(propertyAccessIndex)
        {
        }

        MacroAssembler::DataLabelPtr structureToCompare;
        unsigned propertyAccessIndex;
    };

    // Near calls can only be patched to other JIT code; regular calls can be patched to JIT code or relinked to stub functions.
    void ctiPatchNearCallByReturnAddress(CodeBlock* codeblock, ReturnAddressPtr returnAddress, MacroAssemblerCodePtr newCalleeFunction);
    void ctiPatchCallByReturnAddress(CodeBlock* codeblock, ReturnAddressPtr returnAddress, MacroAssemblerCodePtr newCalleeFunction);
    void ctiPatchCallByReturnAddress(CodeBlock* codeblock, ReturnAddressPtr returnAddress, FunctionPtr newCalleeFunction);

    class JIT : private MacroAssembler {
        friend class JITStubCall;

        using MacroAssembler::Jump;
        using MacroAssembler::JumpList;
        using MacroAssembler::Label;

        // NOTES:
        //
        // regT0 has two special meanings. The return value from a stub
        // call will always be in regT0, and by default (unless
        // a register is specified) emitPutVirtualRegister() will store
        // the value from regT0.
        //
        // regT3 is required to be callee-preserved.
        //
        // tempRegister2 has no such dependencies. It is important that
        // on x86/x86-64 it is ecx for performance reasons, since the
        // MacroAssembler will need to plant register swaps if it is not -
        // however the code will still function correctly.
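        //
        // For example (illustrative only, following the notes above): an opcode
        // implementation may end with emitPutVirtualRegister(dst) immediately
        // after a stub call, since the stub's return value is already in regT0.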
#if PLATFORM(X86_64)
        static const RegisterID returnValueRegister = X86Registers::eax;
        static const RegisterID cachedResultRegister = X86Registers::eax;
        static const RegisterID firstArgumentRegister = X86Registers::edi;

        static const RegisterID timeoutCheckRegister = X86Registers::r12;
        static const RegisterID callFrameRegister = X86Registers::r13;
        static const RegisterID tagTypeNumberRegister = X86Registers::r14;
        static const RegisterID tagMaskRegister = X86Registers::r15;

        static const RegisterID regT0 = X86Registers::eax;
        static const RegisterID regT1 = X86Registers::edx;
        static const RegisterID regT2 = X86Registers::ecx;
        static const RegisterID regT3 = X86Registers::ebx;

        static const FPRegisterID fpRegT0 = X86Registers::xmm0;
        static const FPRegisterID fpRegT1 = X86Registers::xmm1;
        static const FPRegisterID fpRegT2 = X86Registers::xmm2;
#elif PLATFORM(X86)
        static const RegisterID returnValueRegister = X86Registers::eax;
        static const RegisterID cachedResultRegister = X86Registers::eax;
        // On x86 we always use fastcall conventions, but on
        // OS X it might make more sense to just use regparm.
        static const RegisterID firstArgumentRegister = X86Registers::ecx;

        static const RegisterID timeoutCheckRegister = X86Registers::esi;
        static const RegisterID callFrameRegister = X86Registers::edi;

        static const RegisterID regT0 = X86Registers::eax;
        static const RegisterID regT1 = X86Registers::edx;
        static const RegisterID regT2 = X86Registers::ecx;
        static const RegisterID regT3 = X86Registers::ebx;

        static const FPRegisterID fpRegT0 = X86Registers::xmm0;
        static const FPRegisterID fpRegT1 = X86Registers::xmm1;
        static const FPRegisterID fpRegT2 = X86Registers::xmm2;
#elif PLATFORM(ARM_THUMB2)
        static const RegisterID returnValueRegister = ARMRegisters::r0;
        static const RegisterID cachedResultRegister = ARMRegisters::r0;
        static const RegisterID firstArgumentRegister = ARMRegisters::r0;

        static const RegisterID regT0 = ARMRegisters::r0;
        static const RegisterID regT1 = ARMRegisters::r1;
        static const RegisterID regT2 = ARMRegisters::r2;
        static const RegisterID regT3 = ARMRegisters::r4;

        static const RegisterID callFrameRegister = ARMRegisters::r5;
        static const RegisterID timeoutCheckRegister = ARMRegisters::r6;

        static const FPRegisterID fpRegT0 = ARMRegisters::d0;
        static const FPRegisterID fpRegT1 = ARMRegisters::d1;
        static const FPRegisterID fpRegT2 = ARMRegisters::d2;
#elif PLATFORM(ARM_TRADITIONAL)
        static const RegisterID returnValueRegister = ARMRegisters::r0;
        static const RegisterID cachedResultRegister = ARMRegisters::r0;
        static const RegisterID firstArgumentRegister = ARMRegisters::r0;

        static const RegisterID timeoutCheckRegister = ARMRegisters::r5;
        static const RegisterID callFrameRegister = ARMRegisters::r4;
        static const RegisterID ctiReturnRegister = ARMRegisters::r6;

        static const RegisterID regT0 = ARMRegisters::r0;
        static const RegisterID regT1 = ARMRegisters::r1;
        static const RegisterID regT2 = ARMRegisters::r2;
        // Callee preserved
        static const RegisterID regT3 = ARMRegisters::r7;

        static const RegisterID regS0 = ARMRegisters::S0;
        // Callee preserved
        static const RegisterID regS1 = ARMRegisters::S1;

        static const RegisterID regStackPtr = ARMRegisters::sp;
        static const RegisterID regLink = ARMRegisters::lr;

        static const FPRegisterID fpRegT0 = ARMRegisters::d0;
        static const FPRegisterID fpRegT1 = ARMRegisters::d1;
        static const FPRegisterID fpRegT2 = ARMRegisters::d2;
#else
#error "JIT not supported on this platform."
#endif

        static const int patchGetByIdDefaultStructure = -1;
        // Magic number - the initial offset must not be representable as a signed 8-bit value, or the X86Assembler
        // will compress the displacement, and we may not be able to fit a patched offset.
        static const int patchGetByIdDefaultOffset = 256;
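        // (On x86, for instance, 256 does not fit in a signed byte, so the assembler
        // emits a full 32-bit displacement, leaving room for any patched offset.)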

    public:
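        // Static entry points: each one constructs a JIT for the given CodeBlock
        // and forwards to the matching privateCompile* method declared below.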
        static JITCode compile(JSGlobalData* globalData, CodeBlock* codeBlock)
        {
            return JIT(globalData, codeBlock).privateCompile();
        }

        static void compileGetByIdProto(JSGlobalData* globalData, CallFrame* callFrame, CodeBlock* codeBlock, StructureStubInfo* stubInfo, Structure* structure, Structure* prototypeStructure, size_t cachedOffset, ReturnAddressPtr returnAddress)
        {
            JIT jit(globalData, codeBlock);
            jit.privateCompileGetByIdProto(stubInfo, structure, prototypeStructure, cachedOffset, returnAddress, callFrame);
        }

        static void compileGetByIdSelfList(JSGlobalData* globalData, CodeBlock* codeBlock, StructureStubInfo* stubInfo, PolymorphicAccessStructureList* polymorphicStructures, int currentIndex, Structure* structure, size_t cachedOffset)
        {
            JIT jit(globalData, codeBlock);
            jit.privateCompileGetByIdSelfList(stubInfo, polymorphicStructures, currentIndex, structure, cachedOffset);
        }

        static void compileGetByIdProtoList(JSGlobalData* globalData, CallFrame* callFrame, CodeBlock* codeBlock, StructureStubInfo* stubInfo, PolymorphicAccessStructureList* prototypeStructureList, int currentIndex, Structure* structure, Structure* prototypeStructure, size_t cachedOffset)
        {
            JIT jit(globalData, codeBlock);
            jit.privateCompileGetByIdProtoList(stubInfo, prototypeStructureList, currentIndex, structure, prototypeStructure, cachedOffset, callFrame);
        }

        static void compileGetByIdChainList(JSGlobalData* globalData, CallFrame* callFrame, CodeBlock* codeBlock, StructureStubInfo* stubInfo, PolymorphicAccessStructureList* prototypeStructureList, int currentIndex, Structure* structure, StructureChain* chain, size_t count, size_t cachedOffset)
        {
            JIT jit(globalData, codeBlock);
            jit.privateCompileGetByIdChainList(stubInfo, prototypeStructureList, currentIndex, structure, chain, count, cachedOffset, callFrame);
        }

        static void compileGetByIdChain(JSGlobalData* globalData, CallFrame* callFrame, CodeBlock* codeBlock, StructureStubInfo* stubInfo, Structure* structure, StructureChain* chain, size_t count, size_t cachedOffset, ReturnAddressPtr returnAddress)
        {
            JIT jit(globalData, codeBlock);
            jit.privateCompileGetByIdChain(stubInfo, structure, chain, count, cachedOffset, returnAddress, callFrame);
        }

        static void compilePutByIdTransition(JSGlobalData* globalData, CodeBlock* codeBlock, StructureStubInfo* stubInfo, Structure* oldStructure, Structure* newStructure, size_t cachedOffset, StructureChain* chain, ReturnAddressPtr returnAddress)
        {
            JIT jit(globalData, codeBlock);
            jit.privateCompilePutByIdTransition(stubInfo, oldStructure, newStructure, cachedOffset, chain, returnAddress);
        }

        static void compileCTIMachineTrampolines(JSGlobalData* globalData, RefPtr<ExecutablePool>* executablePool, CodePtr* ctiStringLengthTrampoline, CodePtr* ctiVirtualCallLink, CodePtr* ctiVirtualCall, CodePtr* ctiNativeCallThunk)
        {
            JIT jit(globalData);
            jit.privateCompileCTIMachineTrampolines(executablePool, globalData, ctiStringLengthTrampoline, ctiVirtualCallLink, ctiVirtualCall, ctiNativeCallThunk);
        }

        static void patchGetByIdSelf(CodeBlock* codeblock, StructureStubInfo*, Structure*, size_t cachedOffset, ReturnAddressPtr returnAddress);
        static void patchPutByIdReplace(CodeBlock* codeblock, StructureStubInfo*, Structure*, size_t cachedOffset, ReturnAddressPtr returnAddress);
        static void patchMethodCallProto(CodeBlock* codeblock, MethodCallLinkInfo&, JSFunction*, Structure*, JSObject*, ReturnAddressPtr);

        static void compilePatchGetArrayLength(JSGlobalData* globalData, CodeBlock* codeBlock, ReturnAddressPtr returnAddress)
        {
            JIT jit(globalData, codeBlock);
            return jit.privateCompilePatchGetArrayLength(returnAddress);
        }

        static void linkCall(JSFunction* callee, CodeBlock* callerCodeBlock, CodeBlock* calleeCodeBlock, JITCode&, CallLinkInfo*, int callerArgCount, JSGlobalData*);
        static void unlinkCall(CallLinkInfo*);

    private:
        struct JSRInfo {
            DataLabelPtr storeLocation;
            Label target;

            JSRInfo(DataLabelPtr storeLocation, Label targetLocation)
                : storeLocation(storeLocation)
                , target(targetLocation)
            {
            }
        };

        JIT(JSGlobalData*, CodeBlock* = 0);

        void privateCompileMainPass();
        void privateCompileLinkPass();
        void privateCompileSlowCases();
        JITCode privateCompile();
        void privateCompileGetByIdProto(StructureStubInfo*, Structure*, Structure* prototypeStructure, size_t cachedOffset, ReturnAddressPtr returnAddress, CallFrame* callFrame);
        void privateCompileGetByIdSelfList(StructureStubInfo*, PolymorphicAccessStructureList*, int, Structure*, size_t cachedOffset);
        void privateCompileGetByIdProtoList(StructureStubInfo*, PolymorphicAccessStructureList*, int, Structure*, Structure* prototypeStructure, size_t cachedOffset, CallFrame* callFrame);
        void privateCompileGetByIdChainList(StructureStubInfo*, PolymorphicAccessStructureList*, int, Structure*, StructureChain* chain, size_t count, size_t cachedOffset, CallFrame* callFrame);
        void privateCompileGetByIdChain(StructureStubInfo*, Structure*, StructureChain*, size_t count, size_t cachedOffset, ReturnAddressPtr returnAddress, CallFrame* callFrame);
        void privateCompilePutByIdTransition(StructureStubInfo*, Structure*, Structure*, size_t cachedOffset, StructureChain*, ReturnAddressPtr returnAddress);

        void privateCompileCTIMachineTrampolines(RefPtr<ExecutablePool>* executablePool, JSGlobalData* data, CodePtr* ctiStringLengthTrampoline, CodePtr* ctiVirtualCallLink, CodePtr* ctiVirtualCall, CodePtr* ctiNativeCallThunk);
        void privateCompilePatchGetArrayLength(ReturnAddressPtr returnAddress);

        void addSlowCase(Jump);
        void addSlowCase(JumpList);
        void addJump(Jump, int);
        void emitJumpSlowToHot(Jump, int);

        void compileOpCall(OpcodeID, Instruction* instruction, unsigned callLinkInfoIndex);
        void compileOpCallVarargs(Instruction* instruction);
        void compileOpCallInitializeCallFrame();
        void compileOpCallSetupArgs(Instruction*);
        void compileOpCallVarargsSetupArgs(Instruction*);
        void compileOpCallSlowCase(Instruction* instruction, Vector<SlowCaseEntry>::iterator& iter, unsigned callLinkInfoIndex, OpcodeID opcodeID);
        void compileOpCallVarargsSlowCase(Instruction* instruction, Vector<SlowCaseEntry>::iterator& iter);
        void compileOpConstructSetupArgs(Instruction*);

        enum CompileOpStrictEqType { OpStrictEq, OpNStrictEq };
        void compileOpStrictEq(Instruction* instruction, CompileOpStrictEqType type);
        bool isOperandConstantImmediateDouble(unsigned src);

        void emitLoadDouble(unsigned index, FPRegisterID value);
        void emitLoadInt32ToDouble(unsigned index, FPRegisterID value);

        Address addressFor(unsigned index, RegisterID base = callFrameRegister);

        void testPrototype(Structure*, JumpList& failureCases);

#if USE(JSVALUE32_64)
        Address tagFor(unsigned index, RegisterID base = callFrameRegister);
        Address payloadFor(unsigned index, RegisterID base = callFrameRegister);

        bool getOperandConstantImmediateInt(unsigned op1, unsigned op2, unsigned& op, int32_t& constant);

        void emitLoadTag(unsigned index, RegisterID tag);
        void emitLoadPayload(unsigned index, RegisterID payload);

        void emitLoad(const JSValue& v, RegisterID tag, RegisterID payload);
        void emitLoad(unsigned index, RegisterID tag, RegisterID payload, RegisterID base = callFrameRegister);
        void emitLoad2(unsigned index1, RegisterID tag1, RegisterID payload1, unsigned index2, RegisterID tag2, RegisterID payload2);

        void emitStore(unsigned index, RegisterID tag, RegisterID payload, RegisterID base = callFrameRegister);
        void emitStore(unsigned index, const JSValue constant, RegisterID base = callFrameRegister);
        void emitStoreInt32(unsigned index, RegisterID payload, bool indexIsInt32 = false);
        void emitStoreInt32(unsigned index, Imm32 payload, bool indexIsInt32 = false);
        void emitStoreCell(unsigned index, RegisterID payload, bool indexIsCell = false);
        void emitStoreBool(unsigned index, RegisterID tag, bool indexIsBool = false);
        void emitStoreDouble(unsigned index, FPRegisterID value);

        bool isLabeled(unsigned bytecodeIndex);
        void map(unsigned bytecodeIndex, unsigned virtualRegisterIndex, RegisterID tag, RegisterID payload);
        void unmap(RegisterID);
        void unmap();
        bool isMapped(unsigned virtualRegisterIndex);
        bool getMappedPayload(unsigned virtualRegisterIndex, RegisterID& payload);
        bool getMappedTag(unsigned virtualRegisterIndex, RegisterID& tag);

        void emitJumpSlowCaseIfNotJSCell(unsigned virtualRegisterIndex);
        void emitJumpSlowCaseIfNotJSCell(unsigned virtualRegisterIndex, RegisterID tag);
        void linkSlowCaseIfNotJSCell(Vector<SlowCaseEntry>::iterator&, unsigned virtualRegisterIndex);

#if ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS)
        void compileGetByIdHotPath();
        void compileGetByIdSlowCase(int resultVReg, int baseVReg, Identifier* ident, Vector<SlowCaseEntry>::iterator& iter, bool isMethodCheck = false);
#endif
        void compileGetDirectOffset(RegisterID base, RegisterID resultTag, RegisterID resultPayload, Structure* structure, size_t cachedOffset);
        void compileGetDirectOffset(JSObject* base, RegisterID temp, RegisterID resultTag, RegisterID resultPayload, size_t cachedOffset);
        void compilePutDirectOffset(RegisterID base, RegisterID valueTag, RegisterID valuePayload, Structure* structure, size_t cachedOffset);

        // Arithmetic opcode helpers
        void emitAdd32Constant(unsigned dst, unsigned op, int32_t constant, ResultType opType);
        void emitSub32Constant(unsigned dst, unsigned op, int32_t constant, ResultType opType);
        void emitBinaryDoubleOp(OpcodeID, unsigned dst, unsigned op1, unsigned op2, OperandTypes, JumpList& notInt32Op1, JumpList& notInt32Op2, bool op1IsInRegisters = true, bool op2IsInRegisters = true);

#if PLATFORM(X86)
        // These architecture-specific values are used to enable patching - see comment on op_put_by_id.
        static const int patchOffsetPutByIdStructure = 7;
        static const int patchOffsetPutByIdExternalLoad = 13;
        static const int patchLengthPutByIdExternalLoad = 3;
        static const int patchOffsetPutByIdPropertyMapOffset1 = 22;
        static const int patchOffsetPutByIdPropertyMapOffset2 = 28;
        // These architecture-specific values are used to enable patching - see comment on op_get_by_id.
        static const int patchOffsetGetByIdStructure = 7;
        static const int patchOffsetGetByIdBranchToSlowCase = 13;
        static const int patchOffsetGetByIdExternalLoad = 13;
        static const int patchLengthGetByIdExternalLoad = 3;
        static const int patchOffsetGetByIdPropertyMapOffset1 = 22;
        static const int patchOffsetGetByIdPropertyMapOffset2 = 28;
        static const int patchOffsetGetByIdPutResult = 28;
#if ENABLE(OPCODE_SAMPLING) && USE(JIT_STUB_ARGUMENT_VA_LIST)
        static const int patchOffsetGetByIdSlowCaseCall = 35;
#elif ENABLE(OPCODE_SAMPLING)
        static const int patchOffsetGetByIdSlowCaseCall = 37;
#elif USE(JIT_STUB_ARGUMENT_VA_LIST)
        static const int patchOffsetGetByIdSlowCaseCall = 25;
#else
        static const int patchOffsetGetByIdSlowCaseCall = 27;
#endif
        static const int patchOffsetOpCallCompareToJump = 6;

        static const int patchOffsetMethodCheckProtoObj = 11;
        static const int patchOffsetMethodCheckProtoStruct = 18;
        static const int patchOffsetMethodCheckPutFunction = 29;
#else
#error "JSVALUE32_64 not supported on this platform."
#endif

#else // USE(JSVALUE32_64)
        void emitGetVirtualRegister(int src, RegisterID dst);
        void emitGetVirtualRegisters(int src1, RegisterID dst1, int src2, RegisterID dst2);
        void emitPutVirtualRegister(unsigned dst, RegisterID from = regT0);

        int32_t getConstantOperandImmediateInt(unsigned src);

        void emitGetVariableObjectRegister(RegisterID variableObject, int index, RegisterID dst);
        void emitPutVariableObjectRegister(RegisterID src, RegisterID variableObject, int index);

        void killLastResultRegister();

        Jump emitJumpIfJSCell(RegisterID);
        Jump emitJumpIfBothJSCells(RegisterID, RegisterID, RegisterID);
        void emitJumpSlowCaseIfJSCell(RegisterID);
        Jump emitJumpIfNotJSCell(RegisterID);
        void emitJumpSlowCaseIfNotJSCell(RegisterID);
        void emitJumpSlowCaseIfNotJSCell(RegisterID, int VReg);
#if USE(JSVALUE64)
        JIT::Jump emitJumpIfImmediateNumber(RegisterID);
        JIT::Jump emitJumpIfNotImmediateNumber(RegisterID);
#else
        JIT::Jump emitJumpIfImmediateNumber(RegisterID reg)
        {
            return emitJumpIfImmediateInteger(reg);
        }

        JIT::Jump emitJumpIfNotImmediateNumber(RegisterID reg)
        {
            return emitJumpIfNotImmediateInteger(reg);
        }
#endif
        JIT::Jump emitJumpIfImmediateInteger(RegisterID);
        JIT::Jump emitJumpIfNotImmediateInteger(RegisterID);
        JIT::Jump emitJumpIfNotImmediateIntegers(RegisterID, RegisterID, RegisterID);
        void emitJumpSlowCaseIfNotImmediateInteger(RegisterID);
        void emitJumpSlowCaseIfNotImmediateNumber(RegisterID);
        void emitJumpSlowCaseIfNotImmediateIntegers(RegisterID, RegisterID, RegisterID);

#if !USE(JSVALUE64)
        void emitFastArithDeTagImmediate(RegisterID);
        Jump emitFastArithDeTagImmediateJumpIfZero(RegisterID);
#endif
        void emitFastArithReTagImmediate(RegisterID src, RegisterID dest);
        void emitFastArithImmToInt(RegisterID);
        void emitFastArithIntToImmNoCheck(RegisterID src, RegisterID dest);

        void emitTagAsBoolImmediate(RegisterID reg);
        void compileBinaryArithOp(OpcodeID, unsigned dst, unsigned src1, unsigned src2, OperandTypes opi);
#if USE(JSVALUE64)
        void compileBinaryArithOpSlowCase(OpcodeID, Vector<SlowCaseEntry>::iterator&, unsigned dst, unsigned src1, unsigned src2, OperandTypes, bool op1HasImmediateIntFastCase, bool op2HasImmediateIntFastCase);
#else
        void compileBinaryArithOpSlowCase(OpcodeID, Vector<SlowCaseEntry>::iterator&, unsigned dst, unsigned src1, unsigned src2, OperandTypes);
#endif

#if ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS)
        void compileGetByIdHotPath(int resultVReg, int baseVReg, Identifier* ident, unsigned propertyAccessInstructionIndex);
        void compileGetByIdSlowCase(int resultVReg, int baseVReg, Identifier* ident, Vector<SlowCaseEntry>::iterator& iter, bool isMethodCheck = false);
#endif
        void compileGetDirectOffset(RegisterID base, RegisterID result, Structure* structure, size_t cachedOffset);
        void compileGetDirectOffset(JSObject* base, RegisterID temp, RegisterID result, size_t cachedOffset);
        void compilePutDirectOffset(RegisterID base, RegisterID value, Structure* structure, size_t cachedOffset);

#if PLATFORM(X86_64)
        // These architecture-specific values are used to enable patching - see comment on op_put_by_id.
        static const int patchOffsetPutByIdStructure = 10;
        static const int patchOffsetPutByIdExternalLoad = 20;
        static const int patchLengthPutByIdExternalLoad = 4;
        static const int patchOffsetPutByIdPropertyMapOffset = 31;
        // These architecture-specific values are used to enable patching - see comment on op_get_by_id.
        static const int patchOffsetGetByIdStructure = 10;
        static const int patchOffsetGetByIdBranchToSlowCase = 20;
        static const int patchOffsetGetByIdExternalLoad = 20;
        static const int patchLengthGetByIdExternalLoad = 4;
        static const int patchOffsetGetByIdPropertyMapOffset = 31;
        static const int patchOffsetGetByIdPutResult = 31;
#if ENABLE(OPCODE_SAMPLING)
        static const int patchOffsetGetByIdSlowCaseCall = 64;
#else
        static const int patchOffsetGetByIdSlowCaseCall = 41;
#endif
        static const int patchOffsetOpCallCompareToJump = 9;

        static const int patchOffsetMethodCheckProtoObj = 20;
        static const int patchOffsetMethodCheckProtoStruct = 30;
        static const int patchOffsetMethodCheckPutFunction = 50;
#elif PLATFORM(X86)
        // These architecture-specific values are used to enable patching - see comment on op_put_by_id.
        static const int patchOffsetPutByIdStructure = 7;
        static const int patchOffsetPutByIdExternalLoad = 13;
        static const int patchLengthPutByIdExternalLoad = 3;
        static const int patchOffsetPutByIdPropertyMapOffset = 22;
        // These architecture-specific values are used to enable patching - see comment on op_get_by_id.
        static const int patchOffsetGetByIdStructure = 7;
        static const int patchOffsetGetByIdBranchToSlowCase = 13;
        static const int patchOffsetGetByIdExternalLoad = 13;
        static const int patchLengthGetByIdExternalLoad = 3;
        static const int patchOffsetGetByIdPropertyMapOffset = 22;
        static const int patchOffsetGetByIdPutResult = 22;
#if ENABLE(OPCODE_SAMPLING) && USE(JIT_STUB_ARGUMENT_VA_LIST)
        static const int patchOffsetGetByIdSlowCaseCall = 31;
#elif ENABLE(OPCODE_SAMPLING)
        static const int patchOffsetGetByIdSlowCaseCall = 33;
#elif USE(JIT_STUB_ARGUMENT_VA_LIST)
        static const int patchOffsetGetByIdSlowCaseCall = 21;
#else
        static const int patchOffsetGetByIdSlowCaseCall = 23;
#endif
        static const int patchOffsetOpCallCompareToJump = 6;

        static const int patchOffsetMethodCheckProtoObj = 11;
        static const int patchOffsetMethodCheckProtoStruct = 18;
        static const int patchOffsetMethodCheckPutFunction = 29;
#elif PLATFORM(ARM_THUMB2)
        // These architecture-specific values are used to enable patching - see comment on op_put_by_id.
        static const int patchOffsetPutByIdStructure = 10;
        static const int patchOffsetPutByIdExternalLoad = 20;
        static const int patchLengthPutByIdExternalLoad = 12;
        static const int patchOffsetPutByIdPropertyMapOffset = 40;
        // These architecture-specific values are used to enable patching - see comment on op_get_by_id.
        static const int patchOffsetGetByIdStructure = 10;
        static const int patchOffsetGetByIdBranchToSlowCase = 20;
        static const int patchOffsetGetByIdExternalLoad = 20;
        static const int patchLengthGetByIdExternalLoad = 12;
        static const int patchOffsetGetByIdPropertyMapOffset = 40;
        static const int patchOffsetGetByIdPutResult = 44;
#if ENABLE(OPCODE_SAMPLING)
        static const int patchOffsetGetByIdSlowCaseCall = 0; // FIXME
#else
        static const int patchOffsetGetByIdSlowCaseCall = 28;
#endif
        static const int patchOffsetOpCallCompareToJump = 10;

        static const int patchOffsetMethodCheckProtoObj = 18;
        static const int patchOffsetMethodCheckProtoStruct = 28;
        static const int patchOffsetMethodCheckPutFunction = 46;
#elif PLATFORM(ARM_TRADITIONAL)
        // These architecture-specific values are used to enable patching - see comment on op_put_by_id.
        static const int patchOffsetPutByIdStructure = 4;
        static const int patchOffsetPutByIdExternalLoad = 16;
        static const int patchLengthPutByIdExternalLoad = 4;
        static const int patchOffsetPutByIdPropertyMapOffset = 20;
        // These architecture-specific values are used to enable patching - see comment on op_get_by_id.
        static const int patchOffsetGetByIdStructure = 4;
        static const int patchOffsetGetByIdBranchToSlowCase = 16;
        static const int patchOffsetGetByIdExternalLoad = 16;
        static const int patchLengthGetByIdExternalLoad = 4;
        static const int patchOffsetGetByIdPropertyMapOffset = 20;
        static const int patchOffsetGetByIdPutResult = 28;
#if ENABLE(OPCODE_SAMPLING)
#error "OPCODE_SAMPLING is not yet supported"
#else
        static const int patchOffsetGetByIdSlowCaseCall = 36;
#endif
        static const int patchOffsetOpCallCompareToJump = 12;

        static const int patchOffsetMethodCheckProtoObj = 12;
        static const int patchOffsetMethodCheckProtoStruct = 20;
        static const int patchOffsetMethodCheckPutFunction = 32;
#endif
#endif // USE(JSVALUE32_64)

#if PLATFORM(ARM_TRADITIONAL)
        // sequenceOpCall
        static const int sequenceOpCallInstructionSpace = 12;
        static const int sequenceOpCallConstantSpace = 2;
        // sequenceMethodCheck
        static const int sequenceMethodCheckInstructionSpace = 40;
        static const int sequenceMethodCheckConstantSpace = 6;
        // sequenceGetByIdHotPath
        static const int sequenceGetByIdHotPathInstructionSpace = 28;
        static const int sequenceGetByIdHotPathConstantSpace = 3;
        // sequenceGetByIdSlowCase
        static const int sequenceGetByIdSlowCaseInstructionSpace = 40;
        static const int sequenceGetByIdSlowCaseConstantSpace = 2;
        // sequencePutById
        static const int sequencePutByIdInstructionSpace = 28;
        static const int sequencePutByIdConstantSpace = 3;
#endif

#if defined(ASSEMBLER_HAS_CONSTANT_POOL) && ASSEMBLER_HAS_CONSTANT_POOL
#define BEGIN_UNINTERRUPTED_SEQUENCE(name) beginUninterruptedSequence(name ## InstructionSpace, name ## ConstantSpace)
#define END_UNINTERRUPTED_SEQUENCE(name) endUninterruptedSequence(name ## InstructionSpace, name ## ConstantSpace)

        void beginUninterruptedSequence(int, int);
        void endUninterruptedSequence(int, int);

#else
#define BEGIN_UNINTERRUPTED_SEQUENCE(name)
#define END_UNINTERRUPTED_SEQUENCE(name)
#endif
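        // Usage sketch (illustrative): bracket a patchable code sequence with the
        // macros, naming one of the sequence* constant pairs defined above, e.g.:
        //     BEGIN_UNINTERRUPTED_SEQUENCE(sequenceGetByIdHotPath);
        //     ... emit the hot path ...
        //     END_UNINTERRUPTED_SEQUENCE(sequenceGetByIdHotPath);
        // This reserves space so a constant pool flush cannot split the sequence.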

        void emit_op_add(Instruction*);
        void emit_op_bitand(Instruction*);
        void emit_op_bitnot(Instruction*);
        void emit_op_bitor(Instruction*);
        void emit_op_bitxor(Instruction*);
        void emit_op_call(Instruction*);
        void emit_op_call_eval(Instruction*);
        void emit_op_call_varargs(Instruction*);
        void emit_op_catch(Instruction*);
        void emit_op_construct(Instruction*);
        void emit_op_construct_verify(Instruction*);
        void emit_op_convert_this(Instruction*);
        void emit_op_create_arguments(Instruction*);
        void emit_op_debug(Instruction*);
        void emit_op_del_by_id(Instruction*);
        void emit_op_div(Instruction*);
        void emit_op_end(Instruction*);
        void emit_op_enter(Instruction*);
        void emit_op_enter_with_activation(Instruction*);
        void emit_op_eq(Instruction*);
        void emit_op_eq_null(Instruction*);
        void emit_op_get_by_id(Instruction*);
        void emit_op_get_by_val(Instruction*);
        void emit_op_get_global_var(Instruction*);
        void emit_op_get_scoped_var(Instruction*);
        void emit_op_init_arguments(Instruction*);
        void emit_op_instanceof(Instruction*);
        void emit_op_jeq_null(Instruction*);
        void emit_op_jfalse(Instruction*);
        void emit_op_jmp(Instruction*);
        void emit_op_jmp_scopes(Instruction*);
        void emit_op_jneq_null(Instruction*);
        void emit_op_jneq_ptr(Instruction*);
        void emit_op_jnless(Instruction*);
        void emit_op_jnlesseq(Instruction*);
        void emit_op_jsr(Instruction*);
        void emit_op_jtrue(Instruction*);
        void emit_op_load_varargs(Instruction*);
        void emit_op_loop(Instruction*);
        void emit_op_loop_if_less(Instruction*);
        void emit_op_loop_if_lesseq(Instruction*);
        void emit_op_loop_if_true(Instruction*);
        void emit_op_lshift(Instruction*);
        void emit_op_method_check(Instruction*);
        void emit_op_mod(Instruction*);
        void emit_op_mov(Instruction*);
        void emit_op_mul(Instruction*);
        void emit_op_negate(Instruction*);
        void emit_op_neq(Instruction*);
        void emit_op_neq_null(Instruction*);
        void emit_op_new_array(Instruction*);
        void emit_op_new_error(Instruction*);
        void emit_op_new_func(Instruction*);
        void emit_op_new_func_exp(Instruction*);
        void emit_op_new_object(Instruction*);
        void emit_op_new_regexp(Instruction*);
        void emit_op_get_pnames(Instruction*);
        void emit_op_next_pname(Instruction*);
        void emit_op_not(Instruction*);
        void emit_op_nstricteq(Instruction*);
        void emit_op_pop_scope(Instruction*);
        void emit_op_post_dec(Instruction*);
        void emit_op_post_inc(Instruction*);
        void emit_op_pre_dec(Instruction*);
        void emit_op_pre_inc(Instruction*);
        void emit_op_profile_did_call(Instruction*);
        void emit_op_profile_will_call(Instruction*);
        void emit_op_push_new_scope(Instruction*);
        void emit_op_push_scope(Instruction*);
        void emit_op_put_by_id(Instruction*);
        void emit_op_put_by_index(Instruction*);
        void emit_op_put_by_val(Instruction*);
        void emit_op_put_getter(Instruction*);
        void emit_op_put_global_var(Instruction*);
        void emit_op_put_scoped_var(Instruction*);
        void emit_op_put_setter(Instruction*);
        void emit_op_resolve(Instruction*);
        void emit_op_resolve_base(Instruction*);
        void emit_op_resolve_global(Instruction*);
        void emit_op_resolve_skip(Instruction*);
        void emit_op_resolve_with_base(Instruction*);
        void emit_op_ret(Instruction*);
        void emit_op_rshift(Instruction*);
        void emit_op_sret(Instruction*);
        void emit_op_strcat(Instruction*);
        void emit_op_stricteq(Instruction*);
        void emit_op_sub(Instruction*);
        void emit_op_switch_char(Instruction*);
        void emit_op_switch_imm(Instruction*);
        void emit_op_switch_string(Instruction*);
        void emit_op_tear_off_activation(Instruction*);
        void emit_op_tear_off_arguments(Instruction*);
        void emit_op_throw(Instruction*);
        void emit_op_to_jsnumber(Instruction*);
        void emit_op_to_primitive(Instruction*);
        void emit_op_unexpected_load(Instruction*);

        void emitSlow_op_add(Instruction*, Vector<SlowCaseEntry>::iterator&);
        void emitSlow_op_bitand(Instruction*, Vector<SlowCaseEntry>::iterator&);
        void emitSlow_op_bitnot(Instruction*, Vector<SlowCaseEntry>::iterator&);
        void emitSlow_op_bitor(Instruction*, Vector<SlowCaseEntry>::iterator&);
        void emitSlow_op_bitxor(Instruction*, Vector<SlowCaseEntry>::iterator&);
        void emitSlow_op_call(Instruction*, Vector<SlowCaseEntry>::iterator&);
        void emitSlow_op_call_eval(Instruction*, Vector<SlowCaseEntry>::iterator&);
        void emitSlow_op_call_varargs(Instruction*, Vector<SlowCaseEntry>::iterator&);
        void emitSlow_op_construct(Instruction*, Vector<SlowCaseEntry>::iterator&);
        void emitSlow_op_construct_verify(Instruction*, Vector<SlowCaseEntry>::iterator&);
        void emitSlow_op_convert_this(Instruction*, Vector<SlowCaseEntry>::iterator&);
        void emitSlow_op_div(Instruction*, Vector<SlowCaseEntry>::iterator&);
        void emitSlow_op_eq(Instruction*, Vector<SlowCaseEntry>::iterator&);
        void emitSlow_op_get_by_id(Instruction*, Vector<SlowCaseEntry>::iterator&);
        void emitSlow_op_get_by_val(Instruction*, Vector<SlowCaseEntry>::iterator&);
        void emitSlow_op_instanceof(Instruction*, Vector<SlowCaseEntry>::iterator&);
        void emitSlow_op_jfalse(Instruction*, Vector<SlowCaseEntry>::iterator&);
        void emitSlow_op_jnless(Instruction*, Vector<SlowCaseEntry>::iterator&);
        void emitSlow_op_jnlesseq(Instruction*, Vector<SlowCaseEntry>::iterator&);
        void emitSlow_op_jtrue(Instruction*, Vector<SlowCaseEntry>::iterator&);
        void emitSlow_op_loop_if_less(Instruction*, Vector<SlowCaseEntry>::iterator&);
        void emitSlow_op_loop_if_lesseq(Instruction*, Vector<SlowCaseEntry>::iterator&);
        void emitSlow_op_loop_if_true(Instruction*, Vector<SlowCaseEntry>::iterator&);
        void emitSlow_op_lshift(Instruction*, Vector<SlowCaseEntry>::iterator&);
        void emitSlow_op_method_check(Instruction*, Vector<SlowCaseEntry>::iterator&);
        void emitSlow_op_mod(Instruction*, Vector<SlowCaseEntry>::iterator&);
        void emitSlow_op_mul(Instruction*, Vector<SlowCaseEntry>::iterator&);
        void emitSlow_op_negate(Instruction*, Vector<SlowCaseEntry>::iterator&);
        void emitSlow_op_neq(Instruction*, Vector<SlowCaseEntry>::iterator&);
        void emitSlow_op_not(Instruction*, Vector<SlowCaseEntry>::iterator&);
        void emitSlow_op_nstricteq(Instruction*, Vector<SlowCaseEntry>::iterator&);
        void emitSlow_op_post_dec(Instruction*, Vector<SlowCaseEntry>::iterator&);
        void emitSlow_op_post_inc(Instruction*, Vector<SlowCaseEntry>::iterator&);
        void emitSlow_op_pre_dec(Instruction*, Vector<SlowCaseEntry>::iterator&);
        void emitSlow_op_pre_inc(Instruction*, Vector<SlowCaseEntry>::iterator&);
        void emitSlow_op_put_by_id(Instruction*, Vector<SlowCaseEntry>::iterator&);
        void emitSlow_op_put_by_val(Instruction*, Vector<SlowCaseEntry>::iterator&);
        void emitSlow_op_resolve_global(Instruction*, Vector<SlowCaseEntry>::iterator&);
        void emitSlow_op_rshift(Instruction*, Vector<SlowCaseEntry>::iterator&);
        void emitSlow_op_stricteq(Instruction*, Vector<SlowCaseEntry>::iterator&);
        void emitSlow_op_sub(Instruction*, Vector<SlowCaseEntry>::iterator&);
        void emitSlow_op_to_jsnumber(Instruction*, Vector<SlowCaseEntry>::iterator&);
        void emitSlow_op_to_primitive(Instruction*, Vector<SlowCaseEntry>::iterator&);

        /* These functions are deprecated: Please use JITStubCall instead. */
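        // Illustrative JITStubCall use (API assumed from JITStubCall.h):
        //     JITStubCall stubCall(this, cti_op_add);
        //     stubCall.addArgument(regT0);
        //     stubCall.call(dst);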
        void emitPutJITStubArg(RegisterID src, unsigned argumentNumber);
#if USE(JSVALUE32_64)
        void emitPutJITStubArg(RegisterID tag, RegisterID payload, unsigned argumentNumber);
        void emitPutJITStubArgFromVirtualRegister(unsigned src, unsigned argumentNumber, RegisterID scratch1, RegisterID scratch2);
#else
        void emitPutJITStubArgFromVirtualRegister(unsigned src, unsigned argumentNumber, RegisterID scratch);
#endif
        void emitPutJITStubArgConstant(unsigned value, unsigned argumentNumber);
        void emitPutJITStubArgConstant(void* value, unsigned argumentNumber);
        void emitGetJITStubArg(unsigned argumentNumber, RegisterID dst);

        void emitInitRegister(unsigned dst);

        void emitPutToCallFrameHeader(RegisterID from, RegisterFile::CallFrameHeaderEntry entry);
        void emitPutImmediateToCallFrameHeader(void* value, RegisterFile::CallFrameHeaderEntry entry);
        void emitGetFromCallFrameHeaderPtr(RegisterFile::CallFrameHeaderEntry entry, RegisterID to, RegisterID from = callFrameRegister);
        void emitGetFromCallFrameHeader32(RegisterFile::CallFrameHeaderEntry entry, RegisterID to, RegisterID from = callFrameRegister);

        JSValue getConstantOperand(unsigned src);
        bool isOperandConstantImmediateInt(unsigned src);

        Jump getSlowCase(Vector<SlowCaseEntry>::iterator& iter)
        {
            return iter++->from;
        }
        void linkSlowCase(Vector<SlowCaseEntry>::iterator& iter)
        {
            iter->from.link(this);
            ++iter;
        }
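        // Each addSlowCase() in the main pass records a SlowCaseEntry; the matching
        // emitSlow_op_* implementation consumes the entries in the same order,
        // calling linkSlowCase(iter) (or getSlowCase(iter)) once per recorded jump.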
        void linkSlowCaseIfNotJSCell(Vector<SlowCaseEntry>::iterator&, int vReg);

        Jump checkStructure(RegisterID reg, Structure* structure);

        void restoreArgumentReference();
        void restoreArgumentReferenceForTrampoline();

        Call emitNakedCall(CodePtr function = CodePtr());

        void preserveReturnAddressAfterCall(RegisterID);
        void restoreReturnAddressBeforeReturn(RegisterID);
        void restoreReturnAddressBeforeReturn(Address);

        void emitTimeoutCheck();
#ifndef NDEBUG
        void printBytecodeOperandTypes(unsigned src1, unsigned src2);
#endif

#if ENABLE(SAMPLING_FLAGS)
        void setSamplingFlag(int32_t);
        void clearSamplingFlag(int32_t);
#endif

#if ENABLE(SAMPLING_COUNTERS)
        void emitCount(AbstractSamplingCounter&, uint32_t = 1);
#endif

#if ENABLE(OPCODE_SAMPLING)
        void sampleInstruction(Instruction*, bool = false);
#endif

#if ENABLE(CODEBLOCK_SAMPLING)
        void sampleCodeBlock(CodeBlock*);
#else
        void sampleCodeBlock(CodeBlock*) {}
#endif

        Interpreter* m_interpreter;
        JSGlobalData* m_globalData;
        CodeBlock* m_codeBlock;

        Vector<CallRecord> m_calls;
        Vector<Label> m_labels;
        Vector<PropertyStubCompilationInfo> m_propertyAccessCompilationInfo;
        Vector<StructureStubCompilationInfo> m_callStructureStubCompilationInfo;
        Vector<MethodCallCompilationInfo> m_methodCallCompilationInfo;
        Vector<JumpTable> m_jmpTable;

        unsigned m_bytecodeIndex;
        Vector<JSRInfo> m_jsrSites;
        Vector<SlowCaseEntry> m_slowCases;
        Vector<SwitchRecord> m_switches;

        unsigned m_propertyAccessInstructionIndex;
        unsigned m_globalResolveInfoIndex;
        unsigned m_callLinkInfoIndex;

#if USE(JSVALUE32_64)
        unsigned m_jumpTargetIndex;
        unsigned m_mappedBytecodeIndex;
        unsigned m_mappedVirtualRegisterIndex;
        RegisterID m_mappedTag;
        RegisterID m_mappedPayload;
#else
        int m_lastResultBytecodeRegister;
        unsigned m_jumpTargetsPosition;
#endif

#ifndef NDEBUG
#if defined(ASSEMBLER_HAS_CONSTANT_POOL) && ASSEMBLER_HAS_CONSTANT_POOL
        Label m_uninterruptedInstructionSequenceBegin;
        int m_uninterruptedConstantSequenceBegin;
#endif
#endif
    } JIT_CLASS_ALIGNMENT;

} // namespace JSC

#endif // ENABLE(JIT)

#endif // JIT_h