|
1 /* |
|
2 * Copyright (C) 2008, 2009 Apple Inc. All rights reserved. |
|
3 * Copyright (C) 2008 Cameron Zwarich <cwzwarich@uwaterloo.ca> |
|
4 * |
|
5 * Redistribution and use in source and binary forms, with or without |
|
6 * modification, are permitted provided that the following conditions |
|
7 * are met: |
|
8 * |
|
9 * 1. Redistributions of source code must retain the above copyright |
|
10 * notice, this list of conditions and the following disclaimer. |
|
11 * 2. Redistributions in binary form must reproduce the above copyright |
|
12 * notice, this list of conditions and the following disclaimer in the |
|
13 * documentation and/or other materials provided with the distribution. |
|
14 * 3. Neither the name of Apple Computer, Inc. ("Apple") nor the names of |
|
15 * its contributors may be used to endorse or promote products derived |
|
16 * from this software without specific prior written permission. |
|
17 * |
|
18 * THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY |
|
19 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED |
|
20 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE |
|
21 * DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY |
|
22 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES |
|
23 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; |
|
24 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND |
|
25 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
|
26 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF |
|
27 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
|
28 */ |
|
29 |
|
30 #ifndef BytecodeGenerator_h |
|
31 #define BytecodeGenerator_h |
|
32 |
|
33 #include "CodeBlock.h" |
|
34 #include "HashTraits.h" |
|
35 #include "Instruction.h" |
|
36 #include "Label.h" |
|
37 #include "LabelScope.h" |
|
38 #include "Interpreter.h" |
|
39 #include "RegisterID.h" |
|
40 #include "SymbolTable.h" |
|
41 #include "Debugger.h" |
|
42 #include "Nodes.h" |
|
43 #include <wtf/FastAllocBase.h> |
|
44 #include <wtf/PassRefPtr.h> |
|
45 #include <wtf/SegmentedVector.h> |
|
46 #include <wtf/Vector.h> |
|
47 |
|
48 namespace JSC { |
|
49 |
|
50 class Identifier; |
|
51 class ScopeChain; |
|
52 class ScopeNode; |
|
53 |
|
    // Lays out the registers for a call site: slot 0 of m_argv holds the
    // "this" value and the arguments follow it contiguously, matching the
    // register layout the call opcodes expect.
    class CallArguments {
    public:
        CallArguments(BytecodeGenerator& generator, ArgumentsNode* argumentsNode);

        // Register holding the "this" value (slot 0 of m_argv).
        RegisterID* thisRegister() { return m_argv[0].get(); }
        // Register holding argument i (arguments start at slot 1).
        RegisterID* argumentRegister(unsigned i) { return m_argv[i + 1].get(); }
        // Register-file index of the callee's frame: just past "this", the
        // arguments, and the call-frame header.
        unsigned callFrame() { return thisRegister()->index() + count() + RegisterFile::CallFrameHeaderSize; }
        // Number of argument slots, including the "this" slot.
        unsigned count() { return m_argv.size(); }
        // Scratch register used when profiling hooks are emitted; may be 0.
        RegisterID* profileHookRegister() { return m_profileHookRegister.get(); }
        ArgumentsNode* argumentsNode() { return m_argumentsNode; }

    private:
        RefPtr<RegisterID> m_profileHookRegister;
        ArgumentsNode* m_argumentsNode;
        Vector<RefPtr<RegisterID>, 16> m_argv; // [0] = "this"; arguments at [1..count-1].
    };
|
70 |
|
    // State needed to transfer control into a finally block and back
    // (presumably via emitJumpSubroutine / emitSubroutineReturn — see the
    // matching pushFinallyContext(Label*, RegisterID*) signature).
    struct FinallyContext {
        Label* finallyAddr;     // Entry point of the finally block.
        RegisterID* retAddrDst; // Register that receives the return address.
    };
|
75 |
|
    // One entry of the scope-context stack: either a dynamic scope or a
    // finally block. finallyContext is only meaningful when isFinallyBlock
    // is true.
    struct ControlFlowContext {
        bool isFinallyBlock;
        FinallyContext finallyContext;
    };
|
80 |
|
    // Registers tracked for an optimised for-in loop; pushed/popped by
    // pushOptimisedForIn() / popOptimisedForIn().
    struct ForInContext {
        RefPtr<RegisterID> expectedSubscriptRegister; // Base whose subscripts the optimisation expects.
        RefPtr<RegisterID> iterRegister;              // Property-name iterator.
        RefPtr<RegisterID> indexRegister;             // Current iteration index.
        RefPtr<RegisterID> propertyRegister;          // Current property name.
    };
|
87 |
|
    // Walks a parsed ScopeNode (program, function body, or eval code) and
    // emits bytecode into the associated CodeBlock, managing register
    // allocation, labels, scopes, and the constant pool along the way.
    class BytecodeGenerator : public FastAllocBase {
    public:
        typedef DeclarationStacks::VarStack VarStack;
        typedef DeclarationStacks::FunctionStack FunctionStack;

        // Process-wide debugging switch: when set, generated code is dumped.
        static void setDumpsGeneratedCode(bool dumpsGeneratedCode);
        static bool dumpsGeneratedCode();

        BytecodeGenerator(ProgramNode*, const Debugger*, const ScopeChain&, SymbolTable*, ProgramCodeBlock*);
        BytecodeGenerator(FunctionBodyNode*, const Debugger*, const ScopeChain&, SymbolTable*, CodeBlock*);
        BytecodeGenerator(EvalNode*, const Debugger*, const ScopeChain&, SymbolTable*, EvalCodeBlock*);

        JSGlobalData* globalData() const { return m_globalData; }
        const CommonIdentifiers& propertyNames() const { return *m_globalData->propertyNames; }

        bool isConstructor() { return m_codeBlock->m_isConstructor; }

        // Emits bytecode for the whole scope node into the code block.
        void generate();

        // Returns the register corresponding to a local variable, or 0 if no
        // such register exists. Registers returned by registerFor do not
        // require explicit reference counting.
        RegisterID* registerFor(const Identifier&);

        // Returns the argument number if this is an argument, or 0 if not.
        int argumentNumberFor(const Identifier&);

        void setIsNumericCompareFunction(bool isNumericCompareFunction);

        bool willResolveToArguments(const Identifier&);
        RegisterID* uncheckedRegisterForArguments();

        // Behaves as registerFor does, but ignores dynamic scope as
        // dynamic scope should not interfere with const initialisation.
        RegisterID* constRegisterFor(const Identifier&);

        // Searches the scope chain in an attempt to statically locate the requested
        // property. Returns false if for any reason the property cannot be safely
        // optimised at all. Otherwise it will return the index and depth of the
        // VariableObject that defines the property. If the property cannot be found
        // statically, depth will contain the depth of the scope chain where dynamic
        // lookup must begin.
        bool findScopedProperty(const Identifier&, int& index, size_t& depth, bool forWriting, bool& includesDynamicScopes, JSObject*& globalObject);

        // Returns the register storing "this".
        RegisterID* thisRegister() { return &m_thisRegister; }

        bool isLocal(const Identifier&);
        bool isLocalConstant(const Identifier&);

        // Returns the next available temporary register. Registers returned by
        // newTemporary require a modified form of reference counting: any
        // register with a refcount of 0 is considered "available", meaning that
        // the next instruction may overwrite it.
        RegisterID* newTemporary();

        RegisterID* highestUsedRegister();

        // The same as newTemporary(), but this function returns "suggestion" if
        // "suggestion" is a temporary. This function is helpful in situations
        // where you've put "suggestion" in a RefPtr, but you'd like to allow
        // the next instruction to overwrite it anyway.
        RegisterID* newTemporaryOr(RegisterID* suggestion) { return suggestion->isTemporary() ? suggestion : newTemporary(); }

        // Functions for handling of dst register

        // Sentinel destination meaning "the result value is not needed".
        RegisterID* ignoredResult() { return &m_ignoredResultRegister; }

        // Returns a place to write intermediate values of an operation
        // which reuses dst if it is safe to do so.
        RegisterID* tempDestination(RegisterID* dst)
        {
            return (dst && dst != ignoredResult() && dst->isTemporary()) ? dst : newTemporary();
        }

        // Returns the place to write the final output of an operation.
        RegisterID* finalDestination(RegisterID* originalDst, RegisterID* tempDst = 0)
        {
            if (originalDst && originalDst != ignoredResult())
                return originalDst;
            ASSERT(tempDst != ignoredResult());
            if (tempDst && tempDst->isTemporary())
                return tempDst;
            return newTemporary();
        }

        // Returns the place to write the final output of an operation.
        // Unlike finalDestination(), this passes ignoredResult() through to the
        // caller instead of substituting a temporary for it.
        RegisterID* finalDestinationOrIgnored(RegisterID* originalDst, RegisterID* tempDst = 0)
        {
            if (originalDst)
                return originalDst;
            ASSERT(tempDst != ignoredResult());
            if (tempDst && tempDst->isTemporary())
                return tempDst;
            return newTemporary();
        }

        // Returns a destination for an assignment's result value, or 0 when no
        // separate result register is needed.
        RegisterID* destinationForAssignResult(RegisterID* dst)
        {
            if (dst && dst != ignoredResult() && m_codeBlock->needsFullScopeChain())
                return dst->isTemporary() ? dst : newTemporary();
            return 0;
        }

        // Moves src to dst if dst is not null and is different from src, otherwise just returns src.
        RegisterID* moveToDestinationIfNeeded(RegisterID* dst, RegisterID* src)
        {
            return dst == ignoredResult() ? 0 : (dst && dst != src) ? emitMove(dst, src) : src;
        }

        PassRefPtr<LabelScope> newLabelScope(LabelScope::Type, const Identifier* = 0);
        PassRefPtr<Label> newLabel();

        // The emitNode functions are just syntactic sugar for calling
        // Node::emitCode. These functions accept a 0 for the register,
        // meaning that the node should allocate a register, or ignoredResult(),
        // meaning that the node need not put the result in a register.
        // Other emit functions do not accept 0 or ignoredResult().
        RegisterID* emitNode(RegisterID* dst, Node* n)
        {
            // Node::emitCode assumes that dst, if provided, is either a local or a referenced temporary.
            ASSERT(!dst || dst == ignoredResult() || !dst->isTemporary() || dst->refCount());
            // Record a new line-info entry only when the source line changes.
            if (!m_codeBlock->numberOfLineInfos() || m_codeBlock->lastLineInfo().lineNumber != n->lineNo()) {
                LineInfo info = { instructions().size(), n->lineNo() };
                m_codeBlock->addLineInfo(info);
            }
            // Guard against native stack overflow from deeply nested expressions.
            if (m_emitNodeDepth >= s_maxEmitNodeDepth)
                return emitThrowExpressionTooDeepException();
            ++m_emitNodeDepth;
            RegisterID* r = n->emitBytecode(*this, dst);
            --m_emitNodeDepth;
            return r;
        }

        RegisterID* emitNode(Node* n)
        {
            return emitNode(0, n);
        }

        // Like emitNode(), but for an expression evaluated only for its
        // boolean value, branching to trueTarget/falseTarget.
        void emitNodeInConditionContext(ExpressionNode* n, Label* trueTarget, Label* falseTarget, bool fallThroughMeansTrue)
        {
            // Record a new line-info entry only when the source line changes.
            if (!m_codeBlock->numberOfLineInfos() || m_codeBlock->lastLineInfo().lineNumber != n->lineNo()) {
                LineInfo info = { instructions().size(), n->lineNo() };
                m_codeBlock->addLineInfo(info);
            }
            // Guard against native stack overflow from deeply nested expressions.
            if (m_emitNodeDepth >= s_maxEmitNodeDepth)
                emitThrowExpressionTooDeepException();
            ++m_emitNodeDepth;
            n->emitBytecodeInConditionContext(*this, trueTarget, falseTarget, fallThroughMeansTrue);
            --m_emitNodeDepth;
        }

        // Records source-range information (for error messages) for the next
        // instruction. Offsets that do not fit the packed ExpressionRangeInfo
        // fields are degraded gracefully rather than dropped wholesale.
        void emitExpressionInfo(unsigned divot, unsigned startOffset, unsigned endOffset)
        {
            divot -= m_codeBlock->sourceOffset();
            if (divot > ExpressionRangeInfo::MaxDivot) {
                // Overflow has occurred, we can only give line number info for errors for this region
                divot = 0;
                startOffset = 0;
                endOffset = 0;
            } else if (startOffset > ExpressionRangeInfo::MaxOffset) {
                // If the start offset is out of bounds we clear both offsets
                // so we only get the divot marker. Error message will have to be reduced
                // to line and column number.
                startOffset = 0;
                endOffset = 0;
            } else if (endOffset > ExpressionRangeInfo::MaxOffset) {
                // The end offset is only used for additional context, and is much more likely
                // to overflow (eg. function call arguments) so we are willing to drop it without
                // dropping the rest of the range.
                endOffset = 0;
            }

            ExpressionRangeInfo info;
            info.instructionOffset = instructions().size();
            info.divotPoint = divot;
            info.startOffset = startOffset;
            info.endOffset = endOffset;
            m_codeBlock->addExpressionInfo(info);
        }

        void emitGetByIdExceptionInfo(OpcodeID opcodeID)
        {
            // Only op_create_this and op_instanceof need exception info for
            // a preceding op_get_by_id.
            ASSERT(opcodeID == op_create_this || opcodeID == op_instanceof);
            GetByIdExceptionInfo info;
            info.bytecodeOffset = instructions().size();
            info.isOpCreateThis = (opcodeID == op_create_this);
            m_codeBlock->addGetByIdExceptionInfo(info);
        }

        // True if evaluating the right-hand side could observably change the
        // left-hand side, so the LHS value must be copied first.
        ALWAYS_INLINE bool leftHandSideNeedsCopy(bool rightHasAssignments, bool rightIsPure)
        {
            return (m_codeType != FunctionCode || m_codeBlock->needsFullScopeChain() || rightHasAssignments) && !rightIsPure;
        }

        // Emits the LHS expression, copying its value into a fresh temporary
        // when leftHandSideNeedsCopy() says the RHS could clobber it.
        ALWAYS_INLINE PassRefPtr<RegisterID> emitNodeForLeftHandSide(ExpressionNode* n, bool rightHasAssignments, bool rightIsPure)
        {
            if (leftHandSideNeedsCopy(rightHasAssignments, rightIsPure)) {
                PassRefPtr<RegisterID> dst = newTemporary();
                emitNode(dst.get(), n);
                return dst;
            }

            return PassRefPtr<RegisterID>(emitNode(n));
        }

        RegisterID* emitLoad(RegisterID* dst, bool);
        RegisterID* emitLoad(RegisterID* dst, double);
        RegisterID* emitLoad(RegisterID* dst, const Identifier&);
        RegisterID* emitLoad(RegisterID* dst, JSValue);

        RegisterID* emitUnaryOp(OpcodeID, RegisterID* dst, RegisterID* src);
        RegisterID* emitBinaryOp(OpcodeID, RegisterID* dst, RegisterID* src1, RegisterID* src2, OperandTypes);
        RegisterID* emitEqualityOp(OpcodeID, RegisterID* dst, RegisterID* src1, RegisterID* src2);
        RegisterID* emitUnaryNoDstOp(OpcodeID, RegisterID* src);

        RegisterID* emitNewObject(RegisterID* dst);
        RegisterID* emitNewArray(RegisterID* dst, ElementNode*); // stops at first elision

        RegisterID* emitNewFunction(RegisterID* dst, FunctionBodyNode* body);
        RegisterID* emitNewFunctionExpression(RegisterID* dst, FuncExprNode* func);
        RegisterID* emitNewRegExp(RegisterID* dst, RegExp* regExp);

        RegisterID* emitMove(RegisterID* dst, RegisterID* src);

        RegisterID* emitToJSNumber(RegisterID* dst, RegisterID* src) { return emitUnaryOp(op_to_jsnumber, dst, src); }
        RegisterID* emitPreInc(RegisterID* srcDst);
        RegisterID* emitPreDec(RegisterID* srcDst);
        RegisterID* emitPostInc(RegisterID* dst, RegisterID* srcDst);
        RegisterID* emitPostDec(RegisterID* dst, RegisterID* srcDst);

        RegisterID* emitInstanceOf(RegisterID* dst, RegisterID* value, RegisterID* base, RegisterID* basePrototype);
        RegisterID* emitTypeOf(RegisterID* dst, RegisterID* src) { return emitUnaryOp(op_typeof, dst, src); }
        RegisterID* emitIn(RegisterID* dst, RegisterID* property, RegisterID* base) { return emitBinaryOp(op_in, dst, property, base, OperandTypes()); }

        RegisterID* emitResolve(RegisterID* dst, const Identifier& property);
        RegisterID* emitGetScopedVar(RegisterID* dst, size_t skip, int index, JSValue globalObject);
        RegisterID* emitPutScopedVar(size_t skip, int index, RegisterID* value, JSValue globalObject);

        RegisterID* emitResolveBase(RegisterID* dst, const Identifier& property);
        RegisterID* emitResolveWithBase(RegisterID* baseDst, RegisterID* propDst, const Identifier& property);

        void emitMethodCheck();

        RegisterID* emitGetById(RegisterID* dst, RegisterID* base, const Identifier& property);
        RegisterID* emitPutById(RegisterID* base, const Identifier& property, RegisterID* value);
        RegisterID* emitDirectPutById(RegisterID* base, const Identifier& property, RegisterID* value);
        RegisterID* emitDeleteById(RegisterID* dst, RegisterID* base, const Identifier&);
        RegisterID* emitGetByVal(RegisterID* dst, RegisterID* base, RegisterID* property);
        RegisterID* emitPutByVal(RegisterID* base, RegisterID* property, RegisterID* value);
        RegisterID* emitDeleteByVal(RegisterID* dst, RegisterID* base, RegisterID* property);
        RegisterID* emitPutByIndex(RegisterID* base, unsigned index, RegisterID* value);
        RegisterID* emitPutGetter(RegisterID* base, const Identifier& property, RegisterID* value);
        RegisterID* emitPutSetter(RegisterID* base, const Identifier& property, RegisterID* value);

        // divot/startOffset/endOffset give the source range for error reporting
        // (see emitExpressionInfo).
        RegisterID* emitCall(RegisterID* dst, RegisterID* func, CallArguments&, unsigned divot, unsigned startOffset, unsigned endOffset);
        RegisterID* emitCallEval(RegisterID* dst, RegisterID* func, CallArguments&, unsigned divot, unsigned startOffset, unsigned endOffset);
        RegisterID* emitCallVarargs(RegisterID* dst, RegisterID* func, RegisterID* thisRegister, RegisterID* argCount, unsigned divot, unsigned startOffset, unsigned endOffset);
        RegisterID* emitLoadVarargs(RegisterID* argCountDst, RegisterID* args);

        RegisterID* emitReturn(RegisterID* src);
        RegisterID* emitEnd(RegisterID* src) { return emitUnaryNoDstOp(op_end, src); }

        RegisterID* emitConstruct(RegisterID* dst, RegisterID* func, CallArguments&, unsigned divot, unsigned startOffset, unsigned endOffset);
        RegisterID* emitStrcat(RegisterID* dst, RegisterID* src, int count);
        void emitToPrimitive(RegisterID* dst, RegisterID* src);

        PassRefPtr<Label> emitLabel(Label*);
        PassRefPtr<Label> emitJump(Label* target);
        PassRefPtr<Label> emitJumpIfTrue(RegisterID* cond, Label* target);
        PassRefPtr<Label> emitJumpIfFalse(RegisterID* cond, Label* target);
        PassRefPtr<Label> emitJumpIfNotFunctionCall(RegisterID* cond, Label* target);
        PassRefPtr<Label> emitJumpIfNotFunctionApply(RegisterID* cond, Label* target);
        PassRefPtr<Label> emitJumpScopes(Label* target, int targetScopeDepth);

        PassRefPtr<Label> emitJumpSubroutine(RegisterID* retAddrDst, Label*);
        void emitSubroutineReturn(RegisterID* retAddrSrc);

        RegisterID* emitGetPropertyNames(RegisterID* dst, RegisterID* base, RegisterID* i, RegisterID* size, Label* breakTarget);
        RegisterID* emitNextPropertyName(RegisterID* dst, RegisterID* base, RegisterID* i, RegisterID* size, RegisterID* iter, Label* target);

        RegisterID* emitCatch(RegisterID*, Label* start, Label* end);
        void emitThrow(RegisterID* exc)
        {
            m_usesExceptions = true;
            emitUnaryNoDstOp(op_throw, exc);
        }

        RegisterID* emitNewError(RegisterID* dst, bool isReferenceError, JSValue message);
        void emitPushNewScope(RegisterID* dst, const Identifier& property, RegisterID* value);

        RegisterID* emitPushScope(RegisterID* scope);
        void emitPopScope();

        void emitDebugHook(DebugHookID, int firstLine, int lastLine);

        // Combined depth of dynamic scopes and finally blocks currently open.
        int scopeDepth() { return m_dynamicScopeDepth + m_finallyDepth; }
        bool hasFinaliser() { return m_finallyDepth != 0; }

        void pushFinallyContext(Label* target, RegisterID* returnAddrDst);
        void popFinallyContext();

        void pushOptimisedForIn(RegisterID* expectedBase, RegisterID* iter, RegisterID* index, RegisterID* propertyRegister)
        {
            ForInContext context = { expectedBase, iter, index, propertyRegister };
            m_forInContextStack.append(context);
        }

        void popOptimisedForIn()
        {
            m_forInContextStack.removeLast();
        }

        LabelScope* breakTarget(const Identifier&);
        LabelScope* continueTarget(const Identifier&);

        void beginSwitch(RegisterID*, SwitchInfo::SwitchType);
        void endSwitch(uint32_t clauseCount, RefPtr<Label>*, ExpressionNode**, Label* defaultLabel, int32_t min, int32_t range);

        CodeType codeType() const { return m_codeType; }

        // Marks this generator as regenerating bytecode (with exception info)
        // for an already-compiled code block.
        void setRegeneratingForExceptionInfo(CodeBlock* originalCodeBlock)
        {
            m_regeneratingForExceptionInfo = true;
            m_codeBlockBeingRegeneratedFrom = originalCodeBlock;
        }

        bool shouldEmitProfileHooks() { return m_shouldEmitProfileHooks; }

    private:
        void emitOpcode(OpcodeID);
        void retrieveLastBinaryOp(int& dstIndex, int& src1Index, int& src2Index);
        void retrieveLastUnaryOp(int& dstIndex, int& srcIndex);
        void rewindBinaryOp();
        void rewindUnaryOp();

        PassRefPtr<Label> emitComplexJumpScopes(Label* target, ControlFlowContext* topScope, ControlFlowContext* bottomScope);

        typedef HashMap<EncodedJSValue, unsigned, EncodedJSValueHash, EncodedJSValueHashTraits> JSValueMap;

        // Hash traits for IdentifierMap values: INT_MAX marks an empty slot
        // since index 0 is a valid constant-pool index.
        struct IdentifierMapIndexHashTraits {
            typedef int TraitType;
            typedef IdentifierMapIndexHashTraits StorageTraits;
            static int emptyValue() { return std::numeric_limits<int>::max(); }
            static const bool emptyValueIsZero = false;
            static const bool needsDestruction = false;
            static const bool needsRef = false;
        };

        typedef HashMap<RefPtr<UString::Rep>, int, IdentifierRepHash, HashTraits<RefPtr<UString::Rep> >, IdentifierMapIndexHashTraits> IdentifierMap;
        typedef HashMap<double, JSValue> NumberMap;
        typedef HashMap<UString::Rep*, JSString*, IdentifierRepHash> IdentifierStringMap;

        // Shared implementation for emitCall / emitCallEval.
        RegisterID* emitCall(OpcodeID, RegisterID* dst, RegisterID* func, CallArguments&, unsigned divot, unsigned startOffset, unsigned endOffset);

        RegisterID* newRegister();

        // Adds a var slot and maps it to the name ident in symbolTable().
        RegisterID* addVar(const Identifier& ident, bool isConstant)
        {
            RegisterID* local;
            addVar(ident, isConstant, local);
            return local;
        }

        // Ditto. Returns true if a new RegisterID was added, false if a pre-existing RegisterID was re-used.
        bool addVar(const Identifier&, bool isConstant, RegisterID*&);

        // Adds an anonymous var slot. To give this slot a name, add it to symbolTable().
        RegisterID* addVar()
        {
            ++m_codeBlock->m_numVars;
            return newRegister();
        }

        // Returns the RegisterID corresponding to ident.
        RegisterID* addGlobalVar(const Identifier& ident, bool isConstant)
        {
            RegisterID* local;
            addGlobalVar(ident, isConstant, local);
            return local;
        }
        // Returns true if a new RegisterID was added, false if a pre-existing RegisterID was re-used.
        bool addGlobalVar(const Identifier&, bool isConstant, RegisterID*&);

        void addParameter(const Identifier&, int parameterIndex);

        void preserveLastVar();

        // Maps a signed register index to its storage: non-negative indices
        // address callee registers; negative indices address parameters when
        // any exist, and globals otherwise.
        RegisterID& registerFor(int index)
        {
            if (index >= 0)
                return m_calleeRegisters[index];

            if (m_parameters.size()) {
                ASSERT(!m_globals.size());
                return m_parameters[index + m_parameters.size() + RegisterFile::CallFrameHeaderSize];
            }

            return m_globals[-index - 1];
        }

        unsigned addConstant(const Identifier&);
        RegisterID* addConstantValue(JSValue);
        unsigned addRegExp(RegExp*);

        PassRefPtr<FunctionExecutable> makeFunction(ExecState* exec, FunctionBodyNode* body)
        {
            return FunctionExecutable::create(exec, body->ident(), body->source(), body->usesArguments(), body->parameters(), body->lineNo(), body->lastLine());
        }

        PassRefPtr<FunctionExecutable> makeFunction(JSGlobalData* globalData, FunctionBodyNode* body)
        {
            return FunctionExecutable::create(globalData, body->ident(), body->source(), body->usesArguments(), body->parameters(), body->lineNo(), body->lastLine());
        }

        Vector<Instruction>& instructions() { return m_codeBlock->instructions(); }
        SymbolTable& symbolTable() { return *m_symbolTable; }

        bool shouldOptimizeLocals() { return (m_codeType != EvalCode) && !m_dynamicScopeDepth; }
        bool canOptimizeNonLocals() { return (m_codeType == FunctionCode) && !m_dynamicScopeDepth && !m_codeBlock->usesEval(); }

        RegisterID* emitThrowExpressionTooDeepException();

        void createArgumentsIfNecessary();

        bool m_shouldEmitDebugHooks;
        bool m_shouldEmitProfileHooks;

        const ScopeChain* m_scopeChain;
        SymbolTable* m_symbolTable;

        ScopeNode* m_scopeNode;
        CodeBlock* m_codeBlock;

        // Some of these objects keep pointers to one another. They are arranged
        // to ensure a sane destruction order that avoids references to freed memory.
        HashSet<RefPtr<UString::Rep>, IdentifierRepHash> m_functions;
        RegisterID m_ignoredResultRegister; // Backing store for ignoredResult().
        RegisterID m_thisRegister;
        RegisterID* m_activationRegister;
        SegmentedVector<RegisterID, 32> m_constantPoolRegisters;
        SegmentedVector<RegisterID, 32> m_calleeRegisters;
        SegmentedVector<RegisterID, 32> m_parameters;
        SegmentedVector<RegisterID, 32> m_globals;
        SegmentedVector<Label, 32> m_labels;
        SegmentedVector<LabelScope, 8> m_labelScopes;
        RefPtr<RegisterID> m_lastVar;
        int m_finallyDepth;
        int m_dynamicScopeDepth;
        int m_baseScopeDepth;
        CodeType m_codeType;

        Vector<ControlFlowContext> m_scopeContextStack;
        Vector<SwitchInfo> m_switchContextStack;
        Vector<ForInContext> m_forInContextStack;

        int m_nextGlobalIndex;
        int m_firstConstantIndex;
        int m_nextConstantOffset;
        unsigned m_globalConstantIndex;

        int m_globalVarStorageOffset;

        // Constant pool
        IdentifierMap m_identifierMap;
        JSValueMap m_jsValueMap;
        NumberMap m_numberMap;
        IdentifierStringMap m_stringMap;

        JSGlobalData* m_globalData;

        // Last opcode emitted; consulted by the retrieveLast*/rewind* helpers.
        OpcodeID m_lastOpcodeID;

        // Current recursion depth of emitNode(); bounded by s_maxEmitNodeDepth.
        unsigned m_emitNodeDepth;

        bool m_usesExceptions;
        bool m_regeneratingForExceptionInfo;
        CodeBlock* m_codeBlockBeingRegeneratedFrom;

        // Nesting limit for emitNode() / emitNodeInConditionContext().
        static const unsigned s_maxEmitNodeDepth = 5000;
    };
|
572 |
|
573 } |
|
574 |
|
575 #endif // BytecodeGenerator_h |