|
1 /* |
|
2 * Copyright (C) 2008 Apple Inc. |
|
3 * Copyright (C) 2009, 2010 University of Szeged |
|
4 * All rights reserved. |
|
5 * |
|
6 * Redistribution and use in source and binary forms, with or without |
|
7 * modification, are permitted provided that the following conditions |
|
8 * are met: |
|
9 * 1. Redistributions of source code must retain the above copyright |
|
10 * notice, this list of conditions and the following disclaimer. |
|
11 * 2. Redistributions in binary form must reproduce the above copyright |
|
12 * notice, this list of conditions and the following disclaimer in the |
|
13 * documentation and/or other materials provided with the distribution. |
|
14 * |
|
15 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY |
|
16 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE |
|
17 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR |
|
18 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR |
|
19 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, |
|
20 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, |
|
21 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR |
|
22 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY |
|
23 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
|
24 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE |
|
25 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
|
26 */ |
|
27 |
|
28 #ifndef MacroAssemblerARM_h |
|
29 #define MacroAssemblerARM_h |
|
30 |
|
31 #if ENABLE(ASSEMBLER) && CPU(ARM_TRADITIONAL) |
|
32 |
|
33 #include "ARMAssembler.h" |
|
34 #include "AbstractMacroAssembler.h" |
|
35 |
|
36 namespace JSC { |
|
37 |
|
// MacroAssemblerARM: the traditional (non-Thumb) ARM backend of the JSC
// MacroAssembler, lowering the portable macro operations onto ARMAssembler.
//
// Conventions visible in this file:
//  - ARMRegisters::S0 and S1 are used freely as integer scratch registers,
//    and ARMRegisters::SD0 as a double scratch register; no macro operation
//    preserves them for the caller.
//  - Integer ALU macros use the flag-setting ("s") instruction forms so that
//    a following branchXXX(cond) can test Overflow/Signed/Zero/NonZero.
class MacroAssemblerARM : public AbstractMacroAssembler<ARMAssembler> {
    // A DoubleCondition is an ARM condition code, optionally tagged with
    // DoubleConditionBitSpecial to request extra unordered (NaN) handling in
    // branchDouble(). The tag bit must not collide with DoubleConditionMask.
    static const int DoubleConditionMask = 0x0f;
    static const int DoubleConditionBitSpecial = 0x10;
    COMPILE_ASSERT(!(DoubleConditionBitSpecial & DoubleConditionMask), DoubleConditionBitSpecial_should_not_interfere_with_ARMAssembler_Condition_codes);
public:
    typedef ARMRegisters::FPRegisterID FPRegisterID;

    // Integer comparison conditions, mapped 1:1 onto ARM condition codes.
    // Above/Below use the unsigned codes (HI/CS/CC/LS); Zero/NonZero alias
    // Equal/NotEqual since both test the Z flag.
    enum Condition {
        Equal = ARMAssembler::EQ,
        NotEqual = ARMAssembler::NE,
        Above = ARMAssembler::HI,
        AboveOrEqual = ARMAssembler::CS,
        Below = ARMAssembler::CC,
        BelowOrEqual = ARMAssembler::LS,
        GreaterThan = ARMAssembler::GT,
        GreaterThanOrEqual = ARMAssembler::GE,
        LessThan = ARMAssembler::LT,
        LessThanOrEqual = ARMAssembler::LE,
        Overflow = ARMAssembler::VS,
        Signed = ARMAssembler::MI,
        Zero = ARMAssembler::EQ,
        NonZero = ARMAssembler::NE
    };

    // Floating-point comparison conditions, interpreted by branchDouble()
    // against the flags produced by FCMPD + FMSTAT.
    enum DoubleCondition {
        // These conditions will only evaluate to true if the comparison is ordered - i.e. neither operand is NaN.
        DoubleEqual = ARMAssembler::EQ,
        DoubleNotEqual = ARMAssembler::NE | DoubleConditionBitSpecial,
        DoubleGreaterThan = ARMAssembler::GT,
        DoubleGreaterThanOrEqual = ARMAssembler::GE,
        // CC/LS (rather than LT/LE) so that an unordered result does NOT
        // satisfy the "less than" tests.
        DoubleLessThan = ARMAssembler::CC,
        DoubleLessThanOrEqual = ARMAssembler::LS,
        // If either operand is NaN, these conditions always evaluate to true.
        DoubleEqualOrUnordered = ARMAssembler::EQ | DoubleConditionBitSpecial,
        DoubleNotEqualOrUnordered = ARMAssembler::NE,
        DoubleGreaterThanOrUnordered = ARMAssembler::HI,
        DoubleGreaterThanOrEqualOrUnordered = ARMAssembler::CS,
        DoubleLessThanOrUnordered = ARMAssembler::LT,
        DoubleLessThanOrEqualOrUnordered = ARMAssembler::LE,
    };

    static const RegisterID stackPointerRegister = ARMRegisters::sp;
    static const RegisterID linkRegister = ARMRegisters::lr;

    // Pointers are 4 bytes on this target, so pointer-scaled indexing is *4.
    static const Scale ScalePtr = TimesFour;

    // ===== 32-bit integer ALU operations =====
    // Immediates are materialized via ARMAssembler::getImm() into scratch S0
    // when they cannot be encoded as an operand2 immediate; memory forms go
    // through scratch S1.

    void add32(RegisterID src, RegisterID dest)
    {
        m_assembler.adds_r(dest, dest, src);
    }

    // Read-modify-write add on a memory word, via scratch S1.
    void add32(Imm32 imm, Address address)
    {
        load32(address, ARMRegisters::S1);
        add32(imm, ARMRegisters::S1);
        store32(ARMRegisters::S1, address);
    }

    void add32(Imm32 imm, RegisterID dest)
    {
        m_assembler.adds_r(dest, dest, m_assembler.getImm(imm.m_value, ARMRegisters::S0));
    }

    void add32(Address src, RegisterID dest)
    {
        load32(src, ARMRegisters::S1);
        add32(ARMRegisters::S1, dest);
    }

    void and32(RegisterID src, RegisterID dest)
    {
        m_assembler.ands_r(dest, dest, src);
    }

    void and32(Imm32 imm, RegisterID dest)
    {
        // getImm(..., true) may encode the bitwise-inverse of the immediate
        // instead; that case is flagged with OP2_INV_IMM and handled with BIC
        // (and-not) rather than AND.
        ARMWord w = m_assembler.getImm(imm.m_value, ARMRegisters::S0, true);
        if (w & ARMAssembler::OP2_INV_IMM)
            m_assembler.bics_r(dest, dest, w & ~ARMAssembler::OP2_INV_IMM);
        else
            m_assembler.ands_r(dest, dest, w);
    }

    void lshift32(RegisterID shift_amount, RegisterID dest)
    {
        // Mask the shift amount to 0..31 (in S0) before shifting.
        ARMWord w = ARMAssembler::getOp2(0x1f);
        ASSERT(w != ARMAssembler::INVALID_IMM);
        m_assembler.and_r(ARMRegisters::S0, shift_amount, w);

        m_assembler.movs_r(dest, m_assembler.lsl_r(dest, ARMRegisters::S0));
    }

    void lshift32(Imm32 imm, RegisterID dest)
    {
        m_assembler.movs_r(dest, m_assembler.lsl(dest, imm.m_value & 0x1f));
    }

    void mul32(RegisterID src, RegisterID dest)
    {
        // MUL cannot use the same register for both operands here
        // (presumably the Rd == Rm restriction on older ARM cores — confirm
        // against ARMAssembler); copy src aside when they alias.
        if (src == dest) {
            move(src, ARMRegisters::S0);
            src = ARMRegisters::S0;
        }
        m_assembler.muls_r(dest, dest, src);
    }

    void mul32(Imm32 imm, RegisterID src, RegisterID dest)
    {
        move(imm, ARMRegisters::S0);
        m_assembler.muls_r(dest, src, ARMRegisters::S0);
    }

    // dest = 0 - dest (RSB with immediate 0), setting flags.
    void neg32(RegisterID srcDest)
    {
        m_assembler.rsbs_r(srcDest, srcDest, ARMAssembler::getOp2(0));
    }

    // Bitwise NOT (MVN), setting flags.
    void not32(RegisterID dest)
    {
        m_assembler.mvns_r(dest, dest);
    }

    void or32(RegisterID src, RegisterID dest)
    {
        m_assembler.orrs_r(dest, dest, src);
    }

    void or32(Imm32 imm, RegisterID dest)
    {
        m_assembler.orrs_r(dest, dest, m_assembler.getImm(imm.m_value, ARMRegisters::S0));
    }

    // Arithmetic (sign-propagating) right shift by register, amount masked to 0..31.
    void rshift32(RegisterID shift_amount, RegisterID dest)
    {
        ARMWord w = ARMAssembler::getOp2(0x1f);
        ASSERT(w != ARMAssembler::INVALID_IMM);
        m_assembler.and_r(ARMRegisters::S0, shift_amount, w);

        m_assembler.movs_r(dest, m_assembler.asr_r(dest, ARMRegisters::S0));
    }

    void rshift32(Imm32 imm, RegisterID dest)
    {
        m_assembler.movs_r(dest, m_assembler.asr(dest, imm.m_value & 0x1f));
    }

    // Logical (zero-filling) right shift by register, amount masked to 0..31.
    void urshift32(RegisterID shift_amount, RegisterID dest)
    {
        ARMWord w = ARMAssembler::getOp2(0x1f);
        ASSERT(w != ARMAssembler::INVALID_IMM);
        m_assembler.and_r(ARMRegisters::S0, shift_amount, w);

        m_assembler.movs_r(dest, m_assembler.lsr_r(dest, ARMRegisters::S0));
    }

    void urshift32(Imm32 imm, RegisterID dest)
    {
        m_assembler.movs_r(dest, m_assembler.lsr(dest, imm.m_value & 0x1f));
    }

    void sub32(RegisterID src, RegisterID dest)
    {
        m_assembler.subs_r(dest, dest, src);
    }

    void sub32(Imm32 imm, RegisterID dest)
    {
        m_assembler.subs_r(dest, dest, m_assembler.getImm(imm.m_value, ARMRegisters::S0));
    }

    // Read-modify-write subtract on a memory word, via scratch S1.
    void sub32(Imm32 imm, Address address)
    {
        load32(address, ARMRegisters::S1);
        sub32(imm, ARMRegisters::S1);
        store32(ARMRegisters::S1, address);
    }

    void sub32(Address src, RegisterID dest)
    {
        load32(src, ARMRegisters::S1);
        sub32(ARMRegisters::S1, dest);
    }

    void xor32(RegisterID src, RegisterID dest)
    {
        m_assembler.eors_r(dest, dest, src);
    }

    void xor32(Imm32 imm, RegisterID dest)
    {
        m_assembler.eors_r(dest, dest, m_assembler.getImm(imm.m_value, ARMRegisters::S0));
    }

    // ===== Memory loads and stores =====

    void load8(ImplicitAddress address, RegisterID dest)
    {
        // Trailing 'true' requests a byte-sized transfer — presumably a
        // zero-extending LDRB; confirm against ARMAssembler::dataTransfer32.
        m_assembler.dataTransfer32(true, dest, address.base, address.offset, true);
    }

    void load32(ImplicitAddress address, RegisterID dest)
    {
        m_assembler.dataTransfer32(true, dest, address.base, address.offset);
    }

    void load32(BaseIndex address, RegisterID dest)
    {
        m_assembler.baseIndexTransfer32(true, dest, address.base, address.index, static_cast<int>(address.scale), address.offset);
    }

#if CPU(ARMV5_OR_LOWER)
    // ARMv5 and earlier cannot load an unaligned word directly; the
    // half-word-based implementation lives out of line (in the .cpp).
    void load32WithUnalignedHalfWords(BaseIndex address, RegisterID dest);
#else
    void load32WithUnalignedHalfWords(BaseIndex address, RegisterID dest)
    {
        load32(address, dest);
    }
#endif

    // Emits a load whose 32-bit offset is a constant-pool literal (the
    // ldr_un_imm of 0 below), so LinkBuffer/RepatchBuffer can patch the
    // offset later; returns the label identifying the patchable literal.
    DataLabel32 load32WithAddressOffsetPatch(Address address, RegisterID dest)
    {
        DataLabel32 dataLabel(this);
        m_assembler.ldr_un_imm(ARMRegisters::S0, 0);
        m_assembler.dtr_ur(true, dest, address.base, ARMRegisters::S0);
        return dataLabel;
    }

    // On this port the "patch to LEA" form is just a plain load; only the
    // label marking its start is needed.
    Label loadPtrWithPatchToLEA(Address address, RegisterID dest)
    {
        Label label(this);
        load32(address, dest);
        return label;
    }

    void load16(BaseIndex address, RegisterID dest)
    {
        // LDRH has no scaled-register addressing mode, so compute
        // base + (index << scale) into S1 first.
        m_assembler.add_r(ARMRegisters::S1, address.base, m_assembler.lsl(address.index, address.scale));
        load16(Address(ARMRegisters::S1, address.offset), dest);
    }

    void load16(ImplicitAddress address, RegisterID dest)
    {
        // Half-word transfers encode the offset magnitude plus an up/down
        // direction bit, hence the separate positive/negative forms.
        if (address.offset >= 0)
            m_assembler.ldrh_u(dest, address.base, m_assembler.getOffsetForHalfwordDataTransfer(address.offset, ARMRegisters::S0));
        else
            m_assembler.ldrh_d(dest, address.base, m_assembler.getOffsetForHalfwordDataTransfer(-address.offset, ARMRegisters::S0));
    }

    // Store with a patchable 32-bit offset; see load32WithAddressOffsetPatch.
    DataLabel32 store32WithAddressOffsetPatch(RegisterID src, Address address)
    {
        DataLabel32 dataLabel(this);
        m_assembler.ldr_un_imm(ARMRegisters::S0, 0);
        m_assembler.dtr_ur(false, src, address.base, ARMRegisters::S0);
        return dataLabel;
    }

    void store32(RegisterID src, ImplicitAddress address)
    {
        m_assembler.dataTransfer32(false, src, address.base, address.offset);
    }

    void store32(RegisterID src, BaseIndex address)
    {
        m_assembler.baseIndexTransfer32(false, src, address.base, address.index, static_cast<int>(address.scale), address.offset);
    }

    void store32(Imm32 imm, ImplicitAddress address)
    {
        // Pointer immediates are loaded from the constant pool (kept as a
        // single literal — presumably so they remain repatchable; confirm),
        // plain immediates go through moveImm().
        if (imm.m_isPointer)
            m_assembler.ldr_un_imm(ARMRegisters::S1, imm.m_value);
        else
            move(imm, ARMRegisters::S1);
        store32(ARMRegisters::S1, address);
    }

    // Store to an absolute address: materialize the address in S0, then STR.
    void store32(RegisterID src, void* address)
    {
        m_assembler.ldr_un_imm(ARMRegisters::S0, reinterpret_cast<ARMWord>(address));
        m_assembler.dtr_u(false, src, ARMRegisters::S0, 0);
    }

    void store32(Imm32 imm, void* address)
    {
        m_assembler.ldr_un_imm(ARMRegisters::S0, reinterpret_cast<ARMWord>(address));
        if (imm.m_isPointer)
            m_assembler.ldr_un_imm(ARMRegisters::S1, imm.m_value);
        else
            m_assembler.moveImm(imm.m_value, ARMRegisters::S1);
        m_assembler.dtr_u(false, ARMRegisters::S1, ARMRegisters::S0, 0);
    }

    // ===== Stack operations =====

    void pop(RegisterID dest)
    {
        m_assembler.pop_r(dest);
    }

    void push(RegisterID src)
    {
        m_assembler.push_r(src);
    }

    void push(Address address)
    {
        load32(address, ARMRegisters::S1);
        push(ARMRegisters::S1);
    }

    void push(Imm32 imm)
    {
        move(imm, ARMRegisters::S0);
        push(ARMRegisters::S0);
    }

    // ===== Register moves =====

    void move(Imm32 imm, RegisterID dest)
    {
        // Pointer immediates stay whole in the constant pool (see store32);
        // others may be synthesized by moveImm().
        if (imm.m_isPointer)
            m_assembler.ldr_un_imm(dest, imm.m_value);
        else
            m_assembler.moveImm(imm.m_value, dest);
    }

    void move(RegisterID src, RegisterID dest)
    {
        m_assembler.mov_r(dest, src);
    }

    void move(ImmPtr imm, RegisterID dest)
    {
        move(Imm32(imm), dest);
    }

    // Exchange two registers using S0 as the temporary.
    void swap(RegisterID reg1, RegisterID reg2)
    {
        m_assembler.mov_r(ARMRegisters::S0, reg1);
        m_assembler.mov_r(reg1, reg2);
        m_assembler.mov_r(reg2, ARMRegisters::S0);
    }

    // On a 32-bit target these extensions are plain moves (no-ops if
    // src == dest).
    void signExtend32ToPtr(RegisterID src, RegisterID dest)
    {
        if (src != dest)
            move(src, dest);
    }

    void zeroExtend32ToPtr(RegisterID src, RegisterID dest)
    {
        if (src != dest)
            move(src, dest);
    }

    // ===== Compare-and-branch =====

    Jump branch8(Condition cond, Address left, Imm32 right)
    {
        load8(left, ARMRegisters::S1);
        return branch32(cond, ARMRegisters::S1, right);
    }

    // useConstantPool is forwarded to ARMAssembler::jmp (used by the
    // patchable-branch helpers below).
    Jump branch32(Condition cond, RegisterID left, RegisterID right, int useConstantPool = 0)
    {
        m_assembler.cmp_r(left, right);
        return Jump(m_assembler.jmp(ARMCondition(cond), useConstantPool));
    }

    Jump branch32(Condition cond, RegisterID left, Imm32 right, int useConstantPool = 0)
    {
        if (right.m_isPointer) {
            m_assembler.ldr_un_imm(ARMRegisters::S0, right.m_value);
            m_assembler.cmp_r(left, ARMRegisters::S0);
        } else
            m_assembler.cmp_r(left, m_assembler.getImm(right.m_value, ARMRegisters::S0));
        return Jump(m_assembler.jmp(ARMCondition(cond), useConstantPool));
    }

    Jump branch32(Condition cond, RegisterID left, Address right)
    {
        load32(right, ARMRegisters::S1);
        return branch32(cond, left, ARMRegisters::S1);
    }

    Jump branch32(Condition cond, Address left, RegisterID right)
    {
        load32(left, ARMRegisters::S1);
        return branch32(cond, ARMRegisters::S1, right);
    }

    Jump branch32(Condition cond, Address left, Imm32 right)
    {
        load32(left, ARMRegisters::S1);
        return branch32(cond, ARMRegisters::S1, right);
    }

    Jump branch32(Condition cond, BaseIndex left, Imm32 right)
    {
        load32(left, ARMRegisters::S1);
        return branch32(cond, ARMRegisters::S1, right);
    }

    Jump branch32WithUnalignedHalfWords(Condition cond, BaseIndex left, Imm32 right)
    {
        load32WithUnalignedHalfWords(left, ARMRegisters::S1);
        return branch32(cond, ARMRegisters::S1, right);
    }

    // Not implemented on this port; asserts if ever reached.
    Jump branch16(Condition cond, BaseIndex left, RegisterID right)
    {
        UNUSED_PARAM(cond);
        UNUSED_PARAM(left);
        UNUSED_PARAM(right);
        ASSERT_NOT_REACHED();
        return jump();
    }

    Jump branch16(Condition cond, BaseIndex left, Imm32 right)
    {
        load16(left, ARMRegisters::S0);
        move(right, ARMRegisters::S1);
        m_assembler.cmp_r(ARMRegisters::S0, ARMRegisters::S1);
        return m_assembler.jmp(ARMCondition(cond));
    }

    Jump branchTest8(Condition cond, Address address, Imm32 mask = Imm32(-1))
    {
        load8(address, ARMRegisters::S1);
        return branchTest32(cond, ARMRegisters::S1, mask);
    }

    Jump branchTest32(Condition cond, RegisterID reg, RegisterID mask)
    {
        ASSERT((cond == Zero) || (cond == NonZero));
        m_assembler.tst_r(reg, mask);
        return Jump(m_assembler.jmp(ARMCondition(cond)));
    }

    Jump branchTest32(Condition cond, RegisterID reg, Imm32 mask = Imm32(-1))
    {
        ASSERT((cond == Zero) || (cond == NonZero));
        // Inverted-immediate encodings (OP2_INV_IMM) have no TST counterpart,
        // so use a flag-setting BIC into scratch S0 (result discarded).
        ARMWord w = m_assembler.getImm(mask.m_value, ARMRegisters::S0, true);
        if (w & ARMAssembler::OP2_INV_IMM)
            m_assembler.bics_r(ARMRegisters::S0, reg, w & ~ARMAssembler::OP2_INV_IMM);
        else
            m_assembler.tst_r(reg, w);
        return Jump(m_assembler.jmp(ARMCondition(cond)));
    }

    Jump branchTest32(Condition cond, Address address, Imm32 mask = Imm32(-1))
    {
        load32(address, ARMRegisters::S1);
        return branchTest32(cond, ARMRegisters::S1, mask);
    }

    Jump branchTest32(Condition cond, BaseIndex address, Imm32 mask = Imm32(-1))
    {
        load32(address, ARMRegisters::S1);
        return branchTest32(cond, ARMRegisters::S1, mask);
    }

    Jump jump()
    {
        return Jump(m_assembler.jmp());
    }

    void jump(RegisterID target)
    {
        m_assembler.bx(target);
    }

    // Indirect jump: load the target straight into pc.
    void jump(Address address)
    {
        load32(address, ARMRegisters::pc);
    }

    // ===== Arithmetic with a conditional branch on the resulting flags =====

    Jump branchAdd32(Condition cond, RegisterID src, RegisterID dest)
    {
        ASSERT((cond == Overflow) || (cond == Signed) || (cond == Zero) || (cond == NonZero));
        add32(src, dest);
        return Jump(m_assembler.jmp(ARMCondition(cond)));
    }

    Jump branchAdd32(Condition cond, Imm32 imm, RegisterID dest)
    {
        ASSERT((cond == Overflow) || (cond == Signed) || (cond == Zero) || (cond == NonZero));
        add32(imm, dest);
        return Jump(m_assembler.jmp(ARMCondition(cond)));
    }

    // Signed multiply with overflow detection: SMULL produces the full
    // 64-bit product (high word in S1); compare the high word against the
    // sign-extension of the low word (asr #31). They are equal (Z set)
    // exactly when the product fits in 32 bits.
    void mull32(RegisterID src1, RegisterID src2, RegisterID dest)
    {
        if (src1 == dest) {
            move(src1, ARMRegisters::S0);
            src1 = ARMRegisters::S0;
        }
        m_assembler.mull_r(ARMRegisters::S1, dest, src2, src1);
        m_assembler.cmp_r(ARMRegisters::S1, m_assembler.asr(dest, 31));
    }

    Jump branchMul32(Condition cond, RegisterID src, RegisterID dest)
    {
        ASSERT((cond == Overflow) || (cond == Signed) || (cond == Zero) || (cond == NonZero));
        // MUL does not set a usable overflow flag on ARM, so Overflow is
        // rewritten as mull32's compare plus a NonZero (NE) branch.
        if (cond == Overflow) {
            mull32(src, dest, dest);
            cond = NonZero;
        }
        else
            mul32(src, dest);
        return Jump(m_assembler.jmp(ARMCondition(cond)));
    }

    Jump branchMul32(Condition cond, Imm32 imm, RegisterID src, RegisterID dest)
    {
        ASSERT((cond == Overflow) || (cond == Signed) || (cond == Zero) || (cond == NonZero));
        if (cond == Overflow) {
            move(imm, ARMRegisters::S0);
            mull32(ARMRegisters::S0, src, dest);
            cond = NonZero;
        }
        else
            mul32(imm, src, dest);
        return Jump(m_assembler.jmp(ARMCondition(cond)));
    }

    Jump branchSub32(Condition cond, RegisterID src, RegisterID dest)
    {
        ASSERT((cond == Overflow) || (cond == Signed) || (cond == Zero) || (cond == NonZero));
        sub32(src, dest);
        return Jump(m_assembler.jmp(ARMCondition(cond)));
    }

    Jump branchSub32(Condition cond, Imm32 imm, RegisterID dest)
    {
        ASSERT((cond == Overflow) || (cond == Signed) || (cond == Zero) || (cond == NonZero));
        sub32(imm, dest);
        return Jump(m_assembler.jmp(ARMCondition(cond)));
    }

    Jump branchNeg32(Condition cond, RegisterID srcDest)
    {
        ASSERT((cond == Overflow) || (cond == Signed) || (cond == Zero) || (cond == NonZero));
        neg32(srcDest);
        return Jump(m_assembler.jmp(ARMCondition(cond)));
    }

    Jump branchOr32(Condition cond, RegisterID src, RegisterID dest)
    {
        ASSERT((cond == Signed) || (cond == Zero) || (cond == NonZero));
        or32(src, dest);
        return Jump(m_assembler.jmp(ARMCondition(cond)));
    }

    // ===== Calls and returns =====

    void breakpoint()
    {
        m_assembler.bkpt(0);
    }

    // Emits a linkable near call. On ARMv5+ the target is loaded from the
    // constant pool and called with BLX; on older cores prepareCall()
    // manually sets lr before a patchable jump.
    Call nearCall()
    {
#if WTF_ARM_ARCH_AT_LEAST(5)
        ensureSpace(2 * sizeof(ARMWord), sizeof(ARMWord));
        m_assembler.loadBranchTarget(ARMRegisters::S1, ARMAssembler::AL, true);
        return Call(m_assembler.blx(ARMRegisters::S1), Call::LinkableNear);
#else
        prepareCall();
        return Call(m_assembler.jmp(ARMAssembler::AL, true), Call::LinkableNear);
#endif
    }

    // Direct register call; not linkable, so a default-constructed JmpSrc
    // is returned.
    Call call(RegisterID target)
    {
        m_assembler.blx(target);
        JmpSrc jmpSrc;
        return Call(jmpSrc, Call::None);
    }

    void call(Address address)
    {
        call32(address.base, address.offset);
    }

    void ret()
    {
        m_assembler.bx(linkRegister);
    }

    // ===== Compare-and-set (materialize a condition as 0/1) =====
    // Pattern: compare, mov dest, #0 unconditionally, then conditionally
    // mov dest, #1.

    void set32(Condition cond, RegisterID left, RegisterID right, RegisterID dest)
    {
        m_assembler.cmp_r(left, right);
        m_assembler.mov_r(dest, ARMAssembler::getOp2(0));
        m_assembler.mov_r(dest, ARMAssembler::getOp2(1), ARMCondition(cond));
    }

    void set32(Condition cond, RegisterID left, Imm32 right, RegisterID dest)
    {
        m_assembler.cmp_r(left, m_assembler.getImm(right.m_value, ARMRegisters::S0));
        m_assembler.mov_r(dest, ARMAssembler::getOp2(0));
        m_assembler.mov_r(dest, ARMAssembler::getOp2(1), ARMCondition(cond));
    }

    void set8(Condition cond, RegisterID left, RegisterID right, RegisterID dest)
    {
        // ARM doesn't have byte registers
        set32(cond, left, right, dest);
    }

    void set8(Condition cond, Address left, RegisterID right, RegisterID dest)
    {
        // ARM doesn't have byte registers
        load32(left, ARMRegisters::S1);
        set32(cond, ARMRegisters::S1, right, dest);
    }

    void set8(Condition cond, RegisterID left, Imm32 right, RegisterID dest)
    {
        // ARM doesn't have byte registers
        set32(cond, left, right, dest);
    }

    void setTest32(Condition cond, Address address, Imm32 mask, RegisterID dest)
    {
        load32(address, ARMRegisters::S1);
        // NOTE(review): for the all-ones mask this emits cmp_r(0, S1), i.e.
        // it appears to compare register r0 against S1 rather than testing
        // S1 against zero — verify this is the intended encoding (cf. the
        // tst_r used by branchTest32).
        if (mask.m_value == -1)
            m_assembler.cmp_r(0, ARMRegisters::S1);
        else
            m_assembler.tst_r(ARMRegisters::S1, m_assembler.getImm(mask.m_value, ARMRegisters::S0));
        m_assembler.mov_r(dest, ARMAssembler::getOp2(0));
        m_assembler.mov_r(dest, ARMAssembler::getOp2(1), ARMCondition(cond));
    }

    void setTest8(Condition cond, Address address, Imm32 mask, RegisterID dest)
    {
        // ARM doesn't have byte registers
        setTest32(cond, address, mask, dest);
    }

    // ===== Pointer-width helpers (absolute addresses, patchable loads) =====

    void add32(Imm32 imm, RegisterID src, RegisterID dest)
    {
        // Non-flag-setting variant (three-operand form).
        m_assembler.add_r(dest, src, m_assembler.getImm(imm.m_value, ARMRegisters::S0));
    }

    // Read-modify-write add on an absolute address: load the word through
    // S1, adjust it, then store back through scratch S0.
    void add32(Imm32 imm, AbsoluteAddress address)
    {
        m_assembler.ldr_un_imm(ARMRegisters::S1, reinterpret_cast<ARMWord>(address.m_ptr));
        m_assembler.dtr_u(true, ARMRegisters::S1, ARMRegisters::S1, 0);
        add32(imm, ARMRegisters::S1);
        m_assembler.ldr_un_imm(ARMRegisters::S0, reinterpret_cast<ARMWord>(address.m_ptr));
        m_assembler.dtr_u(false, ARMRegisters::S1, ARMRegisters::S0, 0);
    }

    void sub32(Imm32 imm, AbsoluteAddress address)
    {
        m_assembler.ldr_un_imm(ARMRegisters::S1, reinterpret_cast<ARMWord>(address.m_ptr));
        m_assembler.dtr_u(true, ARMRegisters::S1, ARMRegisters::S1, 0);
        sub32(imm, ARMRegisters::S1);
        m_assembler.ldr_un_imm(ARMRegisters::S0, reinterpret_cast<ARMWord>(address.m_ptr));
        m_assembler.dtr_u(false, ARMRegisters::S1, ARMRegisters::S0, 0);
    }

    void load32(void* address, RegisterID dest)
    {
        m_assembler.ldr_un_imm(ARMRegisters::S0, reinterpret_cast<ARMWord>(address));
        m_assembler.dtr_u(true, dest, ARMRegisters::S0, 0);
    }

    Jump branch32(Condition cond, AbsoluteAddress left, RegisterID right)
    {
        load32(left.m_ptr, ARMRegisters::S1);
        return branch32(cond, ARMRegisters::S1, right);
    }

    Jump branch32(Condition cond, AbsoluteAddress left, Imm32 right)
    {
        load32(left.m_ptr, ARMRegisters::S1);
        return branch32(cond, ARMRegisters::S1, right);
    }

    // Linkable far call; same ARMv5+/pre-v5 split as nearCall().
    Call call()
    {
#if WTF_ARM_ARCH_AT_LEAST(5)
        ensureSpace(2 * sizeof(ARMWord), sizeof(ARMWord));
        m_assembler.loadBranchTarget(ARMRegisters::S1, ARMAssembler::AL, true);
        return Call(m_assembler.blx(ARMRegisters::S1), Call::Linkable);
#else
        prepareCall();
        return Call(m_assembler.jmp(ARMAssembler::AL, true), Call::Linkable);
#endif
    }

    Call tailRecursiveCall()
    {
        return Call::fromTailJump(jump());
    }

    Call makeTailRecursiveCall(Jump oldJump)
    {
        return Call::fromTailJump(oldJump);
    }

    // Loads a pointer-sized immediate from the constant pool so the value
    // can be repatched later; returns the label of the patchable literal.
    DataLabelPtr moveWithPatch(ImmPtr initialValue, RegisterID dest)
    {
        DataLabelPtr dataLabel(this);
        m_assembler.ldr_un_imm(dest, reinterpret_cast<ARMWord>(initialValue.m_value));
        return dataLabel;
    }

    Jump branchPtrWithPatch(Condition cond, RegisterID left, DataLabelPtr& dataLabel, ImmPtr initialRightValue = ImmPtr(0))
    {
        dataLabel = moveWithPatch(initialRightValue, ARMRegisters::S1);
        // 'true' requests the constant-pool-based (patchable) branch form.
        Jump jump = branch32(cond, left, ARMRegisters::S1, true);
        return jump;
    }

    Jump branchPtrWithPatch(Condition cond, Address left, DataLabelPtr& dataLabel, ImmPtr initialRightValue = ImmPtr(0))
    {
        load32(left, ARMRegisters::S1);
        dataLabel = moveWithPatch(initialRightValue, ARMRegisters::S0);
        Jump jump = branch32(cond, ARMRegisters::S0, ARMRegisters::S1, true);
        return jump;
    }

    DataLabelPtr storePtrWithPatch(ImmPtr initialValue, ImplicitAddress address)
    {
        DataLabelPtr dataLabel = moveWithPatch(initialValue, ARMRegisters::S1);
        store32(ARMRegisters::S1, address);
        return dataLabel;
    }

    DataLabelPtr storePtrWithPatch(ImplicitAddress address)
    {
        return storePtrWithPatch(ImmPtr(0), address);
    }

    // Floating point operators
    // Availability is gated on the runtime VFP check (s_isVFPPresent,
    // defined out of line).
    bool supportsFloatingPoint() const
    {
        return s_isVFPPresent;
    }

    bool supportsFloatingPointTruncate() const
    {
        return false;
    }

    bool supportsFloatingPointSqrt() const
    {
        return s_isVFPPresent;
    }

    void loadDouble(ImplicitAddress address, FPRegisterID dest)
    {
        m_assembler.doubleTransfer(true, dest, address.base, address.offset);
    }

    void loadDouble(const void* address, FPRegisterID dest)
    {
        m_assembler.ldr_un_imm(ARMRegisters::S0, (ARMWord)address);
        m_assembler.fdtr_u(true, dest, ARMRegisters::S0, 0);
    }

    void storeDouble(FPRegisterID src, ImplicitAddress address)
    {
        m_assembler.doubleTransfer(false, src, address.base, address.offset);
    }

    void addDouble(FPRegisterID src, FPRegisterID dest)
    {
        m_assembler.faddd_r(dest, dest, src);
    }

    void addDouble(Address src, FPRegisterID dest)
    {
        loadDouble(src, ARMRegisters::SD0);
        addDouble(ARMRegisters::SD0, dest);
    }

    void divDouble(FPRegisterID src, FPRegisterID dest)
    {
        m_assembler.fdivd_r(dest, dest, src);
    }

    void divDouble(Address src, FPRegisterID dest)
    {
        ASSERT_NOT_REACHED(); // Untested
        loadDouble(src, ARMRegisters::SD0);
        divDouble(ARMRegisters::SD0, dest);
    }

    void subDouble(FPRegisterID src, FPRegisterID dest)
    {
        m_assembler.fsubd_r(dest, dest, src);
    }

    void subDouble(Address src, FPRegisterID dest)
    {
        loadDouble(src, ARMRegisters::SD0);
        subDouble(ARMRegisters::SD0, dest);
    }

    void mulDouble(FPRegisterID src, FPRegisterID dest)
    {
        m_assembler.fmuld_r(dest, dest, src);
    }

    void mulDouble(Address src, FPRegisterID dest)
    {
        loadDouble(src, ARMRegisters::SD0);
        mulDouble(ARMRegisters::SD0, dest);
    }

    void sqrtDouble(FPRegisterID src, FPRegisterID dest)
    {
        m_assembler.fsqrtd_r(dest, src);
    }

    // int -> double: move the integer into a VFP register (fmsr), then
    // convert in place (fsitod).
    void convertInt32ToDouble(RegisterID src, FPRegisterID dest)
    {
        m_assembler.fmsr_r(dest, src);
        m_assembler.fsitod_r(dest, dest);
    }

    void convertInt32ToDouble(Address src, FPRegisterID dest)
    {
        ASSERT_NOT_REACHED(); // Untested
        // flds does not worth the effort here
        load32(src, ARMRegisters::S1);
        convertInt32ToDouble(ARMRegisters::S1, dest);
    }

    void convertInt32ToDouble(AbsoluteAddress src, FPRegisterID dest)
    {
        ASSERT_NOT_REACHED(); // Untested
        // flds does not worth the effort here
        m_assembler.ldr_un_imm(ARMRegisters::S1, (ARMWord)src.m_ptr);
        m_assembler.dtr_u(true, ARMRegisters::S1, ARMRegisters::S1, 0);
        convertInt32ToDouble(ARMRegisters::S1, dest);
    }

    // Compare doubles and branch. FCMPD + FMSTAT copy the FP comparison
    // flags into the APSR. For the *OrUnordered/special conditions, the
    // conditional CMP S0,S0 (executed only when V is set, i.e. the compare
    // was unordered) forces Z=1 so EQ-based conditions see "equal".
    Jump branchDouble(DoubleCondition cond, FPRegisterID left, FPRegisterID right)
    {
        m_assembler.fcmpd_r(left, right);
        m_assembler.fmstat();
        if (cond & DoubleConditionBitSpecial)
            m_assembler.cmp_r(ARMRegisters::S0, ARMRegisters::S0, ARMAssembler::VS);
        // NOTE(review): only DoubleConditionMask (0x0f) is stripped here, not
        // DoubleConditionBitSpecial (0x10) — verify ARMAssembler::jmp
        // tolerates/ignores the extra low bit in the condition value.
        return Jump(m_assembler.jmp(static_cast<ARMAssembler::Condition>(cond & ~DoubleConditionMask)));
    }

    // Truncates 'src' to an integer, and places the resulting 'dest'.
    // If the result is not representable as a 32 bit value, branch.
    // May also branch for some values that are representable in 32 bits
    // (specifically, in this case, INT_MIN).
    // Not supported on this port (see supportsFloatingPointTruncate()).
    Jump branchTruncateDoubleToInt32(FPRegisterID src, RegisterID dest)
    {
        UNUSED_PARAM(src);
        UNUSED_PARAM(dest);
        ASSERT_NOT_REACHED();
        return jump();
    }

    // Convert 'src' to an integer, and places the resulting 'dest'.
    // If the result is not representable as a 32 bit value, branch.
    // May also branch for some values that are representable in 32 bits
    // (specifically, in this case, 0).
    // Note: fpTemp is unused on this port; SD0 serves as the scratch double.
    void branchConvertDoubleToInt32(FPRegisterID src, RegisterID dest, JumpList& failureCases, FPRegisterID fpTemp)
    {
        m_assembler.ftosid_r(ARMRegisters::SD0, src);
        m_assembler.fmrs_r(dest, ARMRegisters::SD0);

        // Convert the integer result back to float & compare to the original value - if not equal or unordered (NaN) then jump.
        m_assembler.fsitod_r(ARMRegisters::SD0, ARMRegisters::SD0);
        failureCases.append(branchDouble(DoubleNotEqualOrUnordered, src, ARMRegisters::SD0));

        // If the result is zero, it might have been -0.0, and 0.0 equals to -0.0
        failureCases.append(branchTest32(Zero, dest));
    }

    // Set a double register to +0.0 by converting the integer constant 0.
    void zeroDouble(FPRegisterID srcDest)
    {
        m_assembler.mov_r(ARMRegisters::S0, ARMAssembler::getOp2(0));
        convertInt32ToDouble(ARMRegisters::S0, srcDest);
    }

protected:
    // Condition and ARMAssembler::Condition share the same encoding; this is
    // a pure cast.
    ARMAssembler::Condition ARMCondition(Condition cond)
    {
        return static_cast<ARMAssembler::Condition>(cond);
    }

    // Guarantee room in the instruction buffer/constant pool so that a
    // multi-instruction sequence is not split by a pool flush.
    void ensureSpace(int insnSpace, int constSpace)
    {
        m_assembler.ensureSpace(insnSpace, constSpace);
    }

    int sizeOfConstantPool()
    {
        return m_assembler.sizeOfConstantPool();
    }

    // Pre-ARMv5 call setup (no BLX available): set lr manually. Reading pc
    // yields the address of the current instruction + 8, i.e. the
    // instruction after the branch that must immediately follow this mov —
    // callers must not emit anything in between (hence the ensureSpace).
    void prepareCall()
    {
#if WTF_ARM_ARCH_VERSION < 5
        ensureSpace(2 * sizeof(ARMWord), sizeof(ARMWord));

        m_assembler.mov_r(linkRegister, ARMRegisters::pc);
#endif
    }

    // Emit a call through a function pointer stored at [base + offset].
    // On ARMv5+ the pointer is loaded into S1 and called with BLX; on older
    // cores it is loaded directly into pc, with prepareCall() setting lr
    // immediately before the load (which is why prepareCall() is re-issued
    // on each path, right before the final load).
    void call32(RegisterID base, int32_t offset)
    {
#if WTF_ARM_ARCH_AT_LEAST(5)
        int targetReg = ARMRegisters::S1;
#else
        int targetReg = ARMRegisters::pc;
#endif
        int tmpReg = ARMRegisters::S1;

        // NOTE(review): sp-relative offsets are biased by 4 here — presumably
        // compensating for a push performed earlier in the call sequence;
        // confirm against the callers.
        if (base == ARMRegisters::sp)
            offset += 4;

        if (offset >= 0) {
            if (offset <= 0xfff) {
                // Offset fits the 12-bit LDR immediate field directly.
                prepareCall();
                m_assembler.dtr_u(true, targetReg, base, offset);
            } else if (offset <= 0xfffff) {
                // Split: add the top 8 bits (offset & 0xff000, encoded as an
                // operand2 immediate with rotation 10 -> shifted left 12),
                // then LDR with the low 12 bits.
                m_assembler.add_r(tmpReg, base, ARMAssembler::OP2_IMM | (offset >> 12) | (10 << 8));
                prepareCall();
                m_assembler.dtr_u(true, targetReg, tmpReg, offset & 0xfff);
            } else {
                // Too large to encode: materialize the offset and use the
                // register-offset LDR form.
                m_assembler.moveImm(offset, tmpReg);
                prepareCall();
                m_assembler.dtr_ur(true, targetReg, base, tmpReg);
            }
        } else {
            // Same three strategies with the "down" (subtract-offset) forms.
            offset = -offset;
            if (offset <= 0xfff) {
                prepareCall();
                m_assembler.dtr_d(true, targetReg, base, offset);
            } else if (offset <= 0xfffff) {
                m_assembler.sub_r(tmpReg, base, ARMAssembler::OP2_IMM | (offset >> 12) | (10 << 8));
                prepareCall();
                m_assembler.dtr_d(true, targetReg, tmpReg, offset & 0xfff);
            } else {
                m_assembler.moveImm(offset, tmpReg);
                prepareCall();
                m_assembler.dtr_dr(true, targetReg, base, tmpReg);
            }
        }
#if WTF_ARM_ARCH_AT_LEAST(5)
        m_assembler.blx(targetReg);
#endif
    }

private:
    // LinkBuffer/RepatchBuffer use the private link/repatch hooks below.
    friend class LinkBuffer;
    friend class RepatchBuffer;

    // Bind a linkable call emitted by call()/nearCall() to its target.
    static void linkCall(void* code, Call call, FunctionPtr function)
    {
        ARMAssembler::linkCall(code, call.m_jmp, function.value());
    }

    static void repatchCall(CodeLocationCall call, CodeLocationLabel destination)
    {
        ARMAssembler::relinkCall(call.dataLocation(), destination.executableAddress());
    }

    static void repatchCall(CodeLocationCall call, FunctionPtr destination)
    {
        ARMAssembler::relinkCall(call.dataLocation(), destination.executableAddress());
    }

    // Whether VFP hardware was detected at runtime; defined out of line
    // (in the corresponding .cpp).
    static const bool s_isVFPPresent;
};
|
1004 |
|
1005 } |
|
1006 |
|
1007 #endif // ENABLE(ASSEMBLER) && CPU(ARM_TRADITIONAL) |
|
1008 |
|
1009 #endif // MacroAssemblerARM_h |