|
1 /* |
|
2 * Copyright (C) 2009 Apple Inc. All rights reserved. |
|
3 * Copyright (C) 2010 University of Szeged |
|
4 * |
|
5 * Redistribution and use in source and binary forms, with or without |
|
6 * modification, are permitted provided that the following conditions |
|
7 * are met: |
|
8 * 1. Redistributions of source code must retain the above copyright |
|
9 * notice, this list of conditions and the following disclaimer. |
|
10 * 2. Redistributions in binary form must reproduce the above copyright |
|
11 * notice, this list of conditions and the following disclaimer in the |
|
12 * documentation and/or other materials provided with the distribution. |
|
13 * |
|
14 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY |
|
15 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE |
|
16 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR |
|
17 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR |
|
18 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, |
|
19 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, |
|
20 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR |
|
21 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY |
|
22 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
|
23 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE |
|
24 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
|
25 */ |
|
26 |
|
27 #ifndef MacroAssemblerARMv7_h |
|
28 #define MacroAssemblerARMv7_h |
|
29 |
|
30 #if ENABLE(ASSEMBLER) |
|
31 |
|
32 #include "ARMv7Assembler.h" |
|
33 #include "AbstractMacroAssembler.h" |
|
34 |
|
35 namespace JSC { |
|
36 |
|
37 class MacroAssemblerARMv7 : public AbstractMacroAssembler<ARMv7Assembler> { |
|
// Scratch registers reserved for the macro assembler's own use. Client code
// must not allocate these: any op taking an immediate or memory operand may
// silently clobber them.
// FIXME: switch dataTempRegister & addressTempRegister, or possibly use r7?
//  - dTR is likely used more than aTR, and we'll get better instruction
//    encoding if it's in the low 8 registers.
static const RegisterID dataTempRegister = ARMRegisters::ip;
static const RegisterID addressTempRegister = ARMRegisters::r3;

// Scratch VFP register used for int<->double conversions and comparisons.
static const ARMRegisters::FPDoubleRegisterID fpTempRegister = ARMRegisters::d7;
// View the low single-precision half of fpTempRegister (s14), used when an
// instruction (e.g. vmov from a core register) operates on single precision.
inline ARMRegisters::FPSingleRegisterID fpTempRegisterAsSingle() { return ARMRegisters::asSingle(fpTempRegister); }
|
46 |
|
47 public: |
|
// Internal normalized address form: either base+immediate-offset or
// base+index<<scale. Public load/store entry points convert the generic
// Address/BaseIndex operands into an ArmAddress before emitting code.
struct ArmAddress {
    enum AddressType {
        HasOffset, // base register + 32-bit immediate offset
        HasIndex,  // base register + (index register << scale)
    } type;
    RegisterID base;
    union {
        int32_t offset;
        struct {
            RegisterID index;
            Scale scale;
        };
    } u;

    // Construct a base+offset address (offset defaults to 0).
    explicit ArmAddress(RegisterID base, int32_t offset = 0)
        : type(HasOffset)
        , base(base)
    {
        u.offset = offset;
    }

    // Construct a base+index address with an optional shift scale.
    explicit ArmAddress(RegisterID base, RegisterID index, Scale scale = TimesOne)
        : type(HasIndex)
        , base(base)
    {
        u.index = index;
        u.scale = scale;
    }
};
|
77 |
|
78 public: |
|
79 typedef ARMRegisters::FPDoubleRegisterID FPRegisterID; |
|
80 |
|
81 static const Scale ScalePtr = TimesFour; |
|
82 |
|
// Integer branch conditions, mapped directly onto the ARM condition codes
// tested after a flag-setting instruction (cmp/cmn/tst). Note the aliases:
// Zero/NonZero share encodings with Equal/NotEqual (EQ/NE).
enum Condition {
    Equal = ARMv7Assembler::ConditionEQ,
    NotEqual = ARMv7Assembler::ConditionNE,
    Above = ARMv7Assembler::ConditionHI,        // unsigned >
    AboveOrEqual = ARMv7Assembler::ConditionHS, // unsigned >=
    Below = ARMv7Assembler::ConditionLO,        // unsigned <
    BelowOrEqual = ARMv7Assembler::ConditionLS, // unsigned <=
    GreaterThan = ARMv7Assembler::ConditionGT,  // signed >
    GreaterThanOrEqual = ARMv7Assembler::ConditionGE,
    LessThan = ARMv7Assembler::ConditionLT,
    LessThanOrEqual = ARMv7Assembler::ConditionLE,
    Overflow = ARMv7Assembler::ConditionVS,
    Signed = ARMv7Assembler::ConditionMI,
    Zero = ARMv7Assembler::ConditionEQ,
    NonZero = ARMv7Assembler::ConditionNE
};
|
// Floating-point branch conditions, tested after vcmp + vmrs copies the VFP
// status flags into APSR. Two encodings are placeholders that branchDouble()
// must special-case (see the comments below).
enum DoubleCondition {
    // These conditions will only evaluate to true if the comparison is ordered - i.e. neither operand is NaN.
    DoubleEqual = ARMv7Assembler::ConditionEQ,
    DoubleNotEqual = ARMv7Assembler::ConditionVC, // Not the right flag! check for this & handle differently.
    DoubleGreaterThan = ARMv7Assembler::ConditionGT,
    DoubleGreaterThanOrEqual = ARMv7Assembler::ConditionGE,
    DoubleLessThan = ARMv7Assembler::ConditionLO,  // LO/LS (not LT/LE) exclude the unordered case
    DoubleLessThanOrEqual = ARMv7Assembler::ConditionLS,
    // If either operand is NaN, these conditions always evaluate to true.
    DoubleEqualOrUnordered = ARMv7Assembler::ConditionVS, // Not the right flag! check for this & handle differently.
    DoubleNotEqualOrUnordered = ARMv7Assembler::ConditionNE,
    DoubleGreaterThanOrUnordered = ARMv7Assembler::ConditionHI,
    DoubleGreaterThanOrEqualOrUnordered = ARMv7Assembler::ConditionHS,
    DoubleLessThanOrUnordered = ARMv7Assembler::ConditionLT,
    DoubleLessThanOrEqualOrUnordered = ARMv7Assembler::ConditionLE,
};
|
115 |
|
116 static const RegisterID stackPointerRegister = ARMRegisters::sp; |
|
117 static const RegisterID linkRegister = ARMRegisters::lr; |
|
118 |
|
119 // Integer arithmetic operations: |
|
120 // |
|
121 // Operations are typically two operand - operation(source, srcDst) |
|
122 // For many operations the source may be an Imm32, the srcDst operand |
|
123 // may often be a memory location (explictly described using an Address |
|
124 // object). |
|
125 |
|
// dest += src.
void add32(RegisterID src, RegisterID dest)
{
    m_assembler.add(dest, dest, src);
}

// dest += imm; delegates to the three-operand form.
void add32(Imm32 imm, RegisterID dest)
{
    add32(imm, dest, dest);
}
|
135 |
|
136 void add32(Imm32 imm, RegisterID src, RegisterID dest) |
|
137 { |
|
138 ARMThumbImmediate armImm = ARMThumbImmediate::makeUInt12OrEncodedImm(imm.m_value); |
|
139 if (armImm.isValid()) |
|
140 m_assembler.add(dest, src, armImm); |
|
141 else { |
|
142 move(imm, dataTempRegister); |
|
143 m_assembler.add(dest, src, dataTempRegister); |
|
144 } |
|
145 } |
|
146 |
|
// [address] += imm (read-modify-write). Clobbers dataTempRegister, and
// addressTempRegister when the immediate is not encodable.
void add32(Imm32 imm, Address address)
{
    load32(address, dataTempRegister);

    ARMThumbImmediate armImm = ARMThumbImmediate::makeUInt12OrEncodedImm(imm.m_value);
    if (armImm.isValid())
        m_assembler.add(dataTempRegister, dataTempRegister, armImm);
    else {
        // Hrrrm, since dataTempRegister holds the data loaded,
        // use addressTempRegister to hold the immediate.
        move(imm, addressTempRegister);
        m_assembler.add(dataTempRegister, dataTempRegister, addressTempRegister);
    }

    store32(dataTempRegister, address);
}
|
163 |
|
// dest += [src]. Clobbers dataTempRegister.
void add32(Address src, RegisterID dest)
{
    load32(src, dataTempRegister);
    add32(dataTempRegister, dest);
}
|
169 |
|
// *address.m_ptr += imm (read-modify-write on an absolute address).
// Clobbers dataTempRegister and addressTempRegister.
void add32(Imm32 imm, AbsoluteAddress address)
{
    load32(address.m_ptr, dataTempRegister);

    ARMThumbImmediate armImm = ARMThumbImmediate::makeUInt12OrEncodedImm(imm.m_value);
    if (armImm.isValid())
        m_assembler.add(dataTempRegister, dataTempRegister, armImm);
    else {
        // Hrrrm, since dataTempRegister holds the data loaded,
        // use addressTempRegister to hold the immediate.
        move(imm, addressTempRegister);
        m_assembler.add(dataTempRegister, dataTempRegister, addressTempRegister);
    }

    store32(dataTempRegister, address.m_ptr);
}
|
186 |
|
// dest &= src. ("ARM_and" avoids a collision with the Windows 'and' macro.)
void and32(RegisterID src, RegisterID dest)
{
    m_assembler.ARM_and(dest, dest, src);
}

// dest &= imm. Falls back to dataTempRegister for unencodable constants.
void and32(Imm32 imm, RegisterID dest)
{
    ARMThumbImmediate armImm = ARMThumbImmediate::makeEncodedImm(imm.m_value);
    if (armImm.isValid())
        m_assembler.ARM_and(dest, dest, armImm);
    else {
        move(imm, dataTempRegister);
        m_assembler.ARM_and(dest, dest, dataTempRegister);
    }
}
|
202 |
|
// dest <<= (shift_amount & 31). Clobbers dataTempRegister for the clamp.
void lshift32(RegisterID shift_amount, RegisterID dest)
{
    // Clamp the shift to the range 0..31
    ARMThumbImmediate armImm = ARMThumbImmediate::makeEncodedImm(0x1f);
    ASSERT(armImm.isValid());
    m_assembler.ARM_and(dataTempRegister, shift_amount, armImm);

    m_assembler.lsl(dest, dest, dataTempRegister);
}

// dest <<= (imm & 31); the mask mirrors hardware shift behavior.
void lshift32(Imm32 imm, RegisterID dest)
{
    m_assembler.lsl(dest, dest, imm.m_value & 0x1f);
}
|
217 |
|
// dest *= src. smull writes the high 32 bits into dataTempRegister,
// presumably so callers can test for overflow - TODO confirm against callers.
void mul32(RegisterID src, RegisterID dest)
{
    m_assembler.smull(dest, dataTempRegister, dest, src);
}

// dest = src * imm. Clobbers dataTempRegister (holds the immediate, then the
// high word of the product). NOTE(review): src must not be dataTempRegister.
void mul32(Imm32 imm, RegisterID src, RegisterID dest)
{
    move(imm, dataTempRegister);
    m_assembler.smull(dest, dataTempRegister, src, dataTempRegister);
}
|
228 |
|
// srcDest = -srcDest (two's-complement negate).
void neg32(RegisterID srcDest)
{
    m_assembler.neg(srcDest, srcDest);
}

// srcDest = ~srcDest (bitwise not).
void not32(RegisterID srcDest)
{
    m_assembler.mvn(srcDest, srcDest);
}
|
238 |
|
// dest |= src.
void or32(RegisterID src, RegisterID dest)
{
    m_assembler.orr(dest, dest, src);
}

// dest |= imm. Falls back to dataTempRegister for unencodable constants.
void or32(Imm32 imm, RegisterID dest)
{
    ARMThumbImmediate armImm = ARMThumbImmediate::makeEncodedImm(imm.m_value);
    if (armImm.isValid())
        m_assembler.orr(dest, dest, armImm);
    else {
        move(imm, dataTempRegister);
        m_assembler.orr(dest, dest, dataTempRegister);
    }
}
|
254 |
|
// dest >>= (shift_amount & 31), arithmetic (sign-propagating) shift.
// Clobbers dataTempRegister for the clamp.
void rshift32(RegisterID shift_amount, RegisterID dest)
{
    // Clamp the shift to the range 0..31
    ARMThumbImmediate armImm = ARMThumbImmediate::makeEncodedImm(0x1f);
    ASSERT(armImm.isValid());
    m_assembler.ARM_and(dataTempRegister, shift_amount, armImm);

    m_assembler.asr(dest, dest, dataTempRegister);
}

// dest >>= (imm & 31), arithmetic shift.
void rshift32(Imm32 imm, RegisterID dest)
{
    m_assembler.asr(dest, dest, imm.m_value & 0x1f);
}

// dest >>= (shift_amount & 31), logical (zero-filling) shift.
// Clobbers dataTempRegister for the clamp.
void urshift32(RegisterID shift_amount, RegisterID dest)
{
    // Clamp the shift to the range 0..31
    ARMThumbImmediate armImm = ARMThumbImmediate::makeEncodedImm(0x1f);
    ASSERT(armImm.isValid());
    m_assembler.ARM_and(dataTempRegister, shift_amount, armImm);

    m_assembler.lsr(dest, dest, dataTempRegister);
}

// dest >>= (imm & 31), logical shift.
void urshift32(Imm32 imm, RegisterID dest)
{
    m_assembler.lsr(dest, dest, imm.m_value & 0x1f);
}
|
284 |
|
// dest -= src.
void sub32(RegisterID src, RegisterID dest)
{
    m_assembler.sub(dest, dest, src);
}

// dest -= imm. Falls back to dataTempRegister for unencodable constants.
void sub32(Imm32 imm, RegisterID dest)
{
    ARMThumbImmediate armImm = ARMThumbImmediate::makeUInt12OrEncodedImm(imm.m_value);
    if (armImm.isValid())
        m_assembler.sub(dest, dest, armImm);
    else {
        move(imm, dataTempRegister);
        m_assembler.sub(dest, dest, dataTempRegister);
    }
}
|
300 |
|
// [address] -= imm (read-modify-write). Clobbers dataTempRegister, and
// addressTempRegister when the immediate is not encodable.
void sub32(Imm32 imm, Address address)
{
    load32(address, dataTempRegister);

    ARMThumbImmediate armImm = ARMThumbImmediate::makeUInt12OrEncodedImm(imm.m_value);
    if (armImm.isValid())
        m_assembler.sub(dataTempRegister, dataTempRegister, armImm);
    else {
        // Hrrrm, since dataTempRegister holds the data loaded,
        // use addressTempRegister to hold the immediate.
        move(imm, addressTempRegister);
        m_assembler.sub(dataTempRegister, dataTempRegister, addressTempRegister);
    }

    store32(dataTempRegister, address);
}
|
317 |
|
// dest -= [src]. Clobbers dataTempRegister.
void sub32(Address src, RegisterID dest)
{
    load32(src, dataTempRegister);
    sub32(dataTempRegister, dest);
}
|
323 |
|
// *address.m_ptr -= imm (read-modify-write on an absolute address).
// Clobbers dataTempRegister and addressTempRegister.
void sub32(Imm32 imm, AbsoluteAddress address)
{
    load32(address.m_ptr, dataTempRegister);

    ARMThumbImmediate armImm = ARMThumbImmediate::makeUInt12OrEncodedImm(imm.m_value);
    if (armImm.isValid())
        m_assembler.sub(dataTempRegister, dataTempRegister, armImm);
    else {
        // Hrrrm, since dataTempRegister holds the data loaded,
        // use addressTempRegister to hold the immediate.
        move(imm, addressTempRegister);
        m_assembler.sub(dataTempRegister, dataTempRegister, addressTempRegister);
    }

    store32(dataTempRegister, address.m_ptr);
}
|
340 |
|
// dest ^= src.
void xor32(RegisterID src, RegisterID dest)
{
    m_assembler.eor(dest, dest, src);
}

// dest ^= imm. Falls back to dataTempRegister for unencodable constants.
void xor32(Imm32 imm, RegisterID dest)
{
    ARMThumbImmediate armImm = ARMThumbImmediate::makeEncodedImm(imm.m_value);
    if (armImm.isValid())
        m_assembler.eor(dest, dest, armImm);
    else {
        move(imm, dataTempRegister);
        m_assembler.eor(dest, dest, dataTempRegister);
    }
}
|
356 |
|
357 |
|
358 // Memory access operations: |
|
359 // |
|
360 // Loads are of the form load(address, destination) and stores of the form |
|
361 // store(source, address). The source for a store may be an Imm32. Address |
|
362 // operand objects to loads and store will be implicitly constructed if a |
|
363 // register is passed. |
|
364 |
|
365 private: |
|
// Core 32-bit load from a normalized ArmAddress. Positive offsets use the
// 12-bit immediate form; negative offsets use the 8-bit negative-offset
// encoding (hence the -255 lower bound asserted below).
void load32(ArmAddress address, RegisterID dest)
{
    if (address.type == ArmAddress::HasIndex)
        m_assembler.ldr(dest, address.base, address.u.index, address.u.scale);
    else if (address.u.offset >= 0) {
        ARMThumbImmediate armImm = ARMThumbImmediate::makeUInt12(address.u.offset);
        ASSERT(armImm.isValid());
        m_assembler.ldr(dest, address.base, armImm);
    } else {
        ASSERT(address.u.offset >= -255);
        m_assembler.ldr(dest, address.base, address.u.offset, true, false);
    }
}
|
379 |
|
// Core 16-bit (zero-extending) load; same offset-encoding strategy as load32.
void load16(ArmAddress address, RegisterID dest)
{
    if (address.type == ArmAddress::HasIndex)
        m_assembler.ldrh(dest, address.base, address.u.index, address.u.scale);
    else if (address.u.offset >= 0) {
        ARMThumbImmediate armImm = ARMThumbImmediate::makeUInt12(address.u.offset);
        ASSERT(armImm.isValid());
        m_assembler.ldrh(dest, address.base, armImm);
    } else {
        ASSERT(address.u.offset >= -255);
        m_assembler.ldrh(dest, address.base, address.u.offset, true, false);
    }
}
|
393 |
|
// Core 8-bit (zero-extending) load; same offset-encoding strategy as load32.
void load8(ArmAddress address, RegisterID dest)
{
    if (address.type == ArmAddress::HasIndex)
        m_assembler.ldrb(dest, address.base, address.u.index, address.u.scale);
    else if (address.u.offset >= 0) {
        ARMThumbImmediate armImm = ARMThumbImmediate::makeUInt12(address.u.offset);
        ASSERT(armImm.isValid());
        m_assembler.ldrb(dest, address.base, armImm);
    } else {
        ASSERT(address.u.offset >= -255);
        m_assembler.ldrb(dest, address.base, address.u.offset, true, false);
    }
}
|
407 |
|
// Core 32-bit store to a normalized ArmAddress; mirrors load32's encoding.
void store32(RegisterID src, ArmAddress address)
{
    if (address.type == ArmAddress::HasIndex)
        m_assembler.str(src, address.base, address.u.index, address.u.scale);
    else if (address.u.offset >= 0) {
        ARMThumbImmediate armImm = ARMThumbImmediate::makeUInt12(address.u.offset);
        ASSERT(armImm.isValid());
        m_assembler.str(src, address.base, armImm);
    } else {
        ASSERT(address.u.offset >= -255);
        m_assembler.str(src, address.base, address.u.offset, true, false);
    }
}
|
421 |
|
422 public: |
|
// Public load entry points: normalize the operand via setupArmAddress() and
// dispatch to the private core loads. setupArmAddress may clobber the
// temp registers.
void load32(ImplicitAddress address, RegisterID dest)
{
    load32(setupArmAddress(address), dest);
}

void load32(BaseIndex address, RegisterID dest)
{
    load32(setupArmAddress(address), dest);
}

// On ARMv7 plain ldr handles unaligned halfword-aligned accesses, so this is
// the same as load32.
void load32WithUnalignedHalfWords(BaseIndex address, RegisterID dest)
{
    load32(setupArmAddress(address), dest);
}

// Load from an absolute address; clobbers addressTempRegister.
void load32(void* address, RegisterID dest)
{
    move(ImmPtr(address), addressTempRegister);
    m_assembler.ldr(dest, addressTempRegister, ARMThumbImmediate::makeUInt16(0));
}

void load8(ImplicitAddress address, RegisterID dest)
{
    load8(setupArmAddress(address), dest);
}
|
448 |
|
// Load with a later-repatchable 32-bit offset: the offset is emitted with a
// fixed-width movw/movt pair (via moveWithPatch) so the returned label can be
// used to rewrite it after code generation. Clobbers dataTempRegister.
DataLabel32 load32WithAddressOffsetPatch(Address address, RegisterID dest)
{
    DataLabel32 label = moveWithPatch(Imm32(address.offset), dataTempRegister);
    load32(ArmAddress(address.base, dataTempRegister), dest);
    return label;
}

// As above, but returns a Label at the start of the fixed-width sequence so
// the whole load can later be repatched into an address computation (LEA).
Label loadPtrWithPatchToLEA(Address address, RegisterID dest)
{
    Label label(this);
    moveFixedWidthEncoding(Imm32(address.offset), dataTempRegister);
    load32(ArmAddress(address.base, dataTempRegister), dest);
    return label;
}
|
463 |
|
// 16-bit zero-extending load from base+index (makeBaseIndexBase folds any
// constant offset into the base first).
void load16(BaseIndex address, RegisterID dest)
{
    m_assembler.ldrh(dest, makeBaseIndexBase(address), address.index, address.scale);
}

// 16-bit zero-extending load from base+offset; uses dataTempRegister when the
// offset is not a valid 12-bit immediate.
void load16(ImplicitAddress address, RegisterID dest)
{
    ARMThumbImmediate armImm = ARMThumbImmediate::makeUInt12(address.offset);
    if (armImm.isValid())
        m_assembler.ldrh(dest, address.base, armImm);
    else {
        move(Imm32(address.offset), dataTempRegister);
        m_assembler.ldrh(dest, address.base, dataTempRegister);
    }
}
|
479 |
|
// Store with a later-repatchable 32-bit offset (see
// load32WithAddressOffsetPatch). Clobbers dataTempRegister.
DataLabel32 store32WithAddressOffsetPatch(RegisterID src, Address address)
{
    DataLabel32 label = moveWithPatch(Imm32(address.offset), dataTempRegister);
    store32(src, ArmAddress(address.base, dataTempRegister));
    return label;
}

// Public store entry points; setupArmAddress may clobber the temp registers.
void store32(RegisterID src, ImplicitAddress address)
{
    store32(src, setupArmAddress(address));
}

void store32(RegisterID src, BaseIndex address)
{
    store32(src, setupArmAddress(address));
}

// Store an immediate: stage it in dataTempRegister first.
void store32(Imm32 imm, ImplicitAddress address)
{
    move(imm, dataTempRegister);
    store32(dataTempRegister, setupArmAddress(address));
}

// Store to an absolute address; clobbers addressTempRegister.
void store32(RegisterID src, void* address)
{
    move(ImmPtr(address), addressTempRegister);
    m_assembler.str(src, addressTempRegister, ARMThumbImmediate::makeUInt16(0));
}

// Store an immediate to an absolute address; clobbers both temp registers.
void store32(Imm32 imm, void* address)
{
    move(imm, dataTempRegister);
    store32(dataTempRegister, address);
}
|
514 |
|
515 |
|
516 // Floating-point operations: |
|
517 |
|
// Feature queries used by the JIT to select code paths.
bool supportsFloatingPoint() const { return true; }
// On x86(_64) the MacroAssembler provides an interface to truncate a double to an integer.
// If a value is not representable as an integer, and possibly for some values that are,
// (on x86 INT_MIN, since this is indistinguishable from results for out-of-range/NaN input)
// a branch will be taken. It is not clear whether this interface will be well suited to
// other platforms. On ARMv7 the hardware truncation operation produces multiple possible
// failure values (saturates to INT_MIN & INT_MAX, NaN reulsts in a value of 0). This is a
// temporary solution while we work out what this interface should be. Either we need to
// decide to make this interface work on all platforms, rework the interface to make it more
// generic, or decide that the MacroAssembler cannot practically be used to abstracted these
// operations, and make clients go directly to the m_assembler to plant truncation instructions.
// In short, FIXME:.
bool supportsFloatingPointTruncate() const { return false; }

// Square root is not implemented here yet; sqrtDouble() asserts if reached.
bool supportsFloatingPointSqrt() const
{
    return false;
}
|
536 |
|
// Load a double from base+offset. vldr's immediate offset must be a multiple
// of 4 within +/-1020, so out-of-range or misaligned offsets are folded into
// addressTempRegister first (clobbering it).
void loadDouble(ImplicitAddress address, FPRegisterID dest)
{
    RegisterID base = address.base;
    int32_t offset = address.offset;

    // Arm vfp addresses can be offset by a 9-bit ones-comp immediate, left shifted by 2.
    if ((offset & 3) || (offset > (255 * 4)) || (offset < -(255 * 4))) {
        add32(Imm32(offset), base, addressTempRegister);
        base = addressTempRegister;
        offset = 0;
    }

    m_assembler.vldr(dest, base, offset);
}
|
551 |
|
// Load a double from an absolute address; clobbers addressTempRegister.
void loadDouble(const void* address, FPRegisterID dest)
{
    move(ImmPtr(address), addressTempRegister);
    m_assembler.vldr(dest, addressTempRegister, 0);
}
|
557 |
|
// Store a double to base+offset; same vldr/vstr offset-range handling as
// loadDouble (clobbers addressTempRegister on the fallback path).
void storeDouble(FPRegisterID src, ImplicitAddress address)
{
    RegisterID base = address.base;
    int32_t offset = address.offset;

    // Arm vfp addresses can be offset by a 9-bit ones-comp immediate, left shifted by 2.
    if ((offset & 3) || (offset > (255 * 4)) || (offset < -(255 * 4))) {
        add32(Imm32(offset), base, addressTempRegister);
        base = addressTempRegister;
        offset = 0;
    }

    m_assembler.vstr(src, base, offset);
}
|
572 |
|
// Double-precision arithmetic. Two-operand forms compute dest = dest OP src;
// Address forms stage the operand in fpTempRegister (clobbered), and may
// clobber addressTempRegister via loadDouble.
void addDouble(FPRegisterID src, FPRegisterID dest)
{
    m_assembler.vadd_F64(dest, dest, src);
}

void addDouble(Address src, FPRegisterID dest)
{
    loadDouble(src, fpTempRegister);
    addDouble(fpTempRegister, dest);
}

void divDouble(FPRegisterID src, FPRegisterID dest)
{
    m_assembler.vdiv_F64(dest, dest, src);
}

void subDouble(FPRegisterID src, FPRegisterID dest)
{
    m_assembler.vsub_F64(dest, dest, src);
}

void subDouble(Address src, FPRegisterID dest)
{
    loadDouble(src, fpTempRegister);
    subDouble(fpTempRegister, dest);
}

void mulDouble(FPRegisterID src, FPRegisterID dest)
{
    m_assembler.vmul_F64(dest, dest, src);
}

void mulDouble(Address src, FPRegisterID dest)
{
    loadDouble(src, fpTempRegister);
    mulDouble(fpTempRegister, dest);
}

// Unimplemented: supportsFloatingPointSqrt() returns false, so the JIT must
// never emit this.
void sqrtDouble(FPRegisterID, FPRegisterID)
{
    ASSERT_NOT_REACHED();
}
|
615 |
|
// Convert a signed 32-bit integer to double. The integer is first transferred
// into the single-precision view of fpTempRegister, then vcvt widens it.
void convertInt32ToDouble(RegisterID src, FPRegisterID dest)
{
    m_assembler.vmov(fpTempRegisterAsSingle(), src);
    m_assembler.vcvt_F64_S32(dest, fpTempRegisterAsSingle());
}

// As above for a memory operand; clobbers dataTempRegister.
void convertInt32ToDouble(Address address, FPRegisterID dest)
{
    // Fixme: load directly into the fpr!
    load32(address, dataTempRegister);
    m_assembler.vmov(fpTempRegisterAsSingle(), dataTempRegister);
    m_assembler.vcvt_F64_S32(dest, fpTempRegisterAsSingle());
}

// As above for an absolute address; clobbers dataTempRegister (and
// addressTempRegister via load32).
void convertInt32ToDouble(AbsoluteAddress address, FPRegisterID dest)
{
    // Fixme: load directly into the fpr!
    load32(address.m_ptr, dataTempRegister);
    m_assembler.vmov(fpTempRegisterAsSingle(), dataTempRegister);
    m_assembler.vcvt_F64_S32(dest, fpTempRegisterAsSingle());
}
|
637 |
|
// Compare two doubles (vcmp), copy the VFP flags to APSR (vmrs), and branch
// on the requested condition. Two conditions have no single ARM condition
// code and are synthesized from a pair of branches (see DoubleCondition).
Jump branchDouble(DoubleCondition cond, FPRegisterID left, FPRegisterID right)
{
    m_assembler.vcmp_F64(left, right);
    m_assembler.vmrs();

    if (cond == DoubleNotEqual) {
        // ConditionNE jumps if NotEqual *or* unordered - force the unordered cases not to jump.
        Jump unordered = makeBranch(ARMv7Assembler::ConditionVS);
        Jump result = makeBranch(ARMv7Assembler::ConditionNE);
        unordered.link(this);
        return result;
    }
    if (cond == DoubleEqualOrUnordered) {
        // ConditionEQ alone would miss the unordered case; route unordered
        // around the NE test so both unordered and equal reach the jump.
        Jump unordered = makeBranch(ARMv7Assembler::ConditionVS);
        Jump notEqual = makeBranch(ARMv7Assembler::ConditionNE);
        unordered.link(this);
        // We get here if either unordered, or equal.
        Jump result = makeJump();
        notEqual.link(this);
        return result;
    }
    return makeBranch(cond);
}
|
661 |
|
// Unimplemented: supportsFloatingPointTruncate() returns false, so the JIT
// must never emit this. The jump() keeps the signature satisfied in debug.
Jump branchTruncateDoubleToInt32(FPRegisterID, RegisterID)
{
    ASSERT_NOT_REACHED();
    return jump();
}
|
667 |
|
// Convert 'src' to an integer, and places the resulting 'dest'.
// If the result is not representable as a 32 bit value, branch.
// May also branch for some values that are representable in 32 bits
// (specifically, in this case, 0).
// Clobbers fpTempRegister; the trailing FPRegisterID parameter is unused on
// this platform. Appends failure branches to 'failureCases'.
void branchConvertDoubleToInt32(FPRegisterID src, RegisterID dest, JumpList& failureCases, FPRegisterID)
{
    // vcvtr truncates toward zero per current rounding; result lands in dest.
    m_assembler.vcvtr_S32_F64(fpTempRegisterAsSingle(), src);
    m_assembler.vmov(dest, fpTempRegisterAsSingle());

    // Convert the integer result back to float & compare to the original value - if not equal or unordered (NaN) then jump.
    m_assembler.vcvt_F64_S32(fpTempRegister, fpTempRegisterAsSingle());
    failureCases.append(branchDouble(DoubleNotEqualOrUnordered, src, fpTempRegister));

    // If the result is zero, it might have been -0.0, and the double comparison won't catch this!
    failureCases.append(branchTest32(Zero, dest));
}
|
684 |
|
// dest = +0.0.
void zeroDouble(FPRegisterID dest)
{
    m_assembler.vmov_F64_0(dest);
}
|
689 |
|
690 // Stack manipulation operations: |
|
691 // |
|
692 // The ABI is assumed to provide a stack abstraction to memory, |
|
693 // containing machine word sized units of data. Push and pop |
|
694 // operations add and remove a single register sized unit of data |
|
695 // to or from the stack. Peek and poke operations read or write |
|
696 // values on the stack, without moving the current stack position. |
|
697 |
|
// Pop one word: load from [sp], then sp += sizeof(void*).
void pop(RegisterID dest)
{
    // load postindexed with writeback
    m_assembler.ldr(dest, ARMRegisters::sp, sizeof(void*), false, true);
}

// Push one word: sp -= sizeof(void*), then store to [sp].
void push(RegisterID src)
{
    // store preindexed with writeback
    m_assembler.str(src, ARMRegisters::sp, -sizeof(void*), true, true);
}

// Push the word at 'address'; clobbers dataTempRegister.
void push(Address address)
{
    load32(address, dataTempRegister);
    push(dataTempRegister);
}

// Push an immediate; clobbers dataTempRegister.
void push(Imm32 imm)
{
    move(imm, dataTempRegister);
    push(dataTempRegister);
}
|
721 |
|
722 // Register move operations: |
|
723 // |
|
724 // Move values in registers. |
|
725 |
|
726 void move(Imm32 imm, RegisterID dest) |
|
727 { |
|
728 uint32_t value = imm.m_value; |
|
729 |
|
730 if (imm.m_isPointer) |
|
731 moveFixedWidthEncoding(imm, dest); |
|
732 else { |
|
733 ARMThumbImmediate armImm = ARMThumbImmediate::makeEncodedImm(value); |
|
734 |
|
735 if (armImm.isValid()) |
|
736 m_assembler.mov(dest, armImm); |
|
737 else if ((armImm = ARMThumbImmediate::makeEncodedImm(~value)).isValid()) |
|
738 m_assembler.mvn(dest, armImm); |
|
739 else { |
|
740 m_assembler.mov(dest, ARMThumbImmediate::makeUInt16(value)); |
|
741 if (value & 0xffff0000) |
|
742 m_assembler.movt(dest, ARMThumbImmediate::makeUInt16(value >> 16)); |
|
743 } |
|
744 } |
|
745 } |
|
746 |
|
// Register-to-register move.
void move(RegisterID src, RegisterID dest)
{
    m_assembler.mov(dest, src);
}

// Pointer-immediate move; routes through move(Imm32) which uses the
// fixed-width (repatchable) encoding for pointers.
void move(ImmPtr imm, RegisterID dest)
{
    move(Imm32(imm), dest);
}
|
756 |
|
// Exchange two registers via dataTempRegister (clobbered).
void swap(RegisterID reg1, RegisterID reg2)
{
    move(reg1, dataTempRegister);
    move(reg2, reg1);
    move(dataTempRegister, reg2);
}

// On 32-bit ARM, pointer width == 32 bits, so both extensions are plain moves
// (and no-ops when src == dest).
void signExtend32ToPtr(RegisterID src, RegisterID dest)
{
    if (src != dest)
        move(src, dest);
}

void zeroExtend32ToPtr(RegisterID src, RegisterID dest)
{
    if (src != dest)
        move(src, dest);
}
|
775 |
|
776 |
|
777 // Forwards / external control flow operations: |
|
778 // |
|
779 // This set of jump and conditional branch operations return a Jump |
|
780 // object which may linked at a later point, allow forwards jump, |
|
781 // or jumps that will require external linkage (after the code has been |
|
782 // relocated). |
|
783 // |
|
784 // For branches, signed <, >, <= and >= are denoted as l, g, le, and ge |
|
785 // respecitvely, for unsigned comparisons the names b, a, be, and ae are |
|
786 // used (representing the names 'below' and 'above'). |
|
787 // |
|
788 // Operands to the comparision are provided in the expected order, e.g. |
|
789 // jle32(reg1, Imm32(5)) will branch if the value held in reg1, when |
|
790 // treated as a signed 32bit value, is less than or equal to 5. |
|
791 // |
|
792 // jz and jnz test whether the first operand is equal to zero, and take |
|
793 // an optional second operand of a mask under which to perform the test. |
|
794 private: |
|
795 |
|
796 // Should we be using TEQ for equal/not-equal? |
|
797 void compare32(RegisterID left, Imm32 right) |
|
798 { |
|
799 int32_t imm = right.m_value; |
|
800 if (!imm) |
|
801 m_assembler.tst(left, left); |
|
802 else { |
|
803 ARMThumbImmediate armImm = ARMThumbImmediate::makeEncodedImm(imm); |
|
804 if (armImm.isValid()) |
|
805 m_assembler.cmp(left, armImm); |
|
806 if ((armImm = ARMThumbImmediate::makeEncodedImm(-imm)).isValid()) |
|
807 m_assembler.cmn(left, armImm); |
|
808 else { |
|
809 move(Imm32(imm), dataTempRegister); |
|
810 m_assembler.cmp(left, dataTempRegister); |
|
811 } |
|
812 } |
|
813 } |
|
814 |
|
// Set the APSR flags for 'reg & mask' ahead of a Zero/NonZero branch.
// mask == -1 tests the whole register; unencodable masks are staged in
// dataTempRegister (clobbered).
void test32(RegisterID reg, Imm32 mask)
{
    int32_t imm = mask.m_value;

    if (imm == -1)
        m_assembler.tst(reg, reg);
    else {
        ARMThumbImmediate armImm = ARMThumbImmediate::makeEncodedImm(imm);
        if (armImm.isValid())
            m_assembler.tst(reg, armImm);
        else {
            move(mask, dataTempRegister);
            m_assembler.tst(reg, dataTempRegister);
        }
    }
}
|
831 |
|
832 public: |
|
// Branch if 'left <cond> right' (register-register compare).
Jump branch32(Condition cond, RegisterID left, RegisterID right)
{
    m_assembler.cmp(left, right);
    return Jump(makeBranch(cond));
}

// Branch if 'left <cond> right' (register-immediate compare); may clobber
// dataTempRegister via compare32.
Jump branch32(Condition cond, RegisterID left, Imm32 right)
{
    compare32(left, right);
    return Jump(makeBranch(cond));
}
|
844 |
|
// Memory-operand branch32 variants. The loaded operand goes into
// dataTempRegister, or into addressTempRegister when the subsequent
// immediate compare may itself need dataTempRegister.
Jump branch32(Condition cond, RegisterID left, Address right)
{
    load32(right, dataTempRegister);
    return branch32(cond, left, dataTempRegister);
}

Jump branch32(Condition cond, Address left, RegisterID right)
{
    load32(left, dataTempRegister);
    return branch32(cond, dataTempRegister, right);
}

Jump branch32(Condition cond, Address left, Imm32 right)
{
    // use addressTempRegister incase the branch32 we call uses dataTempRegister. :-/
    load32(left, addressTempRegister);
    return branch32(cond, addressTempRegister, right);
}

Jump branch32(Condition cond, BaseIndex left, Imm32 right)
{
    // use addressTempRegister incase the branch32 we call uses dataTempRegister. :-/
    load32(left, addressTempRegister);
    return branch32(cond, addressTempRegister, right);
}

Jump branch32WithUnalignedHalfWords(Condition cond, BaseIndex left, Imm32 right)
{
    // use addressTempRegister incase the branch32 we call uses dataTempRegister. :-/
    load32WithUnalignedHalfWords(left, addressTempRegister);
    return branch32(cond, addressTempRegister, right);
}
|
877 |
|
// Absolute-address branch32 variants; clobber the temp register used to hold
// the loaded value (see the inline comments).
Jump branch32(Condition cond, AbsoluteAddress left, RegisterID right)
{
    load32(left.m_ptr, dataTempRegister);
    return branch32(cond, dataTempRegister, right);
}

Jump branch32(Condition cond, AbsoluteAddress left, Imm32 right)
{
    // use addressTempRegister incase the branch32 we call uses dataTempRegister. :-/
    load32(left.m_ptr, addressTempRegister);
    return branch32(cond, addressTempRegister, right);
}
|
890 |
|
    // 16-bit compare-and-branch. Both operands are shifted left by 16 so that
    // the 32-bit cmp sets flags exactly as a 16-bit comparison would
    // (upper garbage bits of 'right' are shifted out).
    Jump branch16(Condition cond, BaseIndex left, RegisterID right)
    {
        load16(left, dataTempRegister);
        m_assembler.lsl(addressTempRegister, right, 16);
        m_assembler.lsl(dataTempRegister, dataTempRegister, 16);
        return branch32(cond, dataTempRegister, addressTempRegister);
    }
|
898 |
|
    // 16-bit compare against an immediate; the loaded half-word and the
    // immediate are both shifted left 16 so the 32-bit compare matches
    // 16-bit semantics.
    Jump branch16(Condition cond, BaseIndex left, Imm32 right)
    {
        // Use addressTempRegister in case the branch32 we delegate to
        // clobbers dataTempRegister.
        load16(left, addressTempRegister);
        m_assembler.lsl(addressTempRegister, addressTempRegister, 16);
        return branch32(cond, addressTempRegister, Imm32(right.m_value << 16));
    }
|
906 |
|
    // 8-bit compare-and-branch. Performs a full 32-bit compare; assumes the
    // caller provides 'left' with the upper 24 bits clear (as load8 does) —
    // TODO(review): confirm callers never pass a register with dirty high bits.
    Jump branch8(Condition cond, RegisterID left, Imm32 right)
    {
        compare32(left, right);
        return Jump(makeBranch(cond));
    }
|
912 |
|
    Jump branch8(Condition cond, Address left, Imm32 right)
    {
        // Use addressTempRegister in case the branch8 we delegate to
        // clobbers dataTempRegister.
        load8(left, addressTempRegister);
        return branch8(cond, addressTempRegister, right);
    }
|
919 |
|
    // Branch on (reg & mask) being zero / non-zero.
    Jump branchTest32(Condition cond, RegisterID reg, RegisterID mask)
    {
        ASSERT((cond == Zero) || (cond == NonZero));
        m_assembler.tst(reg, mask);
        return Jump(makeBranch(cond));
    }
|
926 |
|
    // Branch on (reg & mask) being zero / non-zero; mask defaults to all bits.
    Jump branchTest32(Condition cond, RegisterID reg, Imm32 mask = Imm32(-1))
    {
        ASSERT((cond == Zero) || (cond == NonZero));
        test32(reg, mask);
        return Jump(makeBranch(cond));
    }
|
933 |
|
    Jump branchTest32(Condition cond, Address address, Imm32 mask = Imm32(-1))
    {
        ASSERT((cond == Zero) || (cond == NonZero));
        // Use addressTempRegister in case the branchTest32 we delegate to
        // clobbers dataTempRegister.
        load32(address, addressTempRegister);
        return branchTest32(cond, addressTempRegister, mask);
    }
|
941 |
|
    Jump branchTest32(Condition cond, BaseIndex address, Imm32 mask = Imm32(-1))
    {
        ASSERT((cond == Zero) || (cond == NonZero));
        // Use addressTempRegister in case the branchTest32 we delegate to
        // clobbers dataTempRegister.
        load32(address, addressTempRegister);
        return branchTest32(cond, addressTempRegister, mask);
    }
|
949 |
|
    // 8-bit test; performs a 32-bit test, assuming upper bits of 'reg' are
    // clear (as after load8).
    Jump branchTest8(Condition cond, RegisterID reg, Imm32 mask = Imm32(-1))
    {
        ASSERT((cond == Zero) || (cond == NonZero));
        test32(reg, mask);
        return Jump(makeBranch(cond));
    }
|
956 |
|
    Jump branchTest8(Condition cond, Address address, Imm32 mask = Imm32(-1))
    {
        ASSERT((cond == Zero) || (cond == NonZero));
        // Use addressTempRegister in case the branchTest8 we delegate to
        // clobbers dataTempRegister.
        load8(address, addressTempRegister);
        return branchTest8(cond, addressTempRegister, mask);
    }
|
964 |
|
    // Unconditional jump with a patchable (relinkable) target.
    Jump jump()
    {
        return Jump(makeJump());
    }
|
969 |
|
    // Indirect jump to the address held in 'target'.
    void jump(RegisterID target)
    {
        m_assembler.bx(target);
    }
|
974 |
|
    // Address is a memory location containing the address to jump to;
    // loads it (clobbering dataTempRegister) and jumps indirect.
    void jump(Address address)
    {
        load32(address, dataTempRegister);
        m_assembler.bx(dataTempRegister);
    }
|
981 |
|
982 |
|
983 // Arithmetic control flow operations: |
|
984 // |
|
985 // This set of conditional branch operations branch based |
|
986 // on the result of an arithmetic operation. The operation |
|
987 // is performed as normal, storing the result. |
|
988 // |
|
989 // * jz operations branch if the result is zero. |
|
990 // * jo operations branch if the (signed) arithmetic |
|
991 // operation caused an overflow to occur. |
|
992 |
|
    // dest += src, then branch on the flag-setting add's result
    // (overflow / sign / zero conditions).
    Jump branchAdd32(Condition cond, RegisterID src, RegisterID dest)
    {
        ASSERT((cond == Overflow) || (cond == Signed) || (cond == Zero) || (cond == NonZero));
        m_assembler.add_S(dest, dest, src);
        return Jump(makeBranch(cond));
    }
|
999 |
|
1000 Jump branchAdd32(Condition cond, Imm32 imm, RegisterID dest) |
|
1001 { |
|
1002 ASSERT((cond == Overflow) || (cond == Signed) || (cond == Zero) || (cond == NonZero)); |
|
1003 ARMThumbImmediate armImm = ARMThumbImmediate::makeEncodedImm(imm.m_value); |
|
1004 if (armImm.isValid()) |
|
1005 m_assembler.add_S(dest, dest, armImm); |
|
1006 else { |
|
1007 move(imm, dataTempRegister); |
|
1008 m_assembler.add_S(dest, dest, dataTempRegister); |
|
1009 } |
|
1010 return Jump(makeBranch(cond)); |
|
1011 } |
|
1012 |
|
    // dest *= src, branching if the signed multiply overflowed 32 bits.
    // smull yields the full 64-bit product (low word in dest, high word in
    // dataTempRegister); overflow occurred iff the high word differs from the
    // sign-extension of the low word.
    Jump branchMul32(Condition cond, RegisterID src, RegisterID dest)
    {
        ASSERT(cond == Overflow);
        m_assembler.smull(dest, dataTempRegister, dest, src);
        m_assembler.asr(addressTempRegister, dest, 31);
        return branch32(NotEqual, addressTempRegister, dataTempRegister);
    }
|
1020 |
|
    // dest = src * imm, branching on signed 32-bit overflow (same 64-bit
    // high-word-vs-sign-extension check as the register form above).
    Jump branchMul32(Condition cond, Imm32 imm, RegisterID src, RegisterID dest)
    {
        ASSERT(cond == Overflow);
        move(imm, dataTempRegister);
        m_assembler.smull(dest, dataTempRegister, src, dataTempRegister);
        m_assembler.asr(addressTempRegister, dest, 31);
        return branch32(NotEqual, addressTempRegister, dataTempRegister);
    }
|
1029 |
|
    // dest |= src (flag-setting), then branch. Overflow is not meaningful
    // for a logical OR, hence not accepted.
    Jump branchOr32(Condition cond, RegisterID src, RegisterID dest)
    {
        ASSERT((cond == Signed) || (cond == Zero) || (cond == NonZero));
        m_assembler.orr_S(dest, dest, src);
        return Jump(makeBranch(cond));
    }
|
1036 |
|
    // dest -= src (flag-setting), then branch on the result.
    Jump branchSub32(Condition cond, RegisterID src, RegisterID dest)
    {
        ASSERT((cond == Overflow) || (cond == Signed) || (cond == Zero) || (cond == NonZero));
        m_assembler.sub_S(dest, dest, src);
        return Jump(makeBranch(cond));
    }
|
1043 |
|
1044 Jump branchSub32(Condition cond, Imm32 imm, RegisterID dest) |
|
1045 { |
|
1046 ASSERT((cond == Overflow) || (cond == Signed) || (cond == Zero) || (cond == NonZero)); |
|
1047 ARMThumbImmediate armImm = ARMThumbImmediate::makeEncodedImm(imm.m_value); |
|
1048 if (armImm.isValid()) |
|
1049 m_assembler.sub_S(dest, dest, armImm); |
|
1050 else { |
|
1051 move(imm, dataTempRegister); |
|
1052 m_assembler.sub_S(dest, dest, dataTempRegister); |
|
1053 } |
|
1054 return Jump(makeBranch(cond)); |
|
1055 } |
|
1056 |
|
1057 |
|
1058 // Miscellaneous operations: |
|
1059 |
|
    // Emit a software breakpoint instruction.
    void breakpoint()
    {
        m_assembler.bkpt();
    }
|
1064 |
|
    // Emit a linkable near call. The fixed-width movw/movt pair keeps the
    // target patchable; the actual address is filled in at link time.
    Call nearCall()
    {
        moveFixedWidthEncoding(Imm32(0), dataTempRegister);
        return Call(m_assembler.blx(dataTempRegister), Call::LinkableNear);
    }
|
1070 |
|
    // Emit a linkable call with a patchable target (filled in at link time).
    Call call()
    {
        moveFixedWidthEncoding(Imm32(0), dataTempRegister);
        return Call(m_assembler.blx(dataTempRegister), Call::Linkable);
    }
|
1076 |
|
    // Indirect call through the address in 'target'; not linkable.
    Call call(RegisterID target)
    {
        return Call(m_assembler.blx(target), Call::None);
    }
|
1081 |
|
    // Call the address stored at 'address' (clobbers dataTempRegister).
    Call call(Address address)
    {
        load32(address, dataTempRegister);
        return Call(m_assembler.blx(dataTempRegister), Call::None);
    }
|
1087 |
|
    // Return by branching to the link register.
    void ret()
    {
        m_assembler.bx(linkRegister);
    }
|
1092 |
|
    // Materialize the boolean result of (left cond right) into dest: 1 if the
    // condition holds, otherwise 0.
    void set32(Condition cond, RegisterID left, RegisterID right, RegisterID dest)
    {
        m_assembler.cmp(left, right);
        // IT block with an else leg: the first mov executes when the condition
        // holds, the second when it does not.
        m_assembler.it(armV7Condition(cond), false);
        m_assembler.mov(dest, ARMThumbImmediate::makeUInt16(1));
        m_assembler.mov(dest, ARMThumbImmediate::makeUInt16(0));
    }
|
1100 |
|
    // As set32(reg, reg) but the left operand comes from memory
    // (clobbers dataTempRegister).
    void set32(Condition cond, Address left, RegisterID right, RegisterID dest)
    {
        load32(left, dataTempRegister);
        set32(cond, dataTempRegister, right, dest);
    }
|
1106 |
|
    // Materialize (left cond imm) as 1/0 into dest.
    void set32(Condition cond, RegisterID left, Imm32 right, RegisterID dest)
    {
        compare32(left, right);
        // IT block with else leg: mov 1 if the condition holds, mov 0 otherwise.
        m_assembler.it(armV7Condition(cond), false);
        m_assembler.mov(dest, ARMThumbImmediate::makeUInt16(1));
        m_assembler.mov(dest, ARMThumbImmediate::makeUInt16(0));
    }
|
1114 |
|
    // 8-bit variant; the result (0 or 1) always fits in a byte, so the
    // 32-bit implementation is reused as-is.
    void set8(Condition cond, RegisterID left, RegisterID right, RegisterID dest)
    {
        set32(cond, left, right, dest);
    }
|
1119 |
|
    // 8-bit variant; delegates to set32 (result is always 0 or 1).
    void set8(Condition cond, Address left, RegisterID right, RegisterID dest)
    {
        set32(cond, left, right, dest);
    }
|
1124 |
|
    // 8-bit variant; delegates to set32 (result is always 0 or 1).
    void set8(Condition cond, RegisterID left, Imm32 right, RegisterID dest)
    {
        set32(cond, left, right, dest);
    }
|
1129 |
|
    // FIXME:
    // The mask should be optional... perhaps the argument order should be
    // dest-src, operations always have a dest? ... possibly not true, considering
    // asm ops like test, or pseudo ops like pop().
|
    // dest = ((*address & mask) cond 0) ? 1 : 0, via an IT block with else leg.
    void setTest32(Condition cond, Address address, Imm32 mask, RegisterID dest)
    {
        load32(address, dataTempRegister);
        test32(dataTempRegister, mask);
        m_assembler.it(armV7Condition(cond), false);
        m_assembler.mov(dest, ARMThumbImmediate::makeUInt16(1));
        m_assembler.mov(dest, ARMThumbImmediate::makeUInt16(0));
    }
|
1142 |
|
    // 8-bit variant of setTest32: loads a byte (zero-extended by load8),
    // tests it against the mask, and materializes 1/0 into dest.
    void setTest8(Condition cond, Address address, Imm32 mask, RegisterID dest)
    {
        load8(address, dataTempRegister);
        test32(dataTempRegister, mask);
        m_assembler.it(armV7Condition(cond), false);
        m_assembler.mov(dest, ARMThumbImmediate::makeUInt16(1));
        m_assembler.mov(dest, ARMThumbImmediate::makeUInt16(0));
    }
|
1151 |
|
    // Load an immediate using a fixed-width (always movw+movt) encoding so the
    // value can be repatched later; returns a label identifying the site.
    DataLabel32 moveWithPatch(Imm32 imm, RegisterID dst)
    {
        moveFixedWidthEncoding(imm, dst);
        return DataLabel32(this);
    }
|
1157 |
|
    // Pointer flavour of moveWithPatch; same fixed-width, repatchable encoding.
    DataLabelPtr moveWithPatch(ImmPtr imm, RegisterID dst)
    {
        moveFixedWidthEncoding(Imm32(imm), dst);
        return DataLabelPtr(this);
    }
|
1163 |
|
    // Compare 'left' against a patchable pointer constant (initially
    // initialRightValue) and branch; 'dataLabel' locates the constant for
    // later repatching.
    Jump branchPtrWithPatch(Condition cond, RegisterID left, DataLabelPtr& dataLabel, ImmPtr initialRightValue = ImmPtr(0))
    {
        dataLabel = moveWithPatch(initialRightValue, dataTempRegister);
        return branch32(cond, left, dataTempRegister);
    }
|
1169 |
|
    // Memory flavour: loads the left operand into addressTempRegister (since
    // dataTempRegister holds the patchable constant), then compares and branches.
    Jump branchPtrWithPatch(Condition cond, Address left, DataLabelPtr& dataLabel, ImmPtr initialRightValue = ImmPtr(0))
    {
        load32(left, addressTempRegister);
        dataLabel = moveWithPatch(initialRightValue, dataTempRegister);
        return branch32(cond, addressTempRegister, dataTempRegister);
    }
|
1176 |
|
    // Store a patchable pointer constant to memory; the returned label locates
    // the movw/movt pair holding the value for later repatching.
    DataLabelPtr storePtrWithPatch(ImmPtr initialValue, ImplicitAddress address)
    {
        DataLabelPtr label = moveWithPatch(initialValue, dataTempRegister);
        store32(dataTempRegister, address);
        return label;
    }
    // Convenience overload: store a null pointer placeholder to be patched later.
    DataLabelPtr storePtrWithPatch(ImplicitAddress address) { return storePtrWithPatch(ImmPtr(0), address); }
|
1184 |
|
1185 |
|
    // Like a normal call, but emitted as a plain branch (bx, not blx) so no
    // return address is recorded — the callee returns to our caller.
    Call tailRecursiveCall()
    {
        moveFixedWidthEncoding(Imm32(0), dataTempRegister);
        return Call(m_assembler.bx(dataTempRegister), Call::Linkable);
    }
|
1192 |
|
    // Convert an existing (unlinked) jump into a tail call by linking it to a
    // freshly emitted tail-call sequence.
    Call makeTailRecursiveCall(Jump oldJump)
    {
        oldJump.link(this);
        return tailRecursiveCall();
    }
|
1198 |
|
1199 |
|
1200 protected: |
|
    // Emit an unconditional jump as a fixed-width constant load plus bx, so the
    // destination can be patched at link time.
    ARMv7Assembler::JmpSrc makeJump()
    {
        moveFixedWidthEncoding(Imm32(0), dataTempRegister);
        return m_assembler.bx(dataTempRegister);
    }
|
1206 |
|
    // Emit a conditional branch: an IT block predicates the following
    // instructions (the movw/movt pair and the bx) on 'cond', so the branch is
    // taken only when the condition holds; the target stays patchable.
    ARMv7Assembler::JmpSrc makeBranch(ARMv7Assembler::Condition cond)
    {
        m_assembler.it(cond, true, true);
        moveFixedWidthEncoding(Imm32(0), dataTempRegister);
        return m_assembler.bx(dataTempRegister);
    }
    // Overloads mapping the portable condition enums onto ARMv7 conditions.
    ARMv7Assembler::JmpSrc makeBranch(Condition cond) { return makeBranch(armV7Condition(cond)); }
    ARMv7Assembler::JmpSrc makeBranch(DoubleCondition cond) { return makeBranch(armV7Condition(cond)); }
|
1215 |
|
1216 ArmAddress setupArmAddress(BaseIndex address) |
|
1217 { |
|
1218 if (address.offset) { |
|
1219 ARMThumbImmediate imm = ARMThumbImmediate::makeUInt12OrEncodedImm(address.offset); |
|
1220 if (imm.isValid()) |
|
1221 m_assembler.add(addressTempRegister, address.base, imm); |
|
1222 else { |
|
1223 move(Imm32(address.offset), addressTempRegister); |
|
1224 m_assembler.add(addressTempRegister, addressTempRegister, address.base); |
|
1225 } |
|
1226 |
|
1227 return ArmAddress(addressTempRegister, address.index, address.scale); |
|
1228 } else |
|
1229 return ArmAddress(address.base, address.index, address.scale); |
|
1230 } |
|
1231 |
|
1232 ArmAddress setupArmAddress(Address address) |
|
1233 { |
|
1234 if ((address.offset >= -0xff) && (address.offset <= 0xfff)) |
|
1235 return ArmAddress(address.base, address.offset); |
|
1236 |
|
1237 move(Imm32(address.offset), addressTempRegister); |
|
1238 return ArmAddress(address.base, addressTempRegister); |
|
1239 } |
|
1240 |
|
1241 ArmAddress setupArmAddress(ImplicitAddress address) |
|
1242 { |
|
1243 if ((address.offset >= -0xff) && (address.offset <= 0xfff)) |
|
1244 return ArmAddress(address.base, address.offset); |
|
1245 |
|
1246 move(Imm32(address.offset), addressTempRegister); |
|
1247 return ArmAddress(address.base, addressTempRegister); |
|
1248 } |
|
1249 |
|
1250 RegisterID makeBaseIndexBase(BaseIndex address) |
|
1251 { |
|
1252 if (!address.offset) |
|
1253 return address.base; |
|
1254 |
|
1255 ARMThumbImmediate imm = ARMThumbImmediate::makeUInt12OrEncodedImm(address.offset); |
|
1256 if (imm.isValid()) |
|
1257 m_assembler.add(addressTempRegister, address.base, imm); |
|
1258 else { |
|
1259 move(Imm32(address.offset), addressTempRegister); |
|
1260 m_assembler.add(addressTempRegister, addressTempRegister, address.base); |
|
1261 } |
|
1262 |
|
1263 return addressTempRegister; |
|
1264 } |
|
1265 |
|
1266 void moveFixedWidthEncoding(Imm32 imm, RegisterID dst) |
|
1267 { |
|
1268 uint32_t value = imm.m_value; |
|
1269 m_assembler.movT3(dst, ARMThumbImmediate::makeUInt16(value & 0xffff)); |
|
1270 m_assembler.movt(dst, ARMThumbImmediate::makeUInt16(value >> 16)); |
|
1271 } |
|
1272 |
|
    // Map the portable integer-condition enum onto the assembler's condition
    // codes; the enums are defined with matching numeric values, so this is a
    // plain cast.
    ARMv7Assembler::Condition armV7Condition(Condition cond)
    {
        return static_cast<ARMv7Assembler::Condition>(cond);
    }

    // Same mapping for double (floating-point) conditions.
    ARMv7Assembler::Condition armV7Condition(DoubleCondition cond)
    {
        return static_cast<ARMv7Assembler::Condition>(cond);
    }
|
1282 |
|
1283 private: |
|
1284 friend class LinkBuffer; |
|
1285 friend class RepatchBuffer; |
|
1286 |
|
    // Bind a not-yet-linked call site in 'code' to 'function'
    // (used by LinkBuffer at finalization time).
    static void linkCall(void* code, Call call, FunctionPtr function)
    {
        ARMv7Assembler::linkCall(code, call.m_jmp, function.value());
    }
|
1291 |
|
    // Redirect an already-linked call site to a new destination label
    // (used by RepatchBuffer on live code).
    static void repatchCall(CodeLocationCall call, CodeLocationLabel destination)
    {
        ARMv7Assembler::relinkCall(call.dataLocation(), destination.executableAddress());
    }

    // As above, but targeting a C function pointer.
    static void repatchCall(CodeLocationCall call, FunctionPtr destination)
    {
        ARMv7Assembler::relinkCall(call.dataLocation(), destination.executableAddress());
    }
|
1301 }; |
|
1302 |
|
1303 } // namespace JSC |
|
1304 |
|
1305 #endif // ENABLE(ASSEMBLER) |
|
1306 |
|
1307 #endif // MacroAssemblerARMv7_h |