|
1 /* |
|
2 * Copyright (C) 2009 Apple Inc. All rights reserved. |
|
3 * Copyright (C) 2010 University of Szeged |
|
4 * |
|
5 * Redistribution and use in source and binary forms, with or without |
|
6 * modification, are permitted provided that the following conditions |
|
7 * are met: |
|
8 * 1. Redistributions of source code must retain the above copyright |
|
9 * notice, this list of conditions and the following disclaimer. |
|
10 * 2. Redistributions in binary form must reproduce the above copyright |
|
11 * notice, this list of conditions and the following disclaimer in the |
|
12 * documentation and/or other materials provided with the distribution. |
|
13 * |
|
14 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY |
|
15 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE |
|
16 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR |
|
17 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR |
|
18 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, |
|
19 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, |
|
20 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR |
|
21 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY |
|
22 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
|
23 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE |
|
24 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
|
25 */ |
|
26 |
|
27 #ifndef ARMAssembler_h |
|
28 #define ARMAssembler_h |
|
29 |
|
30 #if ENABLE(ASSEMBLER) && CPU(ARM_THUMB2) |
|
31 |
|
32 #include "AssemblerBuffer.h" |
|
33 #include <wtf/Assertions.h> |
|
34 #include <wtf/Vector.h> |
|
35 #include <stdint.h> |
|
36 |
|
37 namespace JSC { |
|
38 |
|
39 namespace ARMRegisters { |
|
40 typedef enum { |
|
41 r0, |
|
42 r1, |
|
43 r2, |
|
44 r3, |
|
45 r4, |
|
46 r5, |
|
47 r6, |
|
48 r7, wr = r7, // thumb work register |
|
49 r8, |
|
50 r9, sb = r9, // static base |
|
51 r10, sl = r10, // stack limit |
|
52 r11, fp = r11, // frame pointer |
|
53 r12, ip = r12, |
|
54 r13, sp = r13, |
|
55 r14, lr = r14, |
|
56 r15, pc = r15, |
|
57 } RegisterID; |
|
58 |
|
59 typedef enum { |
|
60 s0, |
|
61 s1, |
|
62 s2, |
|
63 s3, |
|
64 s4, |
|
65 s5, |
|
66 s6, |
|
67 s7, |
|
68 s8, |
|
69 s9, |
|
70 s10, |
|
71 s11, |
|
72 s12, |
|
73 s13, |
|
74 s14, |
|
75 s15, |
|
76 s16, |
|
77 s17, |
|
78 s18, |
|
79 s19, |
|
80 s20, |
|
81 s21, |
|
82 s22, |
|
83 s23, |
|
84 s24, |
|
85 s25, |
|
86 s26, |
|
87 s27, |
|
88 s28, |
|
89 s29, |
|
90 s30, |
|
91 s31, |
|
92 } FPSingleRegisterID; |
|
93 |
|
94 typedef enum { |
|
95 d0, |
|
96 d1, |
|
97 d2, |
|
98 d3, |
|
99 d4, |
|
100 d5, |
|
101 d6, |
|
102 d7, |
|
103 d8, |
|
104 d9, |
|
105 d10, |
|
106 d11, |
|
107 d12, |
|
108 d13, |
|
109 d14, |
|
110 d15, |
|
111 d16, |
|
112 d17, |
|
113 d18, |
|
114 d19, |
|
115 d20, |
|
116 d21, |
|
117 d22, |
|
118 d23, |
|
119 d24, |
|
120 d25, |
|
121 d26, |
|
122 d27, |
|
123 d28, |
|
124 d29, |
|
125 d30, |
|
126 d31, |
|
127 } FPDoubleRegisterID; |
|
128 |
|
129 typedef enum { |
|
130 q0, |
|
131 q1, |
|
132 q2, |
|
133 q3, |
|
134 q4, |
|
135 q5, |
|
136 q6, |
|
137 q7, |
|
138 q8, |
|
139 q9, |
|
140 q10, |
|
141 q11, |
|
142 q12, |
|
143 q13, |
|
144 q14, |
|
145 q15, |
|
146 q16, |
|
147 q17, |
|
148 q18, |
|
149 q19, |
|
150 q20, |
|
151 q21, |
|
152 q22, |
|
153 q23, |
|
154 q24, |
|
155 q25, |
|
156 q26, |
|
157 q27, |
|
158 q28, |
|
159 q29, |
|
160 q30, |
|
161 q31, |
|
162 } FPQuadRegisterID; |
|
163 |
|
164 inline FPSingleRegisterID asSingle(FPDoubleRegisterID reg) |
|
165 { |
|
166 ASSERT(reg < d16); |
|
167 return (FPSingleRegisterID)(reg << 1); |
|
168 } |
|
169 |
|
170 inline FPDoubleRegisterID asDouble(FPSingleRegisterID reg) |
|
171 { |
|
172 ASSERT(!(reg & 1)); |
|
173 return (FPDoubleRegisterID)(reg >> 1); |
|
174 } |
|
175 } |
|
176 |
|
177 class ARMv7Assembler; |
|
178 class ARMThumbImmediate { |
|
179 friend class ARMv7Assembler; |
|
180 |
|
181 typedef uint8_t ThumbImmediateType; |
|
182 static const ThumbImmediateType TypeInvalid = 0; |
|
183 static const ThumbImmediateType TypeEncoded = 1; |
|
184 static const ThumbImmediateType TypeUInt16 = 2; |
|
185 |
|
186 typedef union { |
|
187 int16_t asInt; |
|
188 struct { |
|
189 unsigned imm8 : 8; |
|
190 unsigned imm3 : 3; |
|
191 unsigned i : 1; |
|
192 unsigned imm4 : 4; |
|
193 }; |
|
194 // If this is an encoded immediate, then it may describe a shift, or a pattern. |
|
195 struct { |
|
196 unsigned shiftValue7 : 7; |
|
197 unsigned shiftAmount : 5; |
|
198 }; |
|
199 struct { |
|
200 unsigned immediate : 8; |
|
201 unsigned pattern : 4; |
|
202 }; |
|
203 } ThumbImmediateValue; |
|
204 |
|
205 // byte0 contains least significant bit; not using an array to make client code endian agnostic. |
|
206 typedef union { |
|
207 int32_t asInt; |
|
208 struct { |
|
209 uint8_t byte0; |
|
210 uint8_t byte1; |
|
211 uint8_t byte2; |
|
212 uint8_t byte3; |
|
213 }; |
|
214 } PatternBytes; |
|
215 |
|
216 ALWAYS_INLINE static void countLeadingZerosPartial(uint32_t& value, int32_t& zeros, const int N) |
|
217 { |
|
218 if (value & ~((1 << N) - 1)) /* check for any of the top N bits (of 2N bits) are set */ |
|
219 value >>= N; /* if any were set, lose the bottom N */ |
|
220 else /* if none of the top N bits are set, */ |
|
221 zeros += N; /* then we have identified N leading zeros */ |
|
222 } |
|
223 |
|
224 static int32_t countLeadingZeros(uint32_t value) |
|
225 { |
|
226 if (!value) |
|
227 return 32; |
|
228 |
|
229 int32_t zeros = 0; |
|
230 countLeadingZerosPartial(value, zeros, 16); |
|
231 countLeadingZerosPartial(value, zeros, 8); |
|
232 countLeadingZerosPartial(value, zeros, 4); |
|
233 countLeadingZerosPartial(value, zeros, 2); |
|
234 countLeadingZerosPartial(value, zeros, 1); |
|
235 return zeros; |
|
236 } |
|
237 |
|
238 ARMThumbImmediate() |
|
239 : m_type(TypeInvalid) |
|
240 { |
|
241 m_value.asInt = 0; |
|
242 } |
|
243 |
|
244 ARMThumbImmediate(ThumbImmediateType type, ThumbImmediateValue value) |
|
245 : m_type(type) |
|
246 , m_value(value) |
|
247 { |
|
248 } |
|
249 |
|
250 ARMThumbImmediate(ThumbImmediateType type, uint16_t value) |
|
251 : m_type(TypeUInt16) |
|
252 { |
|
253 // Make sure this constructor is only reached with type TypeUInt16; |
|
254 // this extra parameter makes the code a little clearer by making it |
|
255 // explicit at call sites which type is being constructed |
|
256 ASSERT_UNUSED(type, type == TypeUInt16); |
|
257 |
|
258 m_value.asInt = value; |
|
259 } |
|
260 |
|
261 public: |
|
262 static ARMThumbImmediate makeEncodedImm(uint32_t value) |
|
263 { |
|
264 ThumbImmediateValue encoding; |
|
265 encoding.asInt = 0; |
|
266 |
|
267 // okay, these are easy. |
|
268 if (value < 256) { |
|
269 encoding.immediate = value; |
|
270 encoding.pattern = 0; |
|
271 return ARMThumbImmediate(TypeEncoded, encoding); |
|
272 } |
|
273 |
|
274 int32_t leadingZeros = countLeadingZeros(value); |
|
275 // if there were 24 or more leading zeros, then we'd have hit the (value < 256) case. |
|
276 ASSERT(leadingZeros < 24); |
|
277 |
|
278 // Given a number with bit fields Z:B:C, where count(Z)+count(B)+count(C) == 32, |
|
279 // Z are the bits known zero, B is the 8-bit immediate, C are the bits to check for |
|
280 // zero. count(B) == 8, so the count of bits to be checked is 24 - count(Z). |
|
281 int32_t rightShiftAmount = 24 - leadingZeros; |
|
282 if (value == ((value >> rightShiftAmount) << rightShiftAmount)) { |
|
283 // Shift the value down to the low byte position. The assign to |
|
284 // shiftValue7 drops the implicit top bit. |
|
285 encoding.shiftValue7 = value >> rightShiftAmount; |
|
286 // The endoded shift amount is the magnitude of a right rotate. |
|
287 encoding.shiftAmount = 8 + leadingZeros; |
|
288 return ARMThumbImmediate(TypeEncoded, encoding); |
|
289 } |
|
290 |
|
291 PatternBytes bytes; |
|
292 bytes.asInt = value; |
|
293 |
|
294 if ((bytes.byte0 == bytes.byte1) && (bytes.byte0 == bytes.byte2) && (bytes.byte0 == bytes.byte3)) { |
|
295 encoding.immediate = bytes.byte0; |
|
296 encoding.pattern = 3; |
|
297 return ARMThumbImmediate(TypeEncoded, encoding); |
|
298 } |
|
299 |
|
300 if ((bytes.byte0 == bytes.byte2) && !(bytes.byte1 | bytes.byte3)) { |
|
301 encoding.immediate = bytes.byte0; |
|
302 encoding.pattern = 1; |
|
303 return ARMThumbImmediate(TypeEncoded, encoding); |
|
304 } |
|
305 |
|
306 if ((bytes.byte1 == bytes.byte3) && !(bytes.byte0 | bytes.byte2)) { |
|
307 encoding.immediate = bytes.byte0; |
|
308 encoding.pattern = 2; |
|
309 return ARMThumbImmediate(TypeEncoded, encoding); |
|
310 } |
|
311 |
|
312 return ARMThumbImmediate(); |
|
313 } |
|
314 |
|
315 static ARMThumbImmediate makeUInt12(int32_t value) |
|
316 { |
|
317 return (!(value & 0xfffff000)) |
|
318 ? ARMThumbImmediate(TypeUInt16, (uint16_t)value) |
|
319 : ARMThumbImmediate(); |
|
320 } |
|
321 |
|
322 static ARMThumbImmediate makeUInt12OrEncodedImm(int32_t value) |
|
323 { |
|
324 // If this is not a 12-bit unsigned it, try making an encoded immediate. |
|
325 return (!(value & 0xfffff000)) |
|
326 ? ARMThumbImmediate(TypeUInt16, (uint16_t)value) |
|
327 : makeEncodedImm(value); |
|
328 } |
|
329 |
|
330 // The 'make' methods, above, return a !isValid() value if the argument |
|
331 // cannot be represented as the requested type. This methods is called |
|
332 // 'get' since the argument can always be represented. |
|
333 static ARMThumbImmediate makeUInt16(uint16_t value) |
|
334 { |
|
335 return ARMThumbImmediate(TypeUInt16, value); |
|
336 } |
|
337 |
|
338 bool isValid() |
|
339 { |
|
340 return m_type != TypeInvalid; |
|
341 } |
|
342 |
|
343 // These methods rely on the format of encoded byte values. |
|
344 bool isUInt3() { return !(m_value.asInt & 0xfff8); } |
|
345 bool isUInt4() { return !(m_value.asInt & 0xfff0); } |
|
346 bool isUInt5() { return !(m_value.asInt & 0xffe0); } |
|
347 bool isUInt6() { return !(m_value.asInt & 0xffc0); } |
|
348 bool isUInt7() { return !(m_value.asInt & 0xff80); } |
|
349 bool isUInt8() { return !(m_value.asInt & 0xff00); } |
|
350 bool isUInt9() { return (m_type == TypeUInt16) && !(m_value.asInt & 0xfe00); } |
|
351 bool isUInt10() { return (m_type == TypeUInt16) && !(m_value.asInt & 0xfc00); } |
|
352 bool isUInt12() { return (m_type == TypeUInt16) && !(m_value.asInt & 0xf000); } |
|
353 bool isUInt16() { return m_type == TypeUInt16; } |
|
354 uint8_t getUInt3() { ASSERT(isUInt3()); return m_value.asInt; } |
|
355 uint8_t getUInt4() { ASSERT(isUInt4()); return m_value.asInt; } |
|
356 uint8_t getUInt5() { ASSERT(isUInt5()); return m_value.asInt; } |
|
357 uint8_t getUInt6() { ASSERT(isUInt6()); return m_value.asInt; } |
|
358 uint8_t getUInt7() { ASSERT(isUInt7()); return m_value.asInt; } |
|
359 uint8_t getUInt8() { ASSERT(isUInt8()); return m_value.asInt; } |
|
360 uint8_t getUInt9() { ASSERT(isUInt9()); return m_value.asInt; } |
|
361 uint8_t getUInt10() { ASSERT(isUInt10()); return m_value.asInt; } |
|
362 uint16_t getUInt12() { ASSERT(isUInt12()); return m_value.asInt; } |
|
363 uint16_t getUInt16() { ASSERT(isUInt16()); return m_value.asInt; } |
|
364 |
|
365 bool isEncodedImm() { return m_type == TypeEncoded; } |
|
366 |
|
367 private: |
|
368 ThumbImmediateType m_type; |
|
369 ThumbImmediateValue m_value; |
|
370 }; |
|
371 |
|
372 class VFPImmediate { |
|
373 public: |
|
374 VFPImmediate(double d) |
|
375 : m_value(-1) |
|
376 { |
|
377 union { |
|
378 uint64_t i; |
|
379 double d; |
|
380 } u; |
|
381 |
|
382 u.d = d; |
|
383 |
|
384 int sign = (u.i >> 63); |
|
385 int exponent = (u.i >> 52) & 0x7ff; |
|
386 uint64_t mantissa = u.i & 0x000fffffffffffffull; |
|
387 |
|
388 if ((exponent >= 0x3fc) && (exponent <= 0x403) && !(mantissa & 0x0000ffffffffffffull)) |
|
389 m_value = (sign << 7) | ((exponent & 7) << 4) | (int)(mantissa >> 48); |
|
390 } |
|
391 |
|
392 bool isValid() |
|
393 { |
|
394 return m_value != -1; |
|
395 } |
|
396 |
|
397 uint8_t value() |
|
398 { |
|
399 return (uint8_t)m_value; |
|
400 } |
|
401 |
|
402 private: |
|
403 int m_value; |
|
404 }; |
|
405 |
|
406 typedef enum { |
|
407 SRType_LSL, |
|
408 SRType_LSR, |
|
409 SRType_ASR, |
|
410 SRType_ROR, |
|
411 |
|
412 SRType_RRX = SRType_ROR |
|
413 } ARMShiftType; |
|
414 |
|
415 class ARMv7Assembler; |
|
416 class ShiftTypeAndAmount { |
|
417 friend class ARMv7Assembler; |
|
418 |
|
419 public: |
|
420 ShiftTypeAndAmount() |
|
421 { |
|
422 m_u.type = (ARMShiftType)0; |
|
423 m_u.amount = 0; |
|
424 } |
|
425 |
|
426 ShiftTypeAndAmount(ARMShiftType type, unsigned amount) |
|
427 { |
|
428 m_u.type = type; |
|
429 m_u.amount = amount & 31; |
|
430 } |
|
431 |
|
432 unsigned lo4() { return m_u.lo4; } |
|
433 unsigned hi4() { return m_u.hi4; } |
|
434 |
|
435 private: |
|
436 union { |
|
437 struct { |
|
438 unsigned lo4 : 4; |
|
439 unsigned hi4 : 4; |
|
440 }; |
|
441 struct { |
|
442 unsigned type : 2; |
|
443 unsigned amount : 5; |
|
444 }; |
|
445 } m_u; |
|
446 }; |
|
447 |
|
448 |
|
449 class ARMv7Assembler { |
|
450 public: |
|
451 ~ARMv7Assembler() |
|
452 { |
|
453 ASSERT(m_jumpsToLink.isEmpty()); |
|
454 } |
|
455 |
|
456 typedef ARMRegisters::RegisterID RegisterID; |
|
457 typedef ARMRegisters::FPSingleRegisterID FPSingleRegisterID; |
|
458 typedef ARMRegisters::FPDoubleRegisterID FPDoubleRegisterID; |
|
459 typedef ARMRegisters::FPQuadRegisterID FPQuadRegisterID; |
|
460 |
|
461 // (HS, LO, HI, LS) -> (AE, B, A, BE) |
|
462 // (VS, VC) -> (O, NO) |
|
463 typedef enum { |
|
464 ConditionEQ, |
|
465 ConditionNE, |
|
466 ConditionHS, |
|
467 ConditionLO, |
|
468 ConditionMI, |
|
469 ConditionPL, |
|
470 ConditionVS, |
|
471 ConditionVC, |
|
472 ConditionHI, |
|
473 ConditionLS, |
|
474 ConditionGE, |
|
475 ConditionLT, |
|
476 ConditionGT, |
|
477 ConditionLE, |
|
478 ConditionAL, |
|
479 |
|
480 ConditionCS = ConditionHS, |
|
481 ConditionCC = ConditionLO, |
|
482 } Condition; |
|
483 |
|
484 class JmpSrc { |
|
485 friend class ARMv7Assembler; |
|
486 friend class ARMInstructionFormatter; |
|
487 public: |
|
488 JmpSrc() |
|
489 : m_offset(-1) |
|
490 { |
|
491 } |
|
492 |
|
493 private: |
|
494 JmpSrc(int offset) |
|
495 : m_offset(offset) |
|
496 { |
|
497 } |
|
498 |
|
499 int m_offset; |
|
500 }; |
|
501 |
|
502 class JmpDst { |
|
503 friend class ARMv7Assembler; |
|
504 friend class ARMInstructionFormatter; |
|
505 public: |
|
506 JmpDst() |
|
507 : m_offset(-1) |
|
508 , m_used(false) |
|
509 { |
|
510 } |
|
511 |
|
512 bool isUsed() const { return m_used; } |
|
513 void used() { m_used = true; } |
|
514 private: |
|
515 JmpDst(int offset) |
|
516 : m_offset(offset) |
|
517 , m_used(false) |
|
518 { |
|
519 ASSERT(m_offset == offset); |
|
520 } |
|
521 |
|
522 int m_offset : 31; |
|
523 int m_used : 1; |
|
524 }; |
|
525 |
|
526 private: |
|
527 |
|
528 struct LinkRecord { |
|
529 LinkRecord(intptr_t from, intptr_t to) |
|
530 : from(from) |
|
531 , to(to) |
|
532 { |
|
533 } |
|
534 |
|
535 intptr_t from; |
|
536 intptr_t to; |
|
537 }; |
|
538 |
|
539 // ARMv7, Appx-A.6.3 |
|
540 bool BadReg(RegisterID reg) |
|
541 { |
|
542 return (reg == ARMRegisters::sp) || (reg == ARMRegisters::pc); |
|
543 } |
|
544 |
|
545 uint32_t singleRegisterMask(FPSingleRegisterID rdNum, int highBitsShift, int lowBitShift) |
|
546 { |
|
547 uint32_t rdMask = (rdNum >> 1) << highBitsShift; |
|
548 if (rdNum & 1) |
|
549 rdMask |= 1 << lowBitShift; |
|
550 return rdMask; |
|
551 } |
|
552 |
|
553 uint32_t doubleRegisterMask(FPDoubleRegisterID rdNum, int highBitShift, int lowBitsShift) |
|
554 { |
|
555 uint32_t rdMask = (rdNum & 0xf) << lowBitsShift; |
|
556 if (rdNum & 16) |
|
557 rdMask |= 1 << highBitShift; |
|
558 return rdMask; |
|
559 } |
|
560 |
|
561 typedef enum { |
|
562 OP_ADD_reg_T1 = 0x1800, |
|
563 OP_SUB_reg_T1 = 0x1A00, |
|
564 OP_ADD_imm_T1 = 0x1C00, |
|
565 OP_SUB_imm_T1 = 0x1E00, |
|
566 OP_MOV_imm_T1 = 0x2000, |
|
567 OP_CMP_imm_T1 = 0x2800, |
|
568 OP_ADD_imm_T2 = 0x3000, |
|
569 OP_SUB_imm_T2 = 0x3800, |
|
570 OP_AND_reg_T1 = 0x4000, |
|
571 OP_EOR_reg_T1 = 0x4040, |
|
572 OP_TST_reg_T1 = 0x4200, |
|
573 OP_RSB_imm_T1 = 0x4240, |
|
574 OP_CMP_reg_T1 = 0x4280, |
|
575 OP_ORR_reg_T1 = 0x4300, |
|
576 OP_MVN_reg_T1 = 0x43C0, |
|
577 OP_ADD_reg_T2 = 0x4400, |
|
578 OP_MOV_reg_T1 = 0x4600, |
|
579 OP_BLX = 0x4700, |
|
580 OP_BX = 0x4700, |
|
581 OP_STR_reg_T1 = 0x5000, |
|
582 OP_LDR_reg_T1 = 0x5800, |
|
583 OP_LDRH_reg_T1 = 0x5A00, |
|
584 OP_LDRB_reg_T1 = 0x5C00, |
|
585 OP_STR_imm_T1 = 0x6000, |
|
586 OP_LDR_imm_T1 = 0x6800, |
|
587 OP_LDRB_imm_T1 = 0x7800, |
|
588 OP_LDRH_imm_T1 = 0x8800, |
|
589 OP_STR_imm_T2 = 0x9000, |
|
590 OP_LDR_imm_T2 = 0x9800, |
|
591 OP_ADD_SP_imm_T1 = 0xA800, |
|
592 OP_ADD_SP_imm_T2 = 0xB000, |
|
593 OP_SUB_SP_imm_T1 = 0xB080, |
|
594 OP_BKPT = 0xBE00, |
|
595 OP_IT = 0xBF00, |
|
596 OP_NOP_T1 = 0xBF00, |
|
597 } OpcodeID; |
|
598 |
|
599 typedef enum { |
|
600 OP_AND_reg_T2 = 0xEA00, |
|
601 OP_TST_reg_T2 = 0xEA10, |
|
602 OP_ORR_reg_T2 = 0xEA40, |
|
603 OP_ORR_S_reg_T2 = 0xEA50, |
|
604 OP_ASR_imm_T1 = 0xEA4F, |
|
605 OP_LSL_imm_T1 = 0xEA4F, |
|
606 OP_LSR_imm_T1 = 0xEA4F, |
|
607 OP_ROR_imm_T1 = 0xEA4F, |
|
608 OP_MVN_reg_T2 = 0xEA6F, |
|
609 OP_EOR_reg_T2 = 0xEA80, |
|
610 OP_ADD_reg_T3 = 0xEB00, |
|
611 OP_ADD_S_reg_T3 = 0xEB10, |
|
612 OP_SUB_reg_T2 = 0xEBA0, |
|
613 OP_SUB_S_reg_T2 = 0xEBB0, |
|
614 OP_CMP_reg_T2 = 0xEBB0, |
|
615 OP_VSTR = 0xED00, |
|
616 OP_VLDR = 0xED10, |
|
617 OP_VMOV_StoC = 0xEE00, |
|
618 OP_VMOV_CtoS = 0xEE10, |
|
619 OP_VMUL_T2 = 0xEE20, |
|
620 OP_VADD_T2 = 0xEE30, |
|
621 OP_VSUB_T2 = 0xEE30, |
|
622 OP_VDIV = 0xEE80, |
|
623 OP_VCMP_T1 = 0xEEB0, |
|
624 OP_VCVT_FPIVFP = 0xEEB0, |
|
625 OP_VMOV_IMM_T2 = 0xEEB0, |
|
626 OP_VMRS = 0xEEB0, |
|
627 OP_B_T4a = 0xF000, |
|
628 OP_AND_imm_T1 = 0xF000, |
|
629 OP_TST_imm = 0xF010, |
|
630 OP_ORR_imm_T1 = 0xF040, |
|
631 OP_MOV_imm_T2 = 0xF040, |
|
632 OP_MVN_imm = 0xF060, |
|
633 OP_EOR_imm_T1 = 0xF080, |
|
634 OP_ADD_imm_T3 = 0xF100, |
|
635 OP_ADD_S_imm_T3 = 0xF110, |
|
636 OP_CMN_imm = 0xF110, |
|
637 OP_SUB_imm_T3 = 0xF1A0, |
|
638 OP_SUB_S_imm_T3 = 0xF1B0, |
|
639 OP_CMP_imm_T2 = 0xF1B0, |
|
640 OP_RSB_imm_T2 = 0xF1C0, |
|
641 OP_ADD_imm_T4 = 0xF200, |
|
642 OP_MOV_imm_T3 = 0xF240, |
|
643 OP_SUB_imm_T4 = 0xF2A0, |
|
644 OP_MOVT = 0xF2C0, |
|
645 OP_NOP_T2a = 0xF3AF, |
|
646 OP_LDRB_imm_T3 = 0xF810, |
|
647 OP_LDRB_reg_T2 = 0xF810, |
|
648 OP_LDRH_reg_T2 = 0xF830, |
|
649 OP_LDRH_imm_T3 = 0xF830, |
|
650 OP_STR_imm_T4 = 0xF840, |
|
651 OP_STR_reg_T2 = 0xF840, |
|
652 OP_LDR_imm_T4 = 0xF850, |
|
653 OP_LDR_reg_T2 = 0xF850, |
|
654 OP_LDRB_imm_T2 = 0xF890, |
|
655 OP_LDRH_imm_T2 = 0xF8B0, |
|
656 OP_STR_imm_T3 = 0xF8C0, |
|
657 OP_LDR_imm_T3 = 0xF8D0, |
|
658 OP_LSL_reg_T2 = 0xFA00, |
|
659 OP_LSR_reg_T2 = 0xFA20, |
|
660 OP_ASR_reg_T2 = 0xFA40, |
|
661 OP_ROR_reg_T2 = 0xFA60, |
|
662 OP_SMULL_T1 = 0xFB80, |
|
663 } OpcodeID1; |
|
664 |
|
665 typedef enum { |
|
666 OP_VADD_T2b = 0x0A00, |
|
667 OP_VDIVb = 0x0A00, |
|
668 OP_VLDRb = 0x0A00, |
|
669 OP_VMOV_IMM_T2b = 0x0A00, |
|
670 OP_VMUL_T2b = 0x0A00, |
|
671 OP_VSTRb = 0x0A00, |
|
672 OP_VMOV_CtoSb = 0x0A10, |
|
673 OP_VMOV_StoCb = 0x0A10, |
|
674 OP_VMRSb = 0x0A10, |
|
675 OP_VCMP_T1b = 0x0A40, |
|
676 OP_VCVT_FPIVFPb = 0x0A40, |
|
677 OP_VSUB_T2b = 0x0A40, |
|
678 OP_NOP_T2b = 0x8000, |
|
679 OP_B_T4b = 0x9000, |
|
680 } OpcodeID2; |
|
681 |
|
682 struct FourFours { |
|
683 FourFours(unsigned f3, unsigned f2, unsigned f1, unsigned f0) |
|
684 { |
|
685 m_u.f0 = f0; |
|
686 m_u.f1 = f1; |
|
687 m_u.f2 = f2; |
|
688 m_u.f3 = f3; |
|
689 } |
|
690 |
|
691 union { |
|
692 unsigned value; |
|
693 struct { |
|
694 unsigned f0 : 4; |
|
695 unsigned f1 : 4; |
|
696 unsigned f2 : 4; |
|
697 unsigned f3 : 4; |
|
698 }; |
|
699 } m_u; |
|
700 }; |
|
701 |
|
702 class ARMInstructionFormatter; |
|
703 |
|
704 // false means else! |
|
705 bool ifThenElseConditionBit(Condition condition, bool isIf) |
|
706 { |
|
707 return isIf ? (condition & 1) : !(condition & 1); |
|
708 } |
|
709 uint8_t ifThenElse(Condition condition, bool inst2if, bool inst3if, bool inst4if) |
|
710 { |
|
711 int mask = (ifThenElseConditionBit(condition, inst2if) << 3) |
|
712 | (ifThenElseConditionBit(condition, inst3if) << 2) |
|
713 | (ifThenElseConditionBit(condition, inst4if) << 1) |
|
714 | 1; |
|
715 ASSERT((condition != ConditionAL) || (mask & (mask - 1))); |
|
716 return (condition << 4) | mask; |
|
717 } |
|
718 uint8_t ifThenElse(Condition condition, bool inst2if, bool inst3if) |
|
719 { |
|
720 int mask = (ifThenElseConditionBit(condition, inst2if) << 3) |
|
721 | (ifThenElseConditionBit(condition, inst3if) << 2) |
|
722 | 2; |
|
723 ASSERT((condition != ConditionAL) || (mask & (mask - 1))); |
|
724 return (condition << 4) | mask; |
|
725 } |
|
726 uint8_t ifThenElse(Condition condition, bool inst2if) |
|
727 { |
|
728 int mask = (ifThenElseConditionBit(condition, inst2if) << 3) |
|
729 | 4; |
|
730 ASSERT((condition != ConditionAL) || (mask & (mask - 1))); |
|
731 return (condition << 4) | mask; |
|
732 } |
|
733 |
|
734 uint8_t ifThenElse(Condition condition) |
|
735 { |
|
736 int mask = 8; |
|
737 ASSERT((condition != ConditionAL) || (mask & (mask - 1))); |
|
738 return (condition << 4) | mask; |
|
739 } |
|
740 |
|
741 public: |
|
742 |
|
743 void add(RegisterID rd, RegisterID rn, ARMThumbImmediate imm) |
|
744 { |
|
745 // Rd can only be SP if Rn is also SP. |
|
746 ASSERT((rd != ARMRegisters::sp) || (rn == ARMRegisters::sp)); |
|
747 ASSERT(rd != ARMRegisters::pc); |
|
748 ASSERT(rn != ARMRegisters::pc); |
|
749 ASSERT(imm.isValid()); |
|
750 |
|
751 if (rn == ARMRegisters::sp) { |
|
752 if (!(rd & 8) && imm.isUInt10()) { |
|
753 m_formatter.oneWordOp5Reg3Imm8(OP_ADD_SP_imm_T1, rd, imm.getUInt10() >> 2); |
|
754 return; |
|
755 } else if ((rd == ARMRegisters::sp) && imm.isUInt9()) { |
|
756 m_formatter.oneWordOp9Imm7(OP_ADD_SP_imm_T2, imm.getUInt9() >> 2); |
|
757 return; |
|
758 } |
|
759 } else if (!((rd | rn) & 8)) { |
|
760 if (imm.isUInt3()) { |
|
761 m_formatter.oneWordOp7Reg3Reg3Reg3(OP_ADD_imm_T1, (RegisterID)imm.getUInt3(), rn, rd); |
|
762 return; |
|
763 } else if ((rd == rn) && imm.isUInt8()) { |
|
764 m_formatter.oneWordOp5Reg3Imm8(OP_ADD_imm_T2, rd, imm.getUInt8()); |
|
765 return; |
|
766 } |
|
767 } |
|
768 |
|
769 if (imm.isEncodedImm()) |
|
770 m_formatter.twoWordOp5i6Imm4Reg4EncodedImm(OP_ADD_imm_T3, rn, rd, imm); |
|
771 else { |
|
772 ASSERT(imm.isUInt12()); |
|
773 m_formatter.twoWordOp5i6Imm4Reg4EncodedImm(OP_ADD_imm_T4, rn, rd, imm); |
|
774 } |
|
775 } |
|
776 |
|
777 void add(RegisterID rd, RegisterID rn, RegisterID rm, ShiftTypeAndAmount shift) |
|
778 { |
|
779 ASSERT((rd != ARMRegisters::sp) || (rn == ARMRegisters::sp)); |
|
780 ASSERT(rd != ARMRegisters::pc); |
|
781 ASSERT(rn != ARMRegisters::pc); |
|
782 ASSERT(!BadReg(rm)); |
|
783 m_formatter.twoWordOp12Reg4FourFours(OP_ADD_reg_T3, rn, FourFours(shift.hi4(), rd, shift.lo4(), rm)); |
|
784 } |
|
785 |
|
786 // NOTE: In an IT block, add doesn't modify the flags register. |
|
787 void add(RegisterID rd, RegisterID rn, RegisterID rm) |
|
788 { |
|
789 if (rd == rn) |
|
790 m_formatter.oneWordOp8RegReg143(OP_ADD_reg_T2, rm, rd); |
|
791 else if (rd == rm) |
|
792 m_formatter.oneWordOp8RegReg143(OP_ADD_reg_T2, rn, rd); |
|
793 else if (!((rd | rn | rm) & 8)) |
|
794 m_formatter.oneWordOp7Reg3Reg3Reg3(OP_ADD_reg_T1, rm, rn, rd); |
|
795 else |
|
796 add(rd, rn, rm, ShiftTypeAndAmount()); |
|
797 } |
|
798 |
|
799 // Not allowed in an IT (if then) block. |
|
800 void add_S(RegisterID rd, RegisterID rn, ARMThumbImmediate imm) |
|
801 { |
|
802 // Rd can only be SP if Rn is also SP. |
|
803 ASSERT((rd != ARMRegisters::sp) || (rn == ARMRegisters::sp)); |
|
804 ASSERT(rd != ARMRegisters::pc); |
|
805 ASSERT(rn != ARMRegisters::pc); |
|
806 ASSERT(imm.isEncodedImm()); |
|
807 |
|
808 if (!((rd | rn) & 8)) { |
|
809 if (imm.isUInt3()) { |
|
810 m_formatter.oneWordOp7Reg3Reg3Reg3(OP_ADD_imm_T1, (RegisterID)imm.getUInt3(), rn, rd); |
|
811 return; |
|
812 } else if ((rd == rn) && imm.isUInt8()) { |
|
813 m_formatter.oneWordOp5Reg3Imm8(OP_ADD_imm_T2, rd, imm.getUInt8()); |
|
814 return; |
|
815 } |
|
816 } |
|
817 |
|
818 m_formatter.twoWordOp5i6Imm4Reg4EncodedImm(OP_ADD_S_imm_T3, rn, rd, imm); |
|
819 } |
|
820 |
|
821 // Not allowed in an IT (if then) block? |
|
822 void add_S(RegisterID rd, RegisterID rn, RegisterID rm, ShiftTypeAndAmount shift) |
|
823 { |
|
824 ASSERT((rd != ARMRegisters::sp) || (rn == ARMRegisters::sp)); |
|
825 ASSERT(rd != ARMRegisters::pc); |
|
826 ASSERT(rn != ARMRegisters::pc); |
|
827 ASSERT(!BadReg(rm)); |
|
828 m_formatter.twoWordOp12Reg4FourFours(OP_ADD_S_reg_T3, rn, FourFours(shift.hi4(), rd, shift.lo4(), rm)); |
|
829 } |
|
830 |
|
831 // Not allowed in an IT (if then) block. |
|
832 void add_S(RegisterID rd, RegisterID rn, RegisterID rm) |
|
833 { |
|
834 if (!((rd | rn | rm) & 8)) |
|
835 m_formatter.oneWordOp7Reg3Reg3Reg3(OP_ADD_reg_T1, rm, rn, rd); |
|
836 else |
|
837 add_S(rd, rn, rm, ShiftTypeAndAmount()); |
|
838 } |
|
839 |
|
840 void ARM_and(RegisterID rd, RegisterID rn, ARMThumbImmediate imm) |
|
841 { |
|
842 ASSERT(!BadReg(rd)); |
|
843 ASSERT(!BadReg(rn)); |
|
844 ASSERT(imm.isEncodedImm()); |
|
845 m_formatter.twoWordOp5i6Imm4Reg4EncodedImm(OP_AND_imm_T1, rn, rd, imm); |
|
846 } |
|
847 |
|
848 void ARM_and(RegisterID rd, RegisterID rn, RegisterID rm, ShiftTypeAndAmount shift) |
|
849 { |
|
850 ASSERT(!BadReg(rd)); |
|
851 ASSERT(!BadReg(rn)); |
|
852 ASSERT(!BadReg(rm)); |
|
853 m_formatter.twoWordOp12Reg4FourFours(OP_AND_reg_T2, rn, FourFours(shift.hi4(), rd, shift.lo4(), rm)); |
|
854 } |
|
855 |
|
856 void ARM_and(RegisterID rd, RegisterID rn, RegisterID rm) |
|
857 { |
|
858 if ((rd == rn) && !((rd | rm) & 8)) |
|
859 m_formatter.oneWordOp10Reg3Reg3(OP_AND_reg_T1, rm, rd); |
|
860 else if ((rd == rm) && !((rd | rn) & 8)) |
|
861 m_formatter.oneWordOp10Reg3Reg3(OP_AND_reg_T1, rn, rd); |
|
862 else |
|
863 ARM_and(rd, rn, rm, ShiftTypeAndAmount()); |
|
864 } |
|
865 |
|
866 void asr(RegisterID rd, RegisterID rm, int32_t shiftAmount) |
|
867 { |
|
868 ASSERT(!BadReg(rd)); |
|
869 ASSERT(!BadReg(rm)); |
|
870 ShiftTypeAndAmount shift(SRType_ASR, shiftAmount); |
|
871 m_formatter.twoWordOp16FourFours(OP_ASR_imm_T1, FourFours(shift.hi4(), rd, shift.lo4(), rm)); |
|
872 } |
|
873 |
|
874 void asr(RegisterID rd, RegisterID rn, RegisterID rm) |
|
875 { |
|
876 ASSERT(!BadReg(rd)); |
|
877 ASSERT(!BadReg(rn)); |
|
878 ASSERT(!BadReg(rm)); |
|
879 m_formatter.twoWordOp12Reg4FourFours(OP_ASR_reg_T2, rn, FourFours(0xf, rd, 0, rm)); |
|
880 } |
|
881 |
|
882 // Only allowed in IT (if then) block if last instruction. |
|
883 JmpSrc b() |
|
884 { |
|
885 m_formatter.twoWordOp16Op16(OP_B_T4a, OP_B_T4b); |
|
886 return JmpSrc(m_formatter.size()); |
|
887 } |
|
888 |
|
889 // Only allowed in IT (if then) block if last instruction. |
|
890 JmpSrc blx(RegisterID rm) |
|
891 { |
|
892 ASSERT(rm != ARMRegisters::pc); |
|
893 m_formatter.oneWordOp8RegReg143(OP_BLX, rm, (RegisterID)8); |
|
894 return JmpSrc(m_formatter.size()); |
|
895 } |
|
896 |
|
897 // Only allowed in IT (if then) block if last instruction. |
|
898 JmpSrc bx(RegisterID rm) |
|
899 { |
|
900 m_formatter.oneWordOp8RegReg143(OP_BX, rm, (RegisterID)0); |
|
901 return JmpSrc(m_formatter.size()); |
|
902 } |
|
903 |
|
904 void bkpt(uint8_t imm=0) |
|
905 { |
|
906 m_formatter.oneWordOp8Imm8(OP_BKPT, imm); |
|
907 } |
|
908 |
|
909 void cmn(RegisterID rn, ARMThumbImmediate imm) |
|
910 { |
|
911 ASSERT(rn != ARMRegisters::pc); |
|
912 ASSERT(imm.isEncodedImm()); |
|
913 |
|
914 m_formatter.twoWordOp5i6Imm4Reg4EncodedImm(OP_CMN_imm, rn, (RegisterID)0xf, imm); |
|
915 } |
|
916 |
|
917 void cmp(RegisterID rn, ARMThumbImmediate imm) |
|
918 { |
|
919 ASSERT(rn != ARMRegisters::pc); |
|
920 ASSERT(imm.isEncodedImm()); |
|
921 |
|
922 if (!(rn & 8) && imm.isUInt8()) |
|
923 m_formatter.oneWordOp5Reg3Imm8(OP_CMP_imm_T1, rn, imm.getUInt8()); |
|
924 else |
|
925 m_formatter.twoWordOp5i6Imm4Reg4EncodedImm(OP_CMP_imm_T2, rn, (RegisterID)0xf, imm); |
|
926 } |
|
927 |
|
928 void cmp(RegisterID rn, RegisterID rm, ShiftTypeAndAmount shift) |
|
929 { |
|
930 ASSERT(rn != ARMRegisters::pc); |
|
931 ASSERT(!BadReg(rm)); |
|
932 m_formatter.twoWordOp12Reg4FourFours(OP_CMP_reg_T2, rn, FourFours(shift.hi4(), 0xf, shift.lo4(), rm)); |
|
933 } |
|
934 |
|
935 void cmp(RegisterID rn, RegisterID rm) |
|
936 { |
|
937 if ((rn | rm) & 8) |
|
938 cmp(rn, rm, ShiftTypeAndAmount()); |
|
939 else |
|
940 m_formatter.oneWordOp10Reg3Reg3(OP_CMP_reg_T1, rm, rn); |
|
941 } |
|
942 |
|
943 // xor is not spelled with an 'e'. :-( |
|
944 void eor(RegisterID rd, RegisterID rn, ARMThumbImmediate imm) |
|
945 { |
|
946 ASSERT(!BadReg(rd)); |
|
947 ASSERT(!BadReg(rn)); |
|
948 ASSERT(imm.isEncodedImm()); |
|
949 m_formatter.twoWordOp5i6Imm4Reg4EncodedImm(OP_EOR_imm_T1, rn, rd, imm); |
|
950 } |
|
951 |
|
952 // xor is not spelled with an 'e'. :-( |
|
953 void eor(RegisterID rd, RegisterID rn, RegisterID rm, ShiftTypeAndAmount shift) |
|
954 { |
|
955 ASSERT(!BadReg(rd)); |
|
956 ASSERT(!BadReg(rn)); |
|
957 ASSERT(!BadReg(rm)); |
|
958 m_formatter.twoWordOp12Reg4FourFours(OP_EOR_reg_T2, rn, FourFours(shift.hi4(), rd, shift.lo4(), rm)); |
|
959 } |
|
960 |
|
961 // xor is not spelled with an 'e'. :-( |
|
962 void eor(RegisterID rd, RegisterID rn, RegisterID rm) |
|
963 { |
|
964 if ((rd == rn) && !((rd | rm) & 8)) |
|
965 m_formatter.oneWordOp10Reg3Reg3(OP_EOR_reg_T1, rm, rd); |
|
966 else if ((rd == rm) && !((rd | rn) & 8)) |
|
967 m_formatter.oneWordOp10Reg3Reg3(OP_EOR_reg_T1, rn, rd); |
|
968 else |
|
969 eor(rd, rn, rm, ShiftTypeAndAmount()); |
|
970 } |
|
971 |
|
972 void it(Condition cond) |
|
973 { |
|
974 m_formatter.oneWordOp8Imm8(OP_IT, ifThenElse(cond)); |
|
975 } |
|
976 |
|
977 void it(Condition cond, bool inst2if) |
|
978 { |
|
979 m_formatter.oneWordOp8Imm8(OP_IT, ifThenElse(cond, inst2if)); |
|
980 } |
|
981 |
|
982 void it(Condition cond, bool inst2if, bool inst3if) |
|
983 { |
|
984 m_formatter.oneWordOp8Imm8(OP_IT, ifThenElse(cond, inst2if, inst3if)); |
|
985 } |
|
986 |
|
987 void it(Condition cond, bool inst2if, bool inst3if, bool inst4if) |
|
988 { |
|
989 m_formatter.oneWordOp8Imm8(OP_IT, ifThenElse(cond, inst2if, inst3if, inst4if)); |
|
990 } |
|
991 |
|
992 // rt == ARMRegisters::pc only allowed if last instruction in IT (if then) block. |
|
993 void ldr(RegisterID rt, RegisterID rn, ARMThumbImmediate imm) |
|
994 { |
|
995 ASSERT(rn != ARMRegisters::pc); // LDR (literal) |
|
996 ASSERT(imm.isUInt12()); |
|
997 |
|
998 if (!((rt | rn) & 8) && imm.isUInt7()) |
|
999 m_formatter.oneWordOp5Imm5Reg3Reg3(OP_LDR_imm_T1, imm.getUInt7() >> 2, rn, rt); |
|
1000 else if ((rn == ARMRegisters::sp) && !(rt & 8) && imm.isUInt10()) |
|
1001 m_formatter.oneWordOp5Reg3Imm8(OP_LDR_imm_T2, rt, imm.getUInt10() >> 2); |
|
1002 else |
|
1003 m_formatter.twoWordOp12Reg4Reg4Imm12(OP_LDR_imm_T3, rn, rt, imm.getUInt12()); |
|
1004 } |
|
1005 |
|
1006 // If index is set, this is a regular offset or a pre-indexed load; |
|
1007 // if index is not set then is is a post-index load. |
|
1008 // |
|
1009 // If wback is set rn is updated - this is a pre or post index load, |
|
1010 // if wback is not set this is a regular offset memory access. |
|
1011 // |
|
1012 // (-255 <= offset <= 255) |
|
1013 // _reg = REG[rn] |
|
1014 // _tmp = _reg + offset |
|
1015 // MEM[index ? _tmp : _reg] = REG[rt] |
|
1016 // if (wback) REG[rn] = _tmp |
|
1017 void ldr(RegisterID rt, RegisterID rn, int offset, bool index, bool wback) |
|
1018 { |
|
1019 ASSERT(rt != ARMRegisters::pc); |
|
1020 ASSERT(rn != ARMRegisters::pc); |
|
1021 ASSERT(index || wback); |
|
1022 ASSERT(!wback | (rt != rn)); |
|
1023 |
|
1024 bool add = true; |
|
1025 if (offset < 0) { |
|
1026 add = false; |
|
1027 offset = -offset; |
|
1028 } |
|
1029 ASSERT((offset & ~0xff) == 0); |
|
1030 |
|
1031 offset |= (wback << 8); |
|
1032 offset |= (add << 9); |
|
1033 offset |= (index << 10); |
|
1034 offset |= (1 << 11); |
|
1035 |
|
1036 m_formatter.twoWordOp12Reg4Reg4Imm12(OP_LDR_imm_T4, rn, rt, offset); |
|
1037 } |
|
1038 |
|
1039 // rt == ARMRegisters::pc only allowed if last instruction in IT (if then) block. |
|
1040 void ldr(RegisterID rt, RegisterID rn, RegisterID rm, unsigned shift=0) |
|
1041 { |
|
1042 ASSERT(rn != ARMRegisters::pc); // LDR (literal) |
|
1043 ASSERT(!BadReg(rm)); |
|
1044 ASSERT(shift <= 3); |
|
1045 |
|
1046 if (!shift && !((rt | rn | rm) & 8)) |
|
1047 m_formatter.oneWordOp7Reg3Reg3Reg3(OP_LDR_reg_T1, rm, rn, rt); |
|
1048 else |
|
1049 m_formatter.twoWordOp12Reg4FourFours(OP_LDR_reg_T2, rn, FourFours(rt, 0, shift, rm)); |
|
1050 } |
|
1051 |
|
1052 // rt == ARMRegisters::pc only allowed if last instruction in IT (if then) block. |
|
1053 void ldrh(RegisterID rt, RegisterID rn, ARMThumbImmediate imm) |
|
1054 { |
|
1055 ASSERT(rn != ARMRegisters::pc); // LDR (literal) |
|
1056 ASSERT(imm.isUInt12()); |
|
1057 |
|
1058 if (!((rt | rn) & 8) && imm.isUInt6()) |
|
1059 m_formatter.oneWordOp5Imm5Reg3Reg3(OP_LDRH_imm_T1, imm.getUInt6() >> 2, rn, rt); |
|
1060 else |
|
1061 m_formatter.twoWordOp12Reg4Reg4Imm12(OP_LDRH_imm_T2, rn, rt, imm.getUInt12()); |
|
1062 } |
|
1063 |
|
1064 // If index is set, this is a regular offset or a pre-indexed load; |
|
1065 // if index is not set then is is a post-index load. |
|
1066 // |
|
1067 // If wback is set rn is updated - this is a pre or post index load, |
|
1068 // if wback is not set this is a regular offset memory access. |
|
1069 // |
|
1070 // (-255 <= offset <= 255) |
|
1071 // _reg = REG[rn] |
|
1072 // _tmp = _reg + offset |
|
1073 // MEM[index ? _tmp : _reg] = REG[rt] |
|
1074 // if (wback) REG[rn] = _tmp |
|
1075 void ldrh(RegisterID rt, RegisterID rn, int offset, bool index, bool wback) |
|
1076 { |
|
1077 ASSERT(rt != ARMRegisters::pc); |
|
1078 ASSERT(rn != ARMRegisters::pc); |
|
1079 ASSERT(index || wback); |
|
1080 ASSERT(!wback | (rt != rn)); |
|
1081 |
|
1082 bool add = true; |
|
1083 if (offset < 0) { |
|
1084 add = false; |
|
1085 offset = -offset; |
|
1086 } |
|
1087 ASSERT((offset & ~0xff) == 0); |
|
1088 |
|
1089 offset |= (wback << 8); |
|
1090 offset |= (add << 9); |
|
1091 offset |= (index << 10); |
|
1092 offset |= (1 << 11); |
|
1093 |
|
1094 m_formatter.twoWordOp12Reg4Reg4Imm12(OP_LDRH_imm_T3, rn, rt, offset); |
|
1095 } |
|
1096 |
|
1097 void ldrh(RegisterID rt, RegisterID rn, RegisterID rm, unsigned shift=0) |
|
1098 { |
|
1099 ASSERT(!BadReg(rt)); // Memory hint |
|
1100 ASSERT(rn != ARMRegisters::pc); // LDRH (literal) |
|
1101 ASSERT(!BadReg(rm)); |
|
1102 ASSERT(shift <= 3); |
|
1103 |
|
1104 if (!shift && !((rt | rn | rm) & 8)) |
|
1105 m_formatter.oneWordOp7Reg3Reg3Reg3(OP_LDRH_reg_T1, rm, rn, rt); |
|
1106 else |
|
1107 m_formatter.twoWordOp12Reg4FourFours(OP_LDRH_reg_T2, rn, FourFours(rt, 0, shift, rm)); |
|
1108 } |
|
1109 |
|
1110 void ldrb(RegisterID rt, RegisterID rn, ARMThumbImmediate imm) |
|
1111 { |
|
1112 ASSERT(rn != ARMRegisters::pc); // LDR (literal) |
|
1113 ASSERT(imm.isUInt12()); |
|
1114 |
|
1115 if (!((rt | rn) & 8) && imm.isUInt5()) |
|
1116 m_formatter.oneWordOp5Imm5Reg3Reg3(OP_LDRB_imm_T1, imm.getUInt5(), rn, rt); |
|
1117 else |
|
1118 m_formatter.twoWordOp12Reg4Reg4Imm12(OP_LDRB_imm_T2, rn, rt, imm.getUInt12()); |
|
1119 } |
|
1120 |
|
1121 void ldrb(RegisterID rt, RegisterID rn, int offset, bool index, bool wback) |
|
1122 { |
|
1123 ASSERT(rt != ARMRegisters::pc); |
|
1124 ASSERT(rn != ARMRegisters::pc); |
|
1125 ASSERT(index || wback); |
|
1126 ASSERT(!wback | (rt != rn)); |
|
1127 |
|
1128 bool add = true; |
|
1129 if (offset < 0) { |
|
1130 add = false; |
|
1131 offset = -offset; |
|
1132 } |
|
1133 |
|
1134 ASSERT(!(offset & ~0xff)); |
|
1135 |
|
1136 offset |= (wback << 8); |
|
1137 offset |= (add << 9); |
|
1138 offset |= (index << 10); |
|
1139 offset |= (1 << 11); |
|
1140 |
|
1141 m_formatter.twoWordOp12Reg4Reg4Imm12(OP_LDRB_imm_T3, rn, rt, offset); |
|
1142 } |
|
1143 |
|
1144 void ldrb(RegisterID rt, RegisterID rn, RegisterID rm, unsigned shift = 0) |
|
1145 { |
|
1146 ASSERT(rn != ARMRegisters::pc); // LDR (literal) |
|
1147 ASSERT(!BadReg(rm)); |
|
1148 ASSERT(shift <= 3); |
|
1149 |
|
1150 if (!shift && !((rt | rn | rm) & 8)) |
|
1151 m_formatter.oneWordOp7Reg3Reg3Reg3(OP_LDRB_reg_T1, rm, rn, rt); |
|
1152 else |
|
1153 m_formatter.twoWordOp12Reg4FourFours(OP_LDRB_reg_T2, rn, FourFours(rt, 0, shift, rm)); |
|
1154 } |
|
1155 |
|
1156 void lsl(RegisterID rd, RegisterID rm, int32_t shiftAmount) |
|
1157 { |
|
1158 ASSERT(!BadReg(rd)); |
|
1159 ASSERT(!BadReg(rm)); |
|
1160 ShiftTypeAndAmount shift(SRType_LSL, shiftAmount); |
|
1161 m_formatter.twoWordOp16FourFours(OP_LSL_imm_T1, FourFours(shift.hi4(), rd, shift.lo4(), rm)); |
|
1162 } |
|
1163 |
|
1164 void lsl(RegisterID rd, RegisterID rn, RegisterID rm) |
|
1165 { |
|
1166 ASSERT(!BadReg(rd)); |
|
1167 ASSERT(!BadReg(rn)); |
|
1168 ASSERT(!BadReg(rm)); |
|
1169 m_formatter.twoWordOp12Reg4FourFours(OP_LSL_reg_T2, rn, FourFours(0xf, rd, 0, rm)); |
|
1170 } |
|
1171 |
|
1172 void lsr(RegisterID rd, RegisterID rm, int32_t shiftAmount) |
|
1173 { |
|
1174 ASSERT(!BadReg(rd)); |
|
1175 ASSERT(!BadReg(rm)); |
|
1176 ShiftTypeAndAmount shift(SRType_LSR, shiftAmount); |
|
1177 m_formatter.twoWordOp16FourFours(OP_LSR_imm_T1, FourFours(shift.hi4(), rd, shift.lo4(), rm)); |
|
1178 } |
|
1179 |
|
1180 void lsr(RegisterID rd, RegisterID rn, RegisterID rm) |
|
1181 { |
|
1182 ASSERT(!BadReg(rd)); |
|
1183 ASSERT(!BadReg(rn)); |
|
1184 ASSERT(!BadReg(rm)); |
|
1185 m_formatter.twoWordOp12Reg4FourFours(OP_LSR_reg_T2, rn, FourFours(0xf, rd, 0, rm)); |
|
1186 } |
|
1187 |
|
1188 void movT3(RegisterID rd, ARMThumbImmediate imm) |
|
1189 { |
|
1190 ASSERT(imm.isValid()); |
|
1191 ASSERT(!imm.isEncodedImm()); |
|
1192 ASSERT(!BadReg(rd)); |
|
1193 |
|
1194 m_formatter.twoWordOp5i6Imm4Reg4EncodedImm(OP_MOV_imm_T3, imm.m_value.imm4, rd, imm); |
|
1195 } |
|
1196 |
|
1197 void mov(RegisterID rd, ARMThumbImmediate imm) |
|
1198 { |
|
1199 ASSERT(imm.isValid()); |
|
1200 ASSERT(!BadReg(rd)); |
|
1201 |
|
1202 if ((rd < 8) && imm.isUInt8()) |
|
1203 m_formatter.oneWordOp5Reg3Imm8(OP_MOV_imm_T1, rd, imm.getUInt8()); |
|
1204 else if (imm.isEncodedImm()) |
|
1205 m_formatter.twoWordOp5i6Imm4Reg4EncodedImm(OP_MOV_imm_T2, 0xf, rd, imm); |
|
1206 else |
|
1207 movT3(rd, imm); |
|
1208 } |
|
1209 |
|
1210 void mov(RegisterID rd, RegisterID rm) |
|
1211 { |
|
1212 m_formatter.oneWordOp8RegReg143(OP_MOV_reg_T1, rm, rd); |
|
1213 } |
|
1214 |
|
1215 void movt(RegisterID rd, ARMThumbImmediate imm) |
|
1216 { |
|
1217 ASSERT(imm.isUInt16()); |
|
1218 ASSERT(!BadReg(rd)); |
|
1219 m_formatter.twoWordOp5i6Imm4Reg4EncodedImm(OP_MOVT, imm.m_value.imm4, rd, imm); |
|
1220 } |
|
1221 |
|
1222 void mvn(RegisterID rd, ARMThumbImmediate imm) |
|
1223 { |
|
1224 ASSERT(imm.isEncodedImm()); |
|
1225 ASSERT(!BadReg(rd)); |
|
1226 |
|
1227 m_formatter.twoWordOp5i6Imm4Reg4EncodedImm(OP_MVN_imm, 0xf, rd, imm); |
|
1228 } |
|
1229 |
|
1230 void mvn(RegisterID rd, RegisterID rm, ShiftTypeAndAmount shift) |
|
1231 { |
|
1232 ASSERT(!BadReg(rd)); |
|
1233 ASSERT(!BadReg(rm)); |
|
1234 m_formatter.twoWordOp16FourFours(OP_MVN_reg_T2, FourFours(shift.hi4(), rd, shift.lo4(), rm)); |
|
1235 } |
|
1236 |
|
1237 void mvn(RegisterID rd, RegisterID rm) |
|
1238 { |
|
1239 if (!((rd | rm) & 8)) |
|
1240 m_formatter.oneWordOp10Reg3Reg3(OP_MVN_reg_T1, rm, rd); |
|
1241 else |
|
1242 mvn(rd, rm, ShiftTypeAndAmount()); |
|
1243 } |
|
1244 |
|
1245 void neg(RegisterID rd, RegisterID rm) |
|
1246 { |
|
1247 ARMThumbImmediate zero = ARMThumbImmediate::makeUInt12(0); |
|
1248 sub(rd, zero, rm); |
|
1249 } |
|
1250 |
|
1251 void orr(RegisterID rd, RegisterID rn, ARMThumbImmediate imm) |
|
1252 { |
|
1253 ASSERT(!BadReg(rd)); |
|
1254 ASSERT(!BadReg(rn)); |
|
1255 ASSERT(imm.isEncodedImm()); |
|
1256 m_formatter.twoWordOp5i6Imm4Reg4EncodedImm(OP_ORR_imm_T1, rn, rd, imm); |
|
1257 } |
|
1258 |
|
1259 void orr(RegisterID rd, RegisterID rn, RegisterID rm, ShiftTypeAndAmount shift) |
|
1260 { |
|
1261 ASSERT(!BadReg(rd)); |
|
1262 ASSERT(!BadReg(rn)); |
|
1263 ASSERT(!BadReg(rm)); |
|
1264 m_formatter.twoWordOp12Reg4FourFours(OP_ORR_reg_T2, rn, FourFours(shift.hi4(), rd, shift.lo4(), rm)); |
|
1265 } |
|
1266 |
|
1267 void orr(RegisterID rd, RegisterID rn, RegisterID rm) |
|
1268 { |
|
1269 if ((rd == rn) && !((rd | rm) & 8)) |
|
1270 m_formatter.oneWordOp10Reg3Reg3(OP_ORR_reg_T1, rm, rd); |
|
1271 else if ((rd == rm) && !((rd | rn) & 8)) |
|
1272 m_formatter.oneWordOp10Reg3Reg3(OP_ORR_reg_T1, rn, rd); |
|
1273 else |
|
1274 orr(rd, rn, rm, ShiftTypeAndAmount()); |
|
1275 } |
|
1276 |
|
1277 void orr_S(RegisterID rd, RegisterID rn, RegisterID rm, ShiftTypeAndAmount shift) |
|
1278 { |
|
1279 ASSERT(!BadReg(rd)); |
|
1280 ASSERT(!BadReg(rn)); |
|
1281 ASSERT(!BadReg(rm)); |
|
1282 m_formatter.twoWordOp12Reg4FourFours(OP_ORR_S_reg_T2, rn, FourFours(shift.hi4(), rd, shift.lo4(), rm)); |
|
1283 } |
|
1284 |
|
1285 void orr_S(RegisterID rd, RegisterID rn, RegisterID rm) |
|
1286 { |
|
1287 if ((rd == rn) && !((rd | rm) & 8)) |
|
1288 m_formatter.oneWordOp10Reg3Reg3(OP_ORR_reg_T1, rm, rd); |
|
1289 else if ((rd == rm) && !((rd | rn) & 8)) |
|
1290 m_formatter.oneWordOp10Reg3Reg3(OP_ORR_reg_T1, rn, rd); |
|
1291 else |
|
1292 orr_S(rd, rn, rm, ShiftTypeAndAmount()); |
|
1293 } |
|
1294 |
|
1295 void ror(RegisterID rd, RegisterID rm, int32_t shiftAmount) |
|
1296 { |
|
1297 ASSERT(!BadReg(rd)); |
|
1298 ASSERT(!BadReg(rm)); |
|
1299 ShiftTypeAndAmount shift(SRType_ROR, shiftAmount); |
|
1300 m_formatter.twoWordOp16FourFours(OP_ROR_imm_T1, FourFours(shift.hi4(), rd, shift.lo4(), rm)); |
|
1301 } |
|
1302 |
|
1303 void ror(RegisterID rd, RegisterID rn, RegisterID rm) |
|
1304 { |
|
1305 ASSERT(!BadReg(rd)); |
|
1306 ASSERT(!BadReg(rn)); |
|
1307 ASSERT(!BadReg(rm)); |
|
1308 m_formatter.twoWordOp12Reg4FourFours(OP_ROR_reg_T2, rn, FourFours(0xf, rd, 0, rm)); |
|
1309 } |
|
1310 |
|
1311 void smull(RegisterID rdLo, RegisterID rdHi, RegisterID rn, RegisterID rm) |
|
1312 { |
|
1313 ASSERT(!BadReg(rdLo)); |
|
1314 ASSERT(!BadReg(rdHi)); |
|
1315 ASSERT(!BadReg(rn)); |
|
1316 ASSERT(!BadReg(rm)); |
|
1317 ASSERT(rdLo != rdHi); |
|
1318 m_formatter.twoWordOp12Reg4FourFours(OP_SMULL_T1, rn, FourFours(rdLo, rdHi, 0, rm)); |
|
1319 } |
|
1320 |
|
1321 // rt == ARMRegisters::pc only allowed if last instruction in IT (if then) block. |
|
1322 void str(RegisterID rt, RegisterID rn, ARMThumbImmediate imm) |
|
1323 { |
|
1324 ASSERT(rt != ARMRegisters::pc); |
|
1325 ASSERT(rn != ARMRegisters::pc); |
|
1326 ASSERT(imm.isUInt12()); |
|
1327 |
|
1328 if (!((rt | rn) & 8) && imm.isUInt7()) |
|
1329 m_formatter.oneWordOp5Imm5Reg3Reg3(OP_STR_imm_T1, imm.getUInt7() >> 2, rn, rt); |
|
1330 else if ((rn == ARMRegisters::sp) && !(rt & 8) && imm.isUInt10()) |
|
1331 m_formatter.oneWordOp5Reg3Imm8(OP_STR_imm_T2, rt, imm.getUInt10() >> 2); |
|
1332 else |
|
1333 m_formatter.twoWordOp12Reg4Reg4Imm12(OP_STR_imm_T3, rn, rt, imm.getUInt12()); |
|
1334 } |
|
1335 |
|
1336 // If index is set, this is a regular offset or a pre-indexed store; |
|
1337 // if index is not set then is is a post-index store. |
|
1338 // |
|
1339 // If wback is set rn is updated - this is a pre or post index store, |
|
1340 // if wback is not set this is a regular offset memory access. |
|
1341 // |
|
1342 // (-255 <= offset <= 255) |
|
1343 // _reg = REG[rn] |
|
1344 // _tmp = _reg + offset |
|
1345 // MEM[index ? _tmp : _reg] = REG[rt] |
|
1346 // if (wback) REG[rn] = _tmp |
|
1347 void str(RegisterID rt, RegisterID rn, int offset, bool index, bool wback) |
|
1348 { |
|
1349 ASSERT(rt != ARMRegisters::pc); |
|
1350 ASSERT(rn != ARMRegisters::pc); |
|
1351 ASSERT(index || wback); |
|
1352 ASSERT(!wback | (rt != rn)); |
|
1353 |
|
1354 bool add = true; |
|
1355 if (offset < 0) { |
|
1356 add = false; |
|
1357 offset = -offset; |
|
1358 } |
|
1359 ASSERT((offset & ~0xff) == 0); |
|
1360 |
|
1361 offset |= (wback << 8); |
|
1362 offset |= (add << 9); |
|
1363 offset |= (index << 10); |
|
1364 offset |= (1 << 11); |
|
1365 |
|
1366 m_formatter.twoWordOp12Reg4Reg4Imm12(OP_STR_imm_T4, rn, rt, offset); |
|
1367 } |
|
1368 |
|
1369 // rt == ARMRegisters::pc only allowed if last instruction in IT (if then) block. |
|
1370 void str(RegisterID rt, RegisterID rn, RegisterID rm, unsigned shift=0) |
|
1371 { |
|
1372 ASSERT(rn != ARMRegisters::pc); |
|
1373 ASSERT(!BadReg(rm)); |
|
1374 ASSERT(shift <= 3); |
|
1375 |
|
1376 if (!shift && !((rt | rn | rm) & 8)) |
|
1377 m_formatter.oneWordOp7Reg3Reg3Reg3(OP_STR_reg_T1, rm, rn, rt); |
|
1378 else |
|
1379 m_formatter.twoWordOp12Reg4FourFours(OP_STR_reg_T2, rn, FourFours(rt, 0, shift, rm)); |
|
1380 } |
|
1381 |
|
1382 void sub(RegisterID rd, RegisterID rn, ARMThumbImmediate imm) |
|
1383 { |
|
1384 // Rd can only be SP if Rn is also SP. |
|
1385 ASSERT((rd != ARMRegisters::sp) || (rn == ARMRegisters::sp)); |
|
1386 ASSERT(rd != ARMRegisters::pc); |
|
1387 ASSERT(rn != ARMRegisters::pc); |
|
1388 ASSERT(imm.isValid()); |
|
1389 |
|
1390 if ((rn == ARMRegisters::sp) && (rd == ARMRegisters::sp) && imm.isUInt9()) { |
|
1391 m_formatter.oneWordOp9Imm7(OP_SUB_SP_imm_T1, imm.getUInt9() >> 2); |
|
1392 return; |
|
1393 } else if (!((rd | rn) & 8)) { |
|
1394 if (imm.isUInt3()) { |
|
1395 m_formatter.oneWordOp7Reg3Reg3Reg3(OP_SUB_imm_T1, (RegisterID)imm.getUInt3(), rn, rd); |
|
1396 return; |
|
1397 } else if ((rd == rn) && imm.isUInt8()) { |
|
1398 m_formatter.oneWordOp5Reg3Imm8(OP_SUB_imm_T2, rd, imm.getUInt8()); |
|
1399 return; |
|
1400 } |
|
1401 } |
|
1402 |
|
1403 if (imm.isEncodedImm()) |
|
1404 m_formatter.twoWordOp5i6Imm4Reg4EncodedImm(OP_SUB_imm_T3, rn, rd, imm); |
|
1405 else { |
|
1406 ASSERT(imm.isUInt12()); |
|
1407 m_formatter.twoWordOp5i6Imm4Reg4EncodedImm(OP_SUB_imm_T4, rn, rd, imm); |
|
1408 } |
|
1409 } |
|
1410 |
|
1411 void sub(RegisterID rd, ARMThumbImmediate imm, RegisterID rn) |
|
1412 { |
|
1413 ASSERT(rd != ARMRegisters::pc); |
|
1414 ASSERT(rn != ARMRegisters::pc); |
|
1415 ASSERT(imm.isValid()); |
|
1416 ASSERT(imm.isUInt12()); |
|
1417 |
|
1418 if (!((rd | rn) & 8) && !imm.getUInt12()) |
|
1419 m_formatter.oneWordOp10Reg3Reg3(OP_RSB_imm_T1, rn, rd); |
|
1420 else |
|
1421 m_formatter.twoWordOp5i6Imm4Reg4EncodedImm(OP_RSB_imm_T2, rn, rd, imm); |
|
1422 } |
|
1423 |
|
1424 void sub(RegisterID rd, RegisterID rn, RegisterID rm, ShiftTypeAndAmount shift) |
|
1425 { |
|
1426 ASSERT((rd != ARMRegisters::sp) || (rn == ARMRegisters::sp)); |
|
1427 ASSERT(rd != ARMRegisters::pc); |
|
1428 ASSERT(rn != ARMRegisters::pc); |
|
1429 ASSERT(!BadReg(rm)); |
|
1430 m_formatter.twoWordOp12Reg4FourFours(OP_SUB_reg_T2, rn, FourFours(shift.hi4(), rd, shift.lo4(), rm)); |
|
1431 } |
|
1432 |
|
1433 // NOTE: In an IT block, add doesn't modify the flags register. |
|
1434 void sub(RegisterID rd, RegisterID rn, RegisterID rm) |
|
1435 { |
|
1436 if (!((rd | rn | rm) & 8)) |
|
1437 m_formatter.oneWordOp7Reg3Reg3Reg3(OP_SUB_reg_T1, rm, rn, rd); |
|
1438 else |
|
1439 sub(rd, rn, rm, ShiftTypeAndAmount()); |
|
1440 } |
|
1441 |
|
1442 // Not allowed in an IT (if then) block. |
|
1443 void sub_S(RegisterID rd, RegisterID rn, ARMThumbImmediate imm) |
|
1444 { |
|
1445 // Rd can only be SP if Rn is also SP. |
|
1446 ASSERT((rd != ARMRegisters::sp) || (rn == ARMRegisters::sp)); |
|
1447 ASSERT(rd != ARMRegisters::pc); |
|
1448 ASSERT(rn != ARMRegisters::pc); |
|
1449 ASSERT(imm.isValid()); |
|
1450 |
|
1451 if ((rn == ARMRegisters::sp) && (rd == ARMRegisters::sp) && imm.isUInt9()) { |
|
1452 m_formatter.oneWordOp9Imm7(OP_SUB_SP_imm_T1, imm.getUInt9() >> 2); |
|
1453 return; |
|
1454 } else if (!((rd | rn) & 8)) { |
|
1455 if (imm.isUInt3()) { |
|
1456 m_formatter.oneWordOp7Reg3Reg3Reg3(OP_SUB_imm_T1, (RegisterID)imm.getUInt3(), rn, rd); |
|
1457 return; |
|
1458 } else if ((rd == rn) && imm.isUInt8()) { |
|
1459 m_formatter.oneWordOp5Reg3Imm8(OP_SUB_imm_T2, rd, imm.getUInt8()); |
|
1460 return; |
|
1461 } |
|
1462 } |
|
1463 |
|
1464 m_formatter.twoWordOp5i6Imm4Reg4EncodedImm(OP_SUB_S_imm_T3, rn, rd, imm); |
|
1465 } |
|
1466 |
|
1467 // Not allowed in an IT (if then) block? |
|
1468 void sub_S(RegisterID rd, RegisterID rn, RegisterID rm, ShiftTypeAndAmount shift) |
|
1469 { |
|
1470 ASSERT((rd != ARMRegisters::sp) || (rn == ARMRegisters::sp)); |
|
1471 ASSERT(rd != ARMRegisters::pc); |
|
1472 ASSERT(rn != ARMRegisters::pc); |
|
1473 ASSERT(!BadReg(rm)); |
|
1474 m_formatter.twoWordOp12Reg4FourFours(OP_SUB_S_reg_T2, rn, FourFours(shift.hi4(), rd, shift.lo4(), rm)); |
|
1475 } |
|
1476 |
|
1477 // Not allowed in an IT (if then) block. |
|
1478 void sub_S(RegisterID rd, RegisterID rn, RegisterID rm) |
|
1479 { |
|
1480 if (!((rd | rn | rm) & 8)) |
|
1481 m_formatter.oneWordOp7Reg3Reg3Reg3(OP_SUB_reg_T1, rm, rn, rd); |
|
1482 else |
|
1483 sub_S(rd, rn, rm, ShiftTypeAndAmount()); |
|
1484 } |
|
1485 |
|
1486 void tst(RegisterID rn, ARMThumbImmediate imm) |
|
1487 { |
|
1488 ASSERT(!BadReg(rn)); |
|
1489 ASSERT(imm.isEncodedImm()); |
|
1490 |
|
1491 m_formatter.twoWordOp5i6Imm4Reg4EncodedImm(OP_TST_imm, rn, (RegisterID)0xf, imm); |
|
1492 } |
|
1493 |
|
1494 void tst(RegisterID rn, RegisterID rm, ShiftTypeAndAmount shift) |
|
1495 { |
|
1496 ASSERT(!BadReg(rn)); |
|
1497 ASSERT(!BadReg(rm)); |
|
1498 m_formatter.twoWordOp12Reg4FourFours(OP_TST_reg_T2, rn, FourFours(shift.hi4(), 0xf, shift.lo4(), rm)); |
|
1499 } |
|
1500 |
|
1501 void tst(RegisterID rn, RegisterID rm) |
|
1502 { |
|
1503 if ((rn | rm) & 8) |
|
1504 tst(rn, rm, ShiftTypeAndAmount()); |
|
1505 else |
|
1506 m_formatter.oneWordOp10Reg3Reg3(OP_TST_reg_T1, rm, rn); |
|
1507 } |
|
1508 |
|
1509 void vadd_F64(FPDoubleRegisterID rd, FPDoubleRegisterID rn, FPDoubleRegisterID rm) |
|
1510 { |
|
1511 m_formatter.vfpOp(OP_VADD_T2, OP_VADD_T2b, true, rn, rd, rm); |
|
1512 } |
|
1513 |
|
1514 void vcmp_F64(FPDoubleRegisterID rd, FPDoubleRegisterID rm) |
|
1515 { |
|
1516 m_formatter.vfpOp(OP_VCMP_T1, OP_VCMP_T1b, true, VFPOperand(4), rd, rm); |
|
1517 } |
|
1518 |
|
1519 void vcvt_F64_S32(FPDoubleRegisterID rd, FPSingleRegisterID rm) |
|
1520 { |
|
1521 // boolean values are 64bit (toInt, unsigned, roundZero) |
|
1522 m_formatter.vfpOp(OP_VCVT_FPIVFP, OP_VCVT_FPIVFPb, true, vcvtOp(false, false, false), rd, rm); |
|
1523 } |
|
1524 |
|
1525 void vcvtr_S32_F64(FPSingleRegisterID rd, FPDoubleRegisterID rm) |
|
1526 { |
|
1527 // boolean values are 64bit (toInt, unsigned, roundZero) |
|
1528 m_formatter.vfpOp(OP_VCVT_FPIVFP, OP_VCVT_FPIVFPb, true, vcvtOp(true, false, true), rd, rm); |
|
1529 } |
|
1530 |
|
1531 void vdiv_F64(FPDoubleRegisterID rd, FPDoubleRegisterID rn, FPDoubleRegisterID rm) |
|
1532 { |
|
1533 m_formatter.vfpOp(OP_VDIV, OP_VDIVb, true, rn, rd, rm); |
|
1534 } |
|
1535 |
|
1536 void vldr(FPDoubleRegisterID rd, RegisterID rn, int32_t imm) |
|
1537 { |
|
1538 m_formatter.vfpMemOp(OP_VLDR, OP_VLDRb, true, rn, rd, imm); |
|
1539 } |
|
1540 |
|
1541 void vmov_F64_0(FPDoubleRegisterID rd) |
|
1542 { |
|
1543 m_formatter.vfpOp(OP_VMOV_IMM_T2, OP_VMOV_IMM_T2b, true, VFPOperand(0), rd, VFPOperand(0)); |
|
1544 } |
|
1545 |
|
1546 void vmov(RegisterID rd, FPSingleRegisterID rn) |
|
1547 { |
|
1548 ASSERT(!BadReg(rd)); |
|
1549 m_formatter.vfpOp(OP_VMOV_CtoS, OP_VMOV_CtoSb, false, rn, rd, VFPOperand(0)); |
|
1550 } |
|
1551 |
|
1552 void vmov(FPSingleRegisterID rd, RegisterID rn) |
|
1553 { |
|
1554 ASSERT(!BadReg(rn)); |
|
1555 m_formatter.vfpOp(OP_VMOV_StoC, OP_VMOV_StoCb, false, rd, rn, VFPOperand(0)); |
|
1556 } |
|
1557 |
|
1558 void vmrs(RegisterID reg = ARMRegisters::pc) |
|
1559 { |
|
1560 ASSERT(reg != ARMRegisters::sp); |
|
1561 m_formatter.vfpOp(OP_VMRS, OP_VMRSb, false, VFPOperand(1), VFPOperand(0x10 | reg), VFPOperand(0)); |
|
1562 } |
|
1563 |
|
1564 void vmul_F64(FPDoubleRegisterID rd, FPDoubleRegisterID rn, FPDoubleRegisterID rm) |
|
1565 { |
|
1566 m_formatter.vfpOp(OP_VMUL_T2, OP_VMUL_T2b, true, rn, rd, rm); |
|
1567 } |
|
1568 |
|
1569 void vstr(FPDoubleRegisterID rd, RegisterID rn, int32_t imm) |
|
1570 { |
|
1571 m_formatter.vfpMemOp(OP_VSTR, OP_VSTRb, true, rn, rd, imm); |
|
1572 } |
|
1573 |
|
1574 void vsub_F64(FPDoubleRegisterID rd, FPDoubleRegisterID rn, FPDoubleRegisterID rm) |
|
1575 { |
|
1576 m_formatter.vfpOp(OP_VSUB_T2, OP_VSUB_T2b, true, rn, rd, rm); |
|
1577 } |
|
1578 |
|
1579 JmpDst label() |
|
1580 { |
|
1581 return JmpDst(m_formatter.size()); |
|
1582 } |
|
1583 |
|
1584 JmpDst align(int alignment) |
|
1585 { |
|
1586 while (!m_formatter.isAligned(alignment)) |
|
1587 bkpt(); |
|
1588 |
|
1589 return label(); |
|
1590 } |
|
1591 |
|
1592 static void* getRelocatedAddress(void* code, JmpSrc jump) |
|
1593 { |
|
1594 ASSERT(jump.m_offset != -1); |
|
1595 |
|
1596 return reinterpret_cast<void*>(reinterpret_cast<ptrdiff_t>(code) + jump.m_offset); |
|
1597 } |
|
1598 |
|
1599 static void* getRelocatedAddress(void* code, JmpDst destination) |
|
1600 { |
|
1601 ASSERT(destination.m_offset != -1); |
|
1602 |
|
1603 return reinterpret_cast<void*>(reinterpret_cast<ptrdiff_t>(code) + destination.m_offset); |
|
1604 } |
|
1605 |
|
1606 static int getDifferenceBetweenLabels(JmpDst src, JmpDst dst) |
|
1607 { |
|
1608 return dst.m_offset - src.m_offset; |
|
1609 } |
|
1610 |
|
1611 static int getDifferenceBetweenLabels(JmpDst src, JmpSrc dst) |
|
1612 { |
|
1613 return dst.m_offset - src.m_offset; |
|
1614 } |
|
1615 |
|
1616 static int getDifferenceBetweenLabels(JmpSrc src, JmpDst dst) |
|
1617 { |
|
1618 return dst.m_offset - src.m_offset; |
|
1619 } |
|
1620 |
|
1621 // Assembler admin methods: |
|
1622 |
|
1623 size_t size() const |
|
1624 { |
|
1625 return m_formatter.size(); |
|
1626 } |
|
1627 |
|
1628 void* executableCopy(ExecutablePool* allocator) |
|
1629 { |
|
1630 void* copy = m_formatter.executableCopy(allocator); |
|
1631 |
|
1632 unsigned jumpCount = m_jumpsToLink.size(); |
|
1633 for (unsigned i = 0; i < jumpCount; ++i) { |
|
1634 uint16_t* location = reinterpret_cast<uint16_t*>(reinterpret_cast<intptr_t>(copy) + m_jumpsToLink[i].from); |
|
1635 uint16_t* target = reinterpret_cast<uint16_t*>(reinterpret_cast<intptr_t>(copy) + m_jumpsToLink[i].to); |
|
1636 linkJumpAbsolute(location, target); |
|
1637 } |
|
1638 m_jumpsToLink.clear(); |
|
1639 |
|
1640 ASSERT(copy); |
|
1641 return copy; |
|
1642 } |
|
1643 |
|
1644 static unsigned getCallReturnOffset(JmpSrc call) |
|
1645 { |
|
1646 ASSERT(call.m_offset >= 0); |
|
1647 return call.m_offset; |
|
1648 } |
|
1649 |
|
1650 // Linking & patching: |
|
1651 // |
|
1652 // 'link' and 'patch' methods are for use on unprotected code - such as the code |
|
1653 // within the AssemblerBuffer, and code being patched by the patch buffer. Once |
|
1654 // code has been finalized it is (platform support permitting) within a non- |
|
1655 // writable region of memory; to modify the code in an execute-only execuable |
|
1656 // pool the 'repatch' and 'relink' methods should be used. |
|
1657 |
|
1658 void linkJump(JmpSrc from, JmpDst to) |
|
1659 { |
|
1660 ASSERT(to.m_offset != -1); |
|
1661 ASSERT(from.m_offset != -1); |
|
1662 m_jumpsToLink.append(LinkRecord(from.m_offset, to.m_offset)); |
|
1663 } |
|
1664 |
|
1665 static void linkJump(void* code, JmpSrc from, void* to) |
|
1666 { |
|
1667 ASSERT(from.m_offset != -1); |
|
1668 |
|
1669 uint16_t* location = reinterpret_cast<uint16_t*>(reinterpret_cast<intptr_t>(code) + from.m_offset); |
|
1670 linkJumpAbsolute(location, to); |
|
1671 } |
|
1672 |
|
1673 // bah, this mathod should really be static, since it is used by the LinkBuffer. |
|
1674 // return a bool saying whether the link was successful? |
|
1675 static void linkCall(void* code, JmpSrc from, void* to) |
|
1676 { |
|
1677 ASSERT(!(reinterpret_cast<intptr_t>(code) & 1)); |
|
1678 ASSERT(from.m_offset != -1); |
|
1679 ASSERT(reinterpret_cast<intptr_t>(to) & 1); |
|
1680 |
|
1681 setPointer(reinterpret_cast<uint16_t*>(reinterpret_cast<intptr_t>(code) + from.m_offset) - 1, to); |
|
1682 } |
|
1683 |
|
1684 static void linkPointer(void* code, JmpDst where, void* value) |
|
1685 { |
|
1686 setPointer(reinterpret_cast<char*>(code) + where.m_offset, value); |
|
1687 } |
|
1688 |
|
1689 static void relinkJump(void* from, void* to) |
|
1690 { |
|
1691 ASSERT(!(reinterpret_cast<intptr_t>(from) & 1)); |
|
1692 ASSERT(!(reinterpret_cast<intptr_t>(to) & 1)); |
|
1693 |
|
1694 linkJumpAbsolute(reinterpret_cast<uint16_t*>(from), to); |
|
1695 |
|
1696 ExecutableAllocator::cacheFlush(reinterpret_cast<uint16_t*>(from) - 5, 5 * sizeof(uint16_t)); |
|
1697 } |
|
1698 |
|
1699 static void relinkCall(void* from, void* to) |
|
1700 { |
|
1701 ASSERT(!(reinterpret_cast<intptr_t>(from) & 1)); |
|
1702 ASSERT(reinterpret_cast<intptr_t>(to) & 1); |
|
1703 |
|
1704 setPointer(reinterpret_cast<uint16_t*>(from) - 1, to); |
|
1705 |
|
1706 ExecutableAllocator::cacheFlush(reinterpret_cast<uint16_t*>(from) - 5, 4 * sizeof(uint16_t)); |
|
1707 } |
|
1708 |
|
1709 static void repatchInt32(void* where, int32_t value) |
|
1710 { |
|
1711 ASSERT(!(reinterpret_cast<intptr_t>(where) & 1)); |
|
1712 |
|
1713 setInt32(where, value); |
|
1714 |
|
1715 ExecutableAllocator::cacheFlush(reinterpret_cast<uint16_t*>(where) - 4, 4 * sizeof(uint16_t)); |
|
1716 } |
|
1717 |
|
1718 static void repatchPointer(void* where, void* value) |
|
1719 { |
|
1720 ASSERT(!(reinterpret_cast<intptr_t>(where) & 1)); |
|
1721 |
|
1722 setPointer(where, value); |
|
1723 |
|
1724 ExecutableAllocator::cacheFlush(reinterpret_cast<uint16_t*>(where) - 4, 4 * sizeof(uint16_t)); |
|
1725 } |
|
1726 |
|
1727 static void repatchLoadPtrToLEA(void* where) |
|
1728 { |
|
1729 ASSERT(!(reinterpret_cast<intptr_t>(where) & 1)); |
|
1730 uint16_t* loadOp = reinterpret_cast<uint16_t*>(where) + 4; |
|
1731 |
|
1732 ASSERT((loadOp[0] & 0xfff0) == OP_LDR_reg_T2); |
|
1733 ASSERT((loadOp[1] & 0x0ff0) == 0); |
|
1734 int rn = loadOp[0] & 0xf; |
|
1735 int rt = loadOp[1] >> 12; |
|
1736 int rm = loadOp[1] & 0xf; |
|
1737 |
|
1738 loadOp[0] = OP_ADD_reg_T3 | rn; |
|
1739 loadOp[1] = rt << 8 | rm; |
|
1740 ExecutableAllocator::cacheFlush(loadOp, sizeof(uint32_t)); |
|
1741 } |
|
1742 |
|
1743 private: |
|
1744 // VFP operations commonly take one or more 5-bit operands, typically representing a |
|
1745 // floating point register number. This will commonly be encoded in the instruction |
|
1746 // in two parts, with one single bit field, and one 4-bit field. In the case of |
|
1747 // double precision operands the high bit of the register number will be encoded |
|
1748 // separately, and for single precision operands the high bit of the register number |
|
1749 // will be encoded individually. |
|
1750 // VFPOperand encapsulates a 5-bit VFP operand, with bits 0..3 containing the 4-bit |
|
1751 // field to be encoded together in the instruction (the low 4-bits of a double |
|
1752 // register number, or the high 4-bits of a single register number), and bit 4 |
|
1753 // contains the bit value to be encoded individually. |
|
1754 struct VFPOperand { |
|
1755 explicit VFPOperand(uint32_t value) |
|
1756 : m_value(value) |
|
1757 { |
|
1758 ASSERT(!(m_value & ~0x1f)); |
|
1759 } |
|
1760 |
|
1761 VFPOperand(FPDoubleRegisterID reg) |
|
1762 : m_value(reg) |
|
1763 { |
|
1764 } |
|
1765 |
|
1766 VFPOperand(RegisterID reg) |
|
1767 : m_value(reg) |
|
1768 { |
|
1769 } |
|
1770 |
|
1771 VFPOperand(FPSingleRegisterID reg) |
|
1772 : m_value(((reg & 1) << 4) | (reg >> 1)) // rotate the lowest bit of 'reg' to the top. |
|
1773 { |
|
1774 } |
|
1775 |
|
1776 uint32_t bits1() |
|
1777 { |
|
1778 return m_value >> 4; |
|
1779 } |
|
1780 |
|
1781 uint32_t bits4() |
|
1782 { |
|
1783 return m_value & 0xf; |
|
1784 } |
|
1785 |
|
1786 uint32_t m_value; |
|
1787 }; |
|
1788 |
|
1789 VFPOperand vcvtOp(bool toInteger, bool isUnsigned, bool isRoundZero) |
|
1790 { |
|
1791 // Cannot specify rounding when converting to float. |
|
1792 ASSERT(toInteger || !isRoundZero); |
|
1793 |
|
1794 uint32_t op = 0x8; |
|
1795 if (toInteger) { |
|
1796 // opc2 indicates both toInteger & isUnsigned. |
|
1797 op |= isUnsigned ? 0x4 : 0x5; |
|
1798 // 'op' field in instruction is isRoundZero |
|
1799 if (isRoundZero) |
|
1800 op |= 0x10; |
|
1801 } else { |
|
1802 // 'op' field in instruction is isUnsigned |
|
1803 if (!isUnsigned) |
|
1804 op |= 0x10; |
|
1805 } |
|
1806 return VFPOperand(op); |
|
1807 } |
|
1808 |
|
1809 static void setInt32(void* code, uint32_t value) |
|
1810 { |
|
1811 uint16_t* location = reinterpret_cast<uint16_t*>(code); |
|
1812 ASSERT(isMOV_imm_T3(location - 4) && isMOVT(location - 2)); |
|
1813 |
|
1814 ARMThumbImmediate lo16 = ARMThumbImmediate::makeUInt16(static_cast<uint16_t>(value)); |
|
1815 ARMThumbImmediate hi16 = ARMThumbImmediate::makeUInt16(static_cast<uint16_t>(value >> 16)); |
|
1816 location[-4] = twoWordOp5i6Imm4Reg4EncodedImmFirst(OP_MOV_imm_T3, lo16); |
|
1817 location[-3] = twoWordOp5i6Imm4Reg4EncodedImmSecond((location[-3] >> 8) & 0xf, lo16); |
|
1818 location[-2] = twoWordOp5i6Imm4Reg4EncodedImmFirst(OP_MOVT, hi16); |
|
1819 location[-1] = twoWordOp5i6Imm4Reg4EncodedImmSecond((location[-1] >> 8) & 0xf, hi16); |
|
1820 |
|
1821 ExecutableAllocator::cacheFlush(location - 4, 4 * sizeof(uint16_t)); |
|
1822 } |
|
1823 |
|
1824 static void setPointer(void* code, void* value) |
|
1825 { |
|
1826 setInt32(code, reinterpret_cast<uint32_t>(value)); |
|
1827 } |
|
1828 |
|
1829 static bool isB(void* address) |
|
1830 { |
|
1831 uint16_t* instruction = static_cast<uint16_t*>(address); |
|
1832 return ((instruction[0] & 0xf800) == OP_B_T4a) && ((instruction[1] & 0xd000) == OP_B_T4b); |
|
1833 } |
|
1834 |
|
1835 static bool isBX(void* address) |
|
1836 { |
|
1837 uint16_t* instruction = static_cast<uint16_t*>(address); |
|
1838 return (instruction[0] & 0xff87) == OP_BX; |
|
1839 } |
|
1840 |
|
1841 static bool isMOV_imm_T3(void* address) |
|
1842 { |
|
1843 uint16_t* instruction = static_cast<uint16_t*>(address); |
|
1844 return ((instruction[0] & 0xFBF0) == OP_MOV_imm_T3) && ((instruction[1] & 0x8000) == 0); |
|
1845 } |
|
1846 |
|
1847 static bool isMOVT(void* address) |
|
1848 { |
|
1849 uint16_t* instruction = static_cast<uint16_t*>(address); |
|
1850 return ((instruction[0] & 0xFBF0) == OP_MOVT) && ((instruction[1] & 0x8000) == 0); |
|
1851 } |
|
1852 |
|
1853 static bool isNOP_T1(void* address) |
|
1854 { |
|
1855 uint16_t* instruction = static_cast<uint16_t*>(address); |
|
1856 return instruction[0] == OP_NOP_T1; |
|
1857 } |
|
1858 |
|
1859 static bool isNOP_T2(void* address) |
|
1860 { |
|
1861 uint16_t* instruction = static_cast<uint16_t*>(address); |
|
1862 return (instruction[0] == OP_NOP_T2a) && (instruction[1] == OP_NOP_T2b); |
|
1863 } |
|
1864 |
|
1865 static void linkJumpAbsolute(uint16_t* instruction, void* target) |
|
1866 { |
|
1867 // FIMXE: this should be up in the MacroAssembler layer. :-( |
|
1868 const uint16_t JUMP_TEMPORARY_REGISTER = ARMRegisters::ip; |
|
1869 |
|
1870 ASSERT(!(reinterpret_cast<intptr_t>(instruction) & 1)); |
|
1871 ASSERT(!(reinterpret_cast<intptr_t>(target) & 1)); |
|
1872 |
|
1873 ASSERT( (isMOV_imm_T3(instruction - 5) && isMOVT(instruction - 3) && isBX(instruction - 1)) |
|
1874 || (isNOP_T1(instruction - 5) && isNOP_T2(instruction - 4) && isB(instruction - 2)) ); |
|
1875 |
|
1876 intptr_t relative = reinterpret_cast<intptr_t>(target) - (reinterpret_cast<intptr_t>(instruction)); |
|
1877 |
|
1878 // From Cortex-A8 errata: |
|
1879 // If the 32-bit Thumb-2 branch instruction spans two 4KiB regions and |
|
1880 // the target of the branch falls within the first region it is |
|
1881 // possible for the processor to incorrectly determine the branch |
|
1882 // instruction, and it is also possible in some cases for the processor |
|
1883 // to enter a deadlock state. |
|
1884 // The instruction is spanning two pages if it ends at an address ending 0x002 |
|
1885 bool spansTwo4K = ((reinterpret_cast<intptr_t>(instruction) & 0xfff) == 0x002); |
|
1886 // The target is in the first page if the jump branch back by [3..0x1002] bytes |
|
1887 bool targetInFirstPage = (relative >= -0x1002) && (relative < -2); |
|
1888 bool wouldTriggerA8Errata = spansTwo4K && targetInFirstPage; |
|
1889 |
|
1890 if (((relative << 7) >> 7) == relative && !wouldTriggerA8Errata) { |
|
1891 // ARM encoding for the top two bits below the sign bit is 'peculiar'. |
|
1892 if (relative >= 0) |
|
1893 relative ^= 0xC00000; |
|
1894 |
|
1895 // All branch offsets should be an even distance. |
|
1896 ASSERT(!(relative & 1)); |
|
1897 // There may be a better way to fix this, but right now put the NOPs first, since in the |
|
1898 // case of an conditional branch this will be coming after an ITTT predicating *three* |
|
1899 // instructions! Looking backwards to modify the ITTT to an IT is not easy, due to |
|
1900 // variable wdith encoding - the previous instruction might *look* like an ITTT but |
|
1901 // actually be the second half of a 2-word op. |
|
1902 instruction[-5] = OP_NOP_T1; |
|
1903 instruction[-4] = OP_NOP_T2a; |
|
1904 instruction[-3] = OP_NOP_T2b; |
|
1905 instruction[-2] = OP_B_T4a | ((relative & 0x1000000) >> 14) | ((relative & 0x3ff000) >> 12); |
|
1906 instruction[-1] = OP_B_T4b | ((relative & 0x800000) >> 10) | ((relative & 0x400000) >> 11) | ((relative & 0xffe) >> 1); |
|
1907 } else { |
|
1908 ARMThumbImmediate lo16 = ARMThumbImmediate::makeUInt16(static_cast<uint16_t>(reinterpret_cast<uint32_t>(target) + 1)); |
|
1909 ARMThumbImmediate hi16 = ARMThumbImmediate::makeUInt16(static_cast<uint16_t>(reinterpret_cast<uint32_t>(target) >> 16)); |
|
1910 instruction[-5] = twoWordOp5i6Imm4Reg4EncodedImmFirst(OP_MOV_imm_T3, lo16); |
|
1911 instruction[-4] = twoWordOp5i6Imm4Reg4EncodedImmSecond(JUMP_TEMPORARY_REGISTER, lo16); |
|
1912 instruction[-3] = twoWordOp5i6Imm4Reg4EncodedImmFirst(OP_MOVT, hi16); |
|
1913 instruction[-2] = twoWordOp5i6Imm4Reg4EncodedImmSecond(JUMP_TEMPORARY_REGISTER, hi16); |
|
1914 instruction[-1] = OP_BX | (JUMP_TEMPORARY_REGISTER << 3); |
|
1915 } |
|
1916 } |
|
1917 |
|
1918 static uint16_t twoWordOp5i6Imm4Reg4EncodedImmFirst(uint16_t op, ARMThumbImmediate imm) |
|
1919 { |
|
1920 return op | (imm.m_value.i << 10) | imm.m_value.imm4; |
|
1921 } |
|
1922 static uint16_t twoWordOp5i6Imm4Reg4EncodedImmSecond(uint16_t rd, ARMThumbImmediate imm) |
|
1923 { |
|
1924 return (imm.m_value.imm3 << 12) | (rd << 8) | imm.m_value.imm8; |
|
1925 } |
|
1926 |
|
1927 class ARMInstructionFormatter { |
|
1928 public: |
|
1929 void oneWordOp5Reg3Imm8(OpcodeID op, RegisterID rd, uint8_t imm) |
|
1930 { |
|
1931 m_buffer.putShort(op | (rd << 8) | imm); |
|
1932 } |
|
1933 |
|
1934 void oneWordOp5Imm5Reg3Reg3(OpcodeID op, uint8_t imm, RegisterID reg1, RegisterID reg2) |
|
1935 { |
|
1936 m_buffer.putShort(op | (imm << 6) | (reg1 << 3) | reg2); |
|
1937 } |
|
1938 |
|
1939 void oneWordOp7Reg3Reg3Reg3(OpcodeID op, RegisterID reg1, RegisterID reg2, RegisterID reg3) |
|
1940 { |
|
1941 m_buffer.putShort(op | (reg1 << 6) | (reg2 << 3) | reg3); |
|
1942 } |
|
1943 |
|
1944 void oneWordOp8Imm8(OpcodeID op, uint8_t imm) |
|
1945 { |
|
1946 m_buffer.putShort(op | imm); |
|
1947 } |
|
1948 |
|
1949 void oneWordOp8RegReg143(OpcodeID op, RegisterID reg1, RegisterID reg2) |
|
1950 { |
|
1951 m_buffer.putShort(op | ((reg2 & 8) << 4) | (reg1 << 3) | (reg2 & 7)); |
|
1952 } |
|
1953 void oneWordOp9Imm7(OpcodeID op, uint8_t imm) |
|
1954 { |
|
1955 m_buffer.putShort(op | imm); |
|
1956 } |
|
1957 |
|
1958 void oneWordOp10Reg3Reg3(OpcodeID op, RegisterID reg1, RegisterID reg2) |
|
1959 { |
|
1960 m_buffer.putShort(op | (reg1 << 3) | reg2); |
|
1961 } |
|
1962 |
|
1963 void twoWordOp12Reg4FourFours(OpcodeID1 op, RegisterID reg, FourFours ff) |
|
1964 { |
|
1965 m_buffer.putShort(op | reg); |
|
1966 m_buffer.putShort(ff.m_u.value); |
|
1967 } |
|
1968 |
|
1969 void twoWordOp16FourFours(OpcodeID1 op, FourFours ff) |
|
1970 { |
|
1971 m_buffer.putShort(op); |
|
1972 m_buffer.putShort(ff.m_u.value); |
|
1973 } |
|
1974 |
|
1975 void twoWordOp16Op16(OpcodeID1 op1, OpcodeID2 op2) |
|
1976 { |
|
1977 m_buffer.putShort(op1); |
|
1978 m_buffer.putShort(op2); |
|
1979 } |
|
1980 |
|
1981 void twoWordOp5i6Imm4Reg4EncodedImm(OpcodeID1 op, int imm4, RegisterID rd, ARMThumbImmediate imm) |
|
1982 { |
|
1983 ARMThumbImmediate newImm = imm; |
|
1984 newImm.m_value.imm4 = imm4; |
|
1985 |
|
1986 m_buffer.putShort(ARMv7Assembler::twoWordOp5i6Imm4Reg4EncodedImmFirst(op, newImm)); |
|
1987 m_buffer.putShort(ARMv7Assembler::twoWordOp5i6Imm4Reg4EncodedImmSecond(rd, newImm)); |
|
1988 } |
|
1989 |
|
1990 void twoWordOp12Reg4Reg4Imm12(OpcodeID1 op, RegisterID reg1, RegisterID reg2, uint16_t imm) |
|
1991 { |
|
1992 m_buffer.putShort(op | reg1); |
|
1993 m_buffer.putShort((reg2 << 12) | imm); |
|
1994 } |
|
1995 |
|
1996 // Formats up instructions of the pattern: |
|
1997 // 111111111B11aaaa:bbbb222SA2C2cccc |
|
1998 // Where 1s in the pattern come from op1, 2s in the pattern come from op2, S is the provided size bit. |
|
1999 // Operands provide 5 bit values of the form Aaaaa, Bbbbb, Ccccc. |
|
2000 void vfpOp(OpcodeID1 op1, OpcodeID2 op2, bool size, VFPOperand a, VFPOperand b, VFPOperand c) |
|
2001 { |
|
2002 ASSERT(!(op1 & 0x004f)); |
|
2003 ASSERT(!(op2 & 0xf1af)); |
|
2004 m_buffer.putShort(op1 | b.bits1() << 6 | a.bits4()); |
|
2005 m_buffer.putShort(op2 | b.bits4() << 12 | size << 8 | a.bits1() << 7 | c.bits1() << 5 | c.bits4()); |
|
2006 } |
|
2007 |
|
2008 // Arm vfp addresses can be offset by a 9-bit ones-comp immediate, left shifted by 2. |
|
2009 // (i.e. +/-(0..255) 32-bit words) |
|
2010 void vfpMemOp(OpcodeID1 op1, OpcodeID2 op2, bool size, RegisterID rn, VFPOperand rd, int32_t imm) |
|
2011 { |
|
2012 bool up = true; |
|
2013 if (imm < 0) { |
|
2014 imm = -imm; |
|
2015 up = false; |
|
2016 } |
|
2017 |
|
2018 uint32_t offset = imm; |
|
2019 ASSERT(!(offset & ~0x3fc)); |
|
2020 offset >>= 2; |
|
2021 |
|
2022 m_buffer.putShort(op1 | (up << 7) | rd.bits1() << 6 | rn); |
|
2023 m_buffer.putShort(op2 | rd.bits4() << 12 | size << 8 | offset); |
|
2024 } |
|
2025 |
|
2026 // Administrative methods: |
|
2027 |
|
2028 size_t size() const { return m_buffer.size(); } |
|
2029 bool isAligned(int alignment) const { return m_buffer.isAligned(alignment); } |
|
2030 void* data() const { return m_buffer.data(); } |
|
2031 void* executableCopy(ExecutablePool* allocator) { return m_buffer.executableCopy(allocator); } |
|
2032 |
|
2033 private: |
|
2034 AssemblerBuffer m_buffer; |
|
2035 } m_formatter; |
|
2036 |
|
2037 Vector<LinkRecord> m_jumpsToLink; |
|
2038 }; |
|
2039 |
|
2040 } // namespace JSC |
|
2041 |
|
2042 #endif // ENABLE(ASSEMBLER) && CPU(ARM_THUMB2) |
|
2043 |
|
2044 #endif // ARMAssembler_h |