/*
 *  ARM helper routines
 *
 *  Copyright (c) 2005-2007 CodeSourcery, LLC
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */
#include "exec.h"
#include "helpers.h"

#define SIGNBIT (uint32_t)0x80000000
#define SIGNBIT64 ((uint64_t)1 << 63)

void raise_exception(int tt)
{
    env->exception_index = tt;
    cpu_loop_exit();
}

/* thread support */

static spinlock_t global_cpu_lock = SPIN_LOCK_UNLOCKED;

void cpu_lock(void)
{
    spin_lock(&global_cpu_lock);
}

void cpu_unlock(void)
{
    spin_unlock(&global_cpu_lock);
}

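/* Table lookup for the NEON VTBL/VTBX operations: each byte of ireg is an
   index into the table of bytes held in the NEON registers starting at rn;
   indexes at or beyond maxindex take the corresponding byte of def instead. */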
uint32_t HELPER(neon_tbl)(uint32_t ireg, uint32_t def,
                          uint32_t rn, uint32_t maxindex)
{
    uint32_t val;
    uint32_t tmp;
    int index;
    int shift;
    uint64_t *table;
    table = (uint64_t *)&env->vfp.regs[rn];
    val = 0;
    for (shift = 0; shift < 32; shift += 8) {
        index = (ireg >> shift) & 0xff;
        if (index < maxindex) {
            tmp = (table[index >> 3] >> ((index & 7) << 3)) & 0xff;
            val |= tmp << shift;
        } else {
            val |= def & (0xff << shift);
        }
    }
    return val;
}

#if !defined(CONFIG_USER_ONLY)

static void do_unaligned_access (target_ulong addr, int is_write, int is_user, void *retaddr);

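/* Instantiate the softmmu load/store helpers for 1, 2, 4 and 8 byte
   accesses.  Defining ALIGNED_ONLY makes the generated helpers call
   do_unaligned_access() for accesses that are not naturally aligned. */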
#define MMUSUFFIX _mmu
#define ALIGNED_ONLY

#define SHIFT 0
#include "softmmu_template.h"

#define SHIFT 1
#include "softmmu_template.h"

#define SHIFT 2
#include "softmmu_template.h"

#define SHIFT 3
#include "softmmu_template.h"

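/* Given a host PC inside generated code, find the translation block that
   contains it and restore the guest CPU state for that instruction. */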
void do_restore_state (void *pc_ptr)
{
    TranslationBlock *tb;
    unsigned long pc = (unsigned long) pc_ptr;

    tb = tb_find_pc (pc);
    if (tb) {
        cpu_restore_state (tb, env, pc, NULL);
    }
}

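/* Raise an alignment fault: record the fault status and faulting address
   in the CP15 fault registers, then take a prefetch abort for instruction
   fetches (is_write == 2) or a data abort for loads and stores. */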
static void do_unaligned_access (target_ulong addr, int is_write, int is_user, void *retaddr)
{
    /* TODO: Legacy alignment behavior and v6 unaligned accesses.  */
    if (is_write == 2) {
        env->cp15.c5_insn = 1;
        env->cp15.c6_insn = addr;
        do_restore_state (retaddr);
        raise_exception (EXCP_PREFETCH_ABORT);
    } else {
        env->cp15.c5_data = 1;
        env->cp15.c6_data = addr;
        do_restore_state (retaddr);
        raise_exception (EXCP_DATA_ABORT);
    }
}

/* Try to fill the TLB; raise an exception on error.  If retaddr is
   NULL, it means that the function was called in C code (i.e. not
   from generated code or from helper.c) */
/* XXX: fix it to restore all registers */
void tlb_fill (target_ulong addr, int is_write, int mmu_idx, void *retaddr)
{
    CPUState *saved_env;
    int ret;

    /* XXX: hack to restore env in all cases, even if not called from
       generated code */
    saved_env = env;
    env = cpu_single_env;
    ret = cpu_arm_handle_mmu_fault(env, addr, is_write, mmu_idx, 1);
    if (unlikely(ret)) {
        if (retaddr) {
            /* now we have a real cpu fault */
            do_restore_state(retaddr);
        }
        raise_exception(env->exception_index);
    }
    env = saved_env;
}
#endif

/* FIXME: Pass an explicit pointer to QF to CPUState, and move saturating
   instructions into helper.c */
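/* Add two 32-bit values and set the sticky Q flag on signed overflow, but
   do not saturate the result (e.g. for the signed multiply-accumulate
   instructions, which set Q without saturating). */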
uint32_t HELPER(add_setq)(uint32_t a, uint32_t b)
{
    uint32_t res = a + b;
    if (((res ^ a) & SIGNBIT) && !((a ^ b) & SIGNBIT))
        env->QF = 1;
    return res;
}

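/* Signed saturating add/subtract (QADD/QSUB): on overflow the result is
   clamped to INT32_MAX or INT32_MIN and the Q flag is set. */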
uint32_t HELPER(add_saturate)(uint32_t a, uint32_t b)
{
    uint32_t res = a + b;
    if (((res ^ a) & SIGNBIT) && !((a ^ b) & SIGNBIT)) {
        env->QF = 1;
        res = ~(((int32_t)a >> 31) ^ SIGNBIT);
    }
    return res;
}

uint32_t HELPER(sub_saturate)(uint32_t a, uint32_t b)
{
    uint32_t res = a - b;
    if (((res ^ a) & SIGNBIT) && ((a ^ b) & SIGNBIT)) {
        env->QF = 1;
        res = ~(((int32_t)a >> 31) ^ SIGNBIT);
    }
    return res;
}

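/* Saturating doubling of a signed 32-bit value, setting Q on overflow
   (the doubling step of QDADD/QDSUB). */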
uint32_t HELPER(double_saturate)(int32_t val)
{
    uint32_t res;
    if (val >= 0x40000000) {
        res = ~SIGNBIT;
        env->QF = 1;
    } else if (val <= (int32_t)0xc0000000) {
        res = SIGNBIT;
        env->QF = 1;
    } else {
        res = val << 1;
    }
    return res;
}

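/* Unsigned saturating add/subtract: clamp to 0xffffffff or 0 on overflow
   or underflow and set the Q flag. */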
uint32_t HELPER(add_usaturate)(uint32_t a, uint32_t b)
{
    uint32_t res = a + b;
    if (res < a) {
        env->QF = 1;
        res = ~0;
    }
    return res;
}

uint32_t HELPER(sub_usaturate)(uint32_t a, uint32_t b)
{
    uint32_t res = a - b;
    if (res > a) {
        env->QF = 1;
        res = 0;
    }
    return res;
}

/* Signed saturation: clamp val to the range of a signed (shift + 1)-bit
   integer, setting Q if it was out of range.  */
static inline uint32_t do_ssat(int32_t val, int shift)
{
    int32_t top;
    uint32_t mask;

    top = val >> shift;
    mask = (1u << shift) - 1;
    if (top > 0) {
        env->QF = 1;
        return mask;
    } else if (top < -1) {
        env->QF = 1;
        return ~mask;
    }
    return val;
}

/* Unsigned saturation: clamp val to the range 0 .. (2^shift - 1), setting
   Q if it was out of range.  */
static inline uint32_t do_usat(int32_t val, int shift)
{
    uint32_t max;

    max = (1u << shift) - 1;
    if (val < 0) {
        env->QF = 1;
        return 0;
    } else if (val > max) {
        env->QF = 1;
        return max;
    }
    return val;
}

/* Signed saturate.  */
uint32_t HELPER(ssat)(uint32_t x, uint32_t shift)
{
    return do_ssat(x, shift);
}

/* Dual halfword signed saturate.  */
uint32_t HELPER(ssat16)(uint32_t x, uint32_t shift)
{
    uint32_t res;

    res = (uint16_t)do_ssat((int16_t)x, shift);
    res |= do_ssat(((int32_t)x) >> 16, shift) << 16;
    return res;
}

/* Unsigned saturate.  */
uint32_t HELPER(usat)(uint32_t x, uint32_t shift)
{
    return do_usat(x, shift);
}

/* Dual halfword unsigned saturate.  */
uint32_t HELPER(usat16)(uint32_t x, uint32_t shift)
{
    uint32_t res;

    res = (uint16_t)do_usat((int16_t)x, shift);
    res |= do_usat(((int32_t)x) >> 16, shift) << 16;
    return res;
}

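/* Wait for interrupt: halt the CPU until an interrupt is delivered. */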
void HELPER(wfi)(void)
{
    env->exception_index = EXCP_HLT;
    env->halted = 1;
    cpu_loop_exit();
}

void HELPER(exception)(uint32_t excp)
{
    env->exception_index = excp;
    cpu_loop_exit();
}

uint32_t HELPER(cpsr_read)(void)
{
    return cpsr_read(env) & ~CPSR_EXEC;
}

void HELPER(cpsr_write)(uint32_t val, uint32_t mask)
{
    cpsr_write(env, val, mask);
}

/* Access to user mode registers from privileged modes.  */
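/* Return the user-mode view of a register: R13/R14 come from the User bank
   and, in FIQ mode, R8-R12 come from the saved usr_regs copy (used e.g. by
   the LDM/STM user-bank forms). */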
uint32_t HELPER(get_user_reg)(uint32_t regno)
{
    uint32_t val;

    if (regno == 13) {
        val = env->banked_r13[0];
    } else if (regno == 14) {
        val = env->banked_r14[0];
    } else if (regno >= 8
               && (env->uncached_cpsr & 0x1f) == ARM_CPU_MODE_FIQ) {
        val = env->usr_regs[regno - 8];
    } else {
        val = env->regs[regno];
    }
    return val;
}

void HELPER(set_user_reg)(uint32_t regno, uint32_t val)
{
    if (regno == 13) {
        env->banked_r13[0] = val;
    } else if (regno == 14) {
        env->banked_r14[0] = val;
    } else if (regno >= 8
               && (env->uncached_cpsr & 0x1f) == ARM_CPU_MODE_FIQ) {
        env->usr_regs[regno - 8] = val;
    } else {
        env->regs[regno] = val;
    }
}

/* ??? Flag setting arithmetic is awkward because we need to do comparisons.
   The only way to do that in TCG is a conditional branch, which clobbers
   all our temporaries.  For now implement these as helper functions.  */

uint32_t HELPER(add_cc)(uint32_t a, uint32_t b)
{
    uint32_t result;
    result = a + b;
    env->NF = env->ZF = result;
    env->CF = result < a;
    env->VF = (a ^ b ^ -1) & (a ^ result);
    return result;
}

uint32_t HELPER(adc_cc)(uint32_t a, uint32_t b)
{
    uint32_t result;
    if (!env->CF) {
        result = a + b;
        env->CF = result < a;
    } else {
        result = a + b + 1;
        env->CF = result <= a;
    }
    env->VF = (a ^ b ^ -1) & (a ^ result);
    env->NF = env->ZF = result;
    return result;
}

uint32_t HELPER(sub_cc)(uint32_t a, uint32_t b)
{
    uint32_t result;
    result = a - b;
    env->NF = env->ZF = result;
    env->CF = a >= b;
    env->VF = (a ^ b) & (a ^ result);
    return result;
}

uint32_t HELPER(sbc_cc)(uint32_t a, uint32_t b)
{
    uint32_t result;
    if (!env->CF) {
        result = a - b - 1;
        env->CF = a > b;
    } else {
        result = a - b;
        env->CF = a >= b;
    }
    env->VF = (a ^ b) & (a ^ result);
    env->NF = env->ZF = result;
    return result;
}

/* Similarly for variable shift instructions.  */

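/* Register-specified shifts use only the bottom byte of the shift amount;
   LSL/LSR by 32 or more yield 0, and ASR by 32 or more yields the sign fill. */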
uint32_t HELPER(shl)(uint32_t x, uint32_t i)
{
    int shift = i & 0xff;
    if (shift >= 32)
        return 0;
    return x << shift;
}

uint32_t HELPER(shr)(uint32_t x, uint32_t i)
{
    int shift = i & 0xff;
    if (shift >= 32)
        return 0;
    return (uint32_t)x >> shift;
}

uint32_t HELPER(sar)(uint32_t x, uint32_t i)
{
    int shift = i & 0xff;
    if (shift >= 32)
        shift = 31;
    return (int32_t)x >> shift;
}

uint32_t HELPER(ror)(uint32_t x, uint32_t i)
{
    int shift = i & 0xff;
    if (shift == 0)
        return x;
    return (x >> shift) | (x << (32 - shift));
}

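/* The *_cc variants additionally compute the shifter carry-out into env->CF,
   following the ARM rules for register-specified shift amounts of zero,
   32 and above. */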
uint32_t HELPER(shl_cc)(uint32_t x, uint32_t i)
{
    int shift = i & 0xff;
    if (shift >= 32) {
        if (shift == 32)
            env->CF = x & 1;
        else
            env->CF = 0;
        return 0;
    } else if (shift != 0) {
        env->CF = (x >> (32 - shift)) & 1;
        return x << shift;
    }
    return x;
}

uint32_t HELPER(shr_cc)(uint32_t x, uint32_t i)
{
    int shift = i & 0xff;
    if (shift >= 32) {
        if (shift == 32)
            env->CF = (x >> 31) & 1;
        else
            env->CF = 0;
        return 0;
    } else if (shift != 0) {
        env->CF = (x >> (shift - 1)) & 1;
        return x >> shift;
    }
    return x;
}

uint32_t HELPER(sar_cc)(uint32_t x, uint32_t i)
{
    int shift = i & 0xff;
    if (shift >= 32) {
        env->CF = (x >> 31) & 1;
        return (int32_t)x >> 31;
    } else if (shift != 0) {
        env->CF = (x >> (shift - 1)) & 1;
        return (int32_t)x >> shift;
    }
    return x;
}

uint32_t HELPER(ror_cc)(uint32_t x, uint32_t i)
{
    int shift1, shift;
    shift1 = i & 0xff;
    shift = shift1 & 0x1f;
    if (shift == 0) {
        if (shift1 != 0)
            env->CF = (x >> 31) & 1;
        return x;
    } else {
        env->CF = (x >> (shift - 1)) & 1;
        return ((uint32_t)x >> shift) | (x << (32 - shift));
    }
}

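/* 64-bit saturating add/subtract, signed and unsigned, for the NEON
   VQADD/VQSUB helpers; QF is set whenever a result saturates. */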
uint64_t HELPER(neon_add_saturate_s64)(uint64_t src1, uint64_t src2)
{
    uint64_t res;

    res = src1 + src2;
    if (((res ^ src1) & SIGNBIT64) && !((src1 ^ src2) & SIGNBIT64)) {
        env->QF = 1;
        res = ((int64_t)src1 >> 63) ^ ~SIGNBIT64;
    }
    return res;
}

uint64_t HELPER(neon_add_saturate_u64)(uint64_t src1, uint64_t src2)
{
    uint64_t res;

    res = src1 + src2;
    if (res < src1) {
        env->QF = 1;
        res = ~(uint64_t)0;
    }
    return res;
}

uint64_t HELPER(neon_sub_saturate_s64)(uint64_t src1, uint64_t src2)
{
    uint64_t res;

    res = src1 - src2;
    if (((res ^ src1) & SIGNBIT64) && ((src1 ^ src2) & SIGNBIT64)) {
        env->QF = 1;
        res = ((int64_t)src1 >> 63) ^ ~SIGNBIT64;
    }
    return res;
}

uint64_t HELPER(neon_sub_saturate_u64)(uint64_t src1, uint64_t src2)
{
    uint64_t res;

    if (src1 < src2) {
        env->QF = 1;
        res = 0;
    } else {
        res = src1 - src2;
    }
    return res;
}

/* These need to return a pair of values, so still use T0/T1.  */
/* Transpose.  Argument order is rather strange to avoid special casing
   the translation code.
   On input T0 = rm, T1 = rd.  On output T0 = rd, T1 = rm.  */
void HELPER(neon_trn_u8)(void)
{
    uint32_t rd;
    uint32_t rm;
    rd = ((T0 & 0x00ff00ff) << 8) | (T1 & 0x00ff00ff);
    rm = ((T1 & 0xff00ff00) >> 8) | (T0 & 0xff00ff00);
    T0 = rd;
    T1 = rm;
}

void HELPER(neon_trn_u16)(void)
{
    uint32_t rd;
    uint32_t rm;
    rd = (T0 << 16) | (T1 & 0xffff);
    rm = (T1 >> 16) | (T0 & 0xffff0000);
    T0 = rd;
    T1 = rm;
}

/* Worker routines for zip and unzip.  */
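/* Treating T1:T0 as a vector of eight bytes (T0 low), unzip_u8 gathers the
   even-indexed bytes into T0 and the odd-indexed bytes into T1; zip_u8 (and
   zip_u16 for halfwords) performs the inverse interleave. */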
void HELPER(neon_unzip_u8)(void)
{
    uint32_t rd;
    uint32_t rm;
    rd = (T0 & 0xff) | ((T0 >> 8) & 0xff00)
         | ((T1 << 16) & 0xff0000) | ((T1 << 8) & 0xff000000);
    rm = ((T0 >> 8) & 0xff) | ((T0 >> 16) & 0xff00)
         | ((T1 << 8) & 0xff0000) | (T1 & 0xff000000);
    T0 = rd;
    T1 = rm;
}

void HELPER(neon_zip_u8)(void)
{
    uint32_t rd;
    uint32_t rm;
    rd = (T0 & 0xff) | ((T1 << 8) & 0xff00)
         | ((T0 << 16) & 0xff0000) | ((T1 << 24) & 0xff000000);
    rm = ((T0 >> 16) & 0xff) | ((T1 >> 8) & 0xff00)
         | ((T0 >> 8) & 0xff0000) | (T1 & 0xff000000);
    T0 = rd;
    T1 = rm;
}

void HELPER(neon_zip_u16)(void)
{
    uint32_t tmp;

    tmp = (T0 & 0xffff) | (T1 << 16);
    T1 = (T1 & 0xffff0000) | (T0 >> 16);
    T0 = tmp;
}