/*
 * AArch64 specific helpers
 *
 * Copyright (c) 2013 Alexander Graf <agraf@suse.de>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

20 #include "qemu/osdep.h"
21 #include "qemu/units.h"
23 #include "gdbstub/helpers.h"
24 #include "exec/helper-proto.h"
25 #include "qemu/host-utils.h"
27 #include "qemu/main-loop.h"
28 #include "qemu/bitops.h"
29 #include "internals.h"
30 #include "qemu/crc32c.h"
31 #include "exec/exec-all.h"
32 #include "exec/cpu_ldst.h"
33 #include "qemu/int128.h"
34 #include "qemu/atomic128.h"
35 #include "fpu/softfloat.h"
36 #include <zlib.h> /* For crc32 */
/* C2.4.7 Multiply and divide */
/* special cases for 0 and LLONG_MIN are mandated by the standard */
uint64_t HELPER(udiv64)(uint64_t num, uint64_t den)
{
    if (den == 0) {
        return 0;
    }
    return num / den;
}

int64_t HELPER(sdiv64)(int64_t num, int64_t den)
{
    if (den == 0) {
        return 0;
    }
    if (num == LLONG_MIN && den == -1) {
        return LLONG_MIN;
    }
    return num / den;
}

uint64_t HELPER(rbit64)(uint64_t x)
{
    return revbit64(x);
}

void HELPER(msr_i_spsel)(CPUARMState *env, uint32_t imm)
{
    update_spsel(env, imm);
}

static void daif_check(CPUARMState *env, uint32_t op,
                       uint32_t imm, uintptr_t ra)
{
    /* DAIF update to PSTATE. This is OK from EL0 only if UMA is set. */
    if (arm_current_el(env) == 0 && !(arm_sctlr(env, 0) & SCTLR_UMA)) {
        raise_exception_ra(env, EXCP_UDEF,
                           syn_aa64_sysregtrap(0, extract32(op, 0, 3),
                                               extract32(op, 3, 3), 4,
                                               imm, 0x1f, 0),
                           exception_target_el(env), ra);
    }
}

void HELPER(msr_i_daifset)(CPUARMState *env, uint32_t imm)
{
    daif_check(env, 0x1e, imm, GETPC());
    env->daif |= (imm << 6) & PSTATE_DAIF;
    arm_rebuild_hflags(env);
}

void HELPER(msr_i_daifclear)(CPUARMState *env, uint32_t imm)
{
    daif_check(env, 0x1f, imm, GETPC());
    env->daif &= ~((imm << 6) & PSTATE_DAIF);
    arm_rebuild_hflags(env);
}

/* Convert a softfloat float_relation_ (as returned by
 * the float*_compare functions) to the correct ARM
 * NZCV flag state.
 */
static inline uint32_t float_rel_to_flags(int res)
{
    uint64_t flags;
    switch (res) {
    case float_relation_equal:
        flags = PSTATE_Z | PSTATE_C;
        break;
    case float_relation_less:
        flags = PSTATE_N;
        break;
    case float_relation_greater:
        flags = PSTATE_C;
        break;
    case float_relation_unordered:
    default:
        flags = PSTATE_C | PSTATE_V;
        break;
    }
    return flags;
}

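/*
 * Quick reference for the mapping above (matching the AArch64 FCMP
 * definition): equal -> Z|C, less -> N, greater -> C, unordered -> C|V.
 */
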
uint64_t HELPER(vfp_cmph_a64)(uint32_t x, uint32_t y, void *fp_status)
{
    return float_rel_to_flags(float16_compare_quiet(x, y, fp_status));
}

uint64_t HELPER(vfp_cmpeh_a64)(uint32_t x, uint32_t y, void *fp_status)
{
    return float_rel_to_flags(float16_compare(x, y, fp_status));
}

uint64_t HELPER(vfp_cmps_a64)(float32 x, float32 y, void *fp_status)
{
    return float_rel_to_flags(float32_compare_quiet(x, y, fp_status));
}

uint64_t HELPER(vfp_cmpes_a64)(float32 x, float32 y, void *fp_status)
{
    return float_rel_to_flags(float32_compare(x, y, fp_status));
}

uint64_t HELPER(vfp_cmpd_a64)(float64 x, float64 y, void *fp_status)
{
    return float_rel_to_flags(float64_compare_quiet(x, y, fp_status));
}

uint64_t HELPER(vfp_cmped_a64)(float64 x, float64 y, void *fp_status)
{
    return float_rel_to_flags(float64_compare(x, y, fp_status));
}

float32 HELPER(vfp_mulxs)(float32 a, float32 b, void *fpstp)
{
    float_status *fpst = fpstp;

    a = float32_squash_input_denormal(a, fpst);
    b = float32_squash_input_denormal(b, fpst);

    if ((float32_is_zero(a) && float32_is_infinity(b)) ||
        (float32_is_infinity(a) && float32_is_zero(b))) {
        /* 2.0 with the sign bit set to sign(A) XOR sign(B) */
        return make_float32((1U << 30) |
                            ((float32_val(a) ^ float32_val(b)) & (1U << 31)));
    }
    return float32_mul(a, b, fpst);
}

float64 HELPER(vfp_mulxd)(float64 a, float64 b, void *fpstp)
{
    float_status *fpst = fpstp;

    a = float64_squash_input_denormal(a, fpst);
    b = float64_squash_input_denormal(b, fpst);

    if ((float64_is_zero(a) && float64_is_infinity(b)) ||
        (float64_is_infinity(a) && float64_is_zero(b))) {
        /* 2.0 with the sign bit set to sign(A) XOR sign(B) */
        return make_float64((1ULL << 62) |
                            ((float64_val(a) ^ float64_val(b)) & (1ULL << 63)));
    }
    return float64_mul(a, b, fpst);
}

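/*
 * FMULX differs from FMUL only in the 0 * Inf case: instead of the default
 * NaN it returns 2.0 with the XORed sign, e.g. FMULX(+0.0, -Inf) == -2.0.
 * (1ULL << 62) is exactly the float64 encoding of 2.0 with the sign clear.
 */
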
/* 64bit/double versions of the neon float compare functions */
uint64_t HELPER(neon_ceq_f64)(float64 a, float64 b, void *fpstp)
{
    float_status *fpst = fpstp;
    return -float64_eq_quiet(a, b, fpst);
}

uint64_t HELPER(neon_cge_f64)(float64 a, float64 b, void *fpstp)
{
    float_status *fpst = fpstp;
    return -float64_le(b, a, fpst);
}

uint64_t HELPER(neon_cgt_f64)(float64 a, float64 b, void *fpstp)
{
    float_status *fpst = fpstp;
    return -float64_lt(b, a, fpst);
}

/* Reciprocal step and sqrt step. Note that unlike the A32/T32
 * versions, these do a fully fused multiply-add or
 * multiply-add-and-halve.
 */

uint32_t HELPER(recpsf_f16)(uint32_t a, uint32_t b, void *fpstp)
{
    float_status *fpst = fpstp;

    a = float16_squash_input_denormal(a, fpst);
    b = float16_squash_input_denormal(b, fpst);

    a = float16_chs(a);
    if ((float16_is_infinity(a) && float16_is_zero(b)) ||
        (float16_is_infinity(b) && float16_is_zero(a))) {
        return float16_two;
    }
    return float16_muladd(a, b, float16_two, 0, fpst);
}

float32 HELPER(recpsf_f32)(float32 a, float32 b, void *fpstp)
{
    float_status *fpst = fpstp;

    a = float32_squash_input_denormal(a, fpst);
    b = float32_squash_input_denormal(b, fpst);

    a = float32_chs(a);
    if ((float32_is_infinity(a) && float32_is_zero(b)) ||
        (float32_is_infinity(b) && float32_is_zero(a))) {
        return float32_two;
    }
    return float32_muladd(a, b, float32_two, 0, fpst);
}

float64 HELPER(recpsf_f64)(float64 a, float64 b, void *fpstp)
{
    float_status *fpst = fpstp;

    a = float64_squash_input_denormal(a, fpst);
    b = float64_squash_input_denormal(b, fpst);

    a = float64_chs(a);
    if ((float64_is_infinity(a) && float64_is_zero(b)) ||
        (float64_is_infinity(b) && float64_is_zero(a))) {
        return float64_two;
    }
    return float64_muladd(a, b, float64_two, 0, fpst);
}

uint32_t HELPER(rsqrtsf_f16)(uint32_t a, uint32_t b, void *fpstp)
{
    float_status *fpst = fpstp;

    a = float16_squash_input_denormal(a, fpst);
    b = float16_squash_input_denormal(b, fpst);

    a = float16_chs(a);
    if ((float16_is_infinity(a) && float16_is_zero(b)) ||
        (float16_is_infinity(b) && float16_is_zero(a))) {
        return float16_one_point_five;
    }
    return float16_muladd(a, b, float16_three, float_muladd_halve_result, fpst);
}

float32 HELPER(rsqrtsf_f32)(float32 a, float32 b, void *fpstp)
{
    float_status *fpst = fpstp;

    a = float32_squash_input_denormal(a, fpst);
    b = float32_squash_input_denormal(b, fpst);

    a = float32_chs(a);
    if ((float32_is_infinity(a) && float32_is_zero(b)) ||
        (float32_is_infinity(b) && float32_is_zero(a))) {
        return float32_one_point_five;
    }
    return float32_muladd(a, b, float32_three, float_muladd_halve_result, fpst);
}

float64 HELPER(rsqrtsf_f64)(float64 a, float64 b, void *fpstp)
{
    float_status *fpst = fpstp;

    a = float64_squash_input_denormal(a, fpst);
    b = float64_squash_input_denormal(b, fpst);

    a = float64_chs(a);
    if ((float64_is_infinity(a) && float64_is_zero(b)) ||
        (float64_is_infinity(b) && float64_is_zero(a))) {
        return float64_one_point_five;
    }
    return float64_muladd(a, b, float64_three, float_muladd_halve_result, fpst);
}

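/*
 * These implement the Arm ARM FRECPS/FRSQRTS step formulas 2 - a*b and
 * (3 - a*b) / 2: the first operand is negated before the fused
 * multiply-add, and the sqrt-step variants use float_muladd_halve_result
 * to fold in the final halving.
 */
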
/* Pairwise long add: add pairs of adjacent elements into
 * double-width elements in the result (eg _s8 is an 8x8->16 op)
 */
uint64_t HELPER(neon_addlp_s8)(uint64_t a)
{
    uint64_t nsignmask = 0x0080008000800080ULL;
    uint64_t wsignmask = 0x8000800080008000ULL;
    uint64_t elementmask = 0x00ff00ff00ff00ffULL;
    uint64_t tmp1, tmp2;
    uint64_t res, signres;

    /* Extract odd elements, sign extend each to a 16 bit field */
    tmp1 = a & elementmask;
    tmp1 ^= nsignmask;
    tmp1 |= wsignmask;
    tmp1 = (tmp1 - nsignmask) ^ wsignmask;
    /* Ditto for the even elements */
    tmp2 = (a >> 8) & elementmask;
    tmp2 ^= nsignmask;
    tmp2 |= wsignmask;
    tmp2 = (tmp2 - nsignmask) ^ wsignmask;

    /* calculate the result by summing bits 0..14, 16..22, etc,
     * and then adjusting the sign bits 15, 23, etc manually.
     * This ensures the addition can't overflow the 16 bit field.
     */
    signres = (tmp1 ^ tmp2) & wsignmask;
    res = (tmp1 & ~wsignmask) + (tmp2 & ~wsignmask);
    res ^= signres;

    return res;
}

uint64_t HELPER(neon_addlp_u8)(uint64_t a)
{
    uint64_t tmp;

    tmp = a & 0x00ff00ff00ff00ffULL;
    tmp += (a >> 8) & 0x00ff00ff00ff00ffULL;
    return tmp;
}

uint64_t HELPER(neon_addlp_s16)(uint64_t a)
{
    int32_t reslo, reshi;

    reslo = (int32_t)(int16_t)a + (int32_t)(int16_t)(a >> 16);
    reshi = (int32_t)(int16_t)(a >> 32) + (int32_t)(int16_t)(a >> 48);

    return (uint32_t)reslo | (((uint64_t)reshi) << 32);
}

uint64_t HELPER(neon_addlp_u16)(uint64_t a)
{
    uint64_t tmp;

    tmp = a & 0x0000ffff0000ffffULL;
    tmp += (a >> 16) & 0x0000ffff0000ffffULL;
    return tmp;
}

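/*
 * Worked example for the unsigned 8-bit case: for a == 0x0102030405060708
 * the adjacent byte pairs sum to 0x0003_0007_000b_000f, i.e. the four
 * 16-bit results 3, 7, 0xb and 0xf.
 */
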
/* Floating-point reciprocal exponent - see FPRecpX in ARM ARM */
uint32_t HELPER(frecpx_f16)(uint32_t a, void *fpstp)
{
    float_status *fpst = fpstp;
    uint16_t val16, sbit;
    int16_t exp;

    if (float16_is_any_nan(a)) {
        float16 nan = a;
        if (float16_is_signaling_nan(a, fpst)) {
            float_raise(float_flag_invalid, fpst);
            if (!fpst->default_nan_mode) {
                nan = float16_silence_nan(a, fpst);
            }
        }
        if (fpst->default_nan_mode) {
            nan = float16_default_nan(fpst);
        }
        return nan;
    }

    a = float16_squash_input_denormal(a, fpst);

    val16 = float16_val(a);
    sbit = 0x8000 & val16;
    exp = extract32(val16, 10, 5);

    if (exp == 0) {
        return make_float16(deposit32(sbit, 10, 5, 0x1e));
    } else {
        return make_float16(deposit32(sbit, 10, 5, ~exp));
    }
}

float32 HELPER(frecpx_f32)(float32 a, void *fpstp)
{
    float_status *fpst = fpstp;
    uint32_t val32, sbit;
    int32_t exp;

    if (float32_is_any_nan(a)) {
        float32 nan = a;
        if (float32_is_signaling_nan(a, fpst)) {
            float_raise(float_flag_invalid, fpst);
            if (!fpst->default_nan_mode) {
                nan = float32_silence_nan(a, fpst);
            }
        }
        if (fpst->default_nan_mode) {
            nan = float32_default_nan(fpst);
        }
        return nan;
    }

    a = float32_squash_input_denormal(a, fpst);

    val32 = float32_val(a);
    sbit = 0x80000000ULL & val32;
    exp = extract32(val32, 23, 8);

    if (exp == 0) {
        return make_float32(sbit | (0xfe << 23));
    } else {
        return make_float32(sbit | (~exp & 0xff) << 23);
    }
}

float64 HELPER(frecpx_f64)(float64 a, void *fpstp)
{
    float_status *fpst = fpstp;
    uint64_t val64, sbit;
    int64_t exp;

    if (float64_is_any_nan(a)) {
        float64 nan = a;
        if (float64_is_signaling_nan(a, fpst)) {
            float_raise(float_flag_invalid, fpst);
            if (!fpst->default_nan_mode) {
                nan = float64_silence_nan(a, fpst);
            }
        }
        if (fpst->default_nan_mode) {
            nan = float64_default_nan(fpst);
        }
        return nan;
    }

    a = float64_squash_input_denormal(a, fpst);

    val64 = float64_val(a);
    sbit = 0x8000000000000000ULL & val64;
    exp = extract64(float64_val(a), 52, 11);

    if (exp == 0) {
        return make_float64(sbit | (0x7feULL << 52));
    } else {
        return make_float64(sbit | (~exp & 0x7ffULL) << 52);
    }
}

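/*
 * FRECPX only inverts the exponent field (the fraction of the result is
 * zero), giving a power-of-two scale factor close to 1/x: e.g. a double
 * with biased exponent 0x404 (magnitude in [32, 64)) maps to biased
 * exponent 0x3fb, i.e. a result of magnitude 1/16.
 */
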
float32 HELPER(fcvtx_f64_to_f32)(float64 a, CPUARMState *env)
{
    /* Von Neumann rounding is implemented by using round-to-zero
     * and then setting the LSB of the result if Inexact was raised.
     */
    float32 r;
    float_status *fpst = &env->vfp.fp_status;
    float_status tstat = *fpst;
    int exflags;

    set_float_rounding_mode(float_round_to_zero, &tstat);
    set_float_exception_flags(0, &tstat);
    r = float64_to_float32(a, &tstat);
    exflags = get_float_exception_flags(&tstat);
    if (exflags & float_flag_inexact) {
        r = make_float32(float32_val(r) | 1);
    }
    exflags |= get_float_exception_flags(fpst);
    set_float_exception_flags(exflags, fpst);
    return r;
}

/* 64-bit versions of the CRC helpers. Note that although the operation
 * (and the prototypes of crc32c() and crc32()) means that only the bottom
 * 32 bits of the accumulator and result are used, we pass and return
 * uint64_t for convenience of the generated code. Unlike the 32-bit
 * instruction set versions, val may genuinely have 64 bits of data in it.
 * The upper bytes of val (above the number specified by 'bytes') must have
 * been zeroed out by the caller.
 */
uint64_t HELPER(crc32_64)(uint64_t acc, uint64_t val, uint32_t bytes)
{
    uint8_t buf[8];

    stq_le_p(buf, val);

    /* zlib crc32 converts the accumulator and output to one's complement. */
    return crc32(acc ^ 0xffffffff, buf, bytes) ^ 0xffffffff;
}

uint64_t HELPER(crc32c_64)(uint64_t acc, uint64_t val, uint32_t bytes)
{
    uint8_t buf[8];

    stq_le_p(buf, val);

    /* Linux crc32c converts the output to one's complement. */
    return crc32c(acc, buf, bytes) ^ 0xffffffff;
}

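/*
 * The two helpers differ only in the polynomial: CRC32B/H/W/X use the
 * IEEE 802.3 polynomial 0x04C11DB7 (zlib's crc32()), while CRC32CB/CH/CW/CX
 * use the Castagnoli polynomial 0x1EDC6F41 (QEMU's crc32c()).
 */
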
/*
 * AdvSIMD half-precision
 */

#define ADVSIMD_HELPER(name, suffix) HELPER(glue(glue(advsimd_, name), suffix))

#define ADVSIMD_HALFOP(name) \
uint32_t ADVSIMD_HELPER(name, h)(uint32_t a, uint32_t b, void *fpstp) \
{ \
    float_status *fpst = fpstp; \
    return float16_ ## name(a, b, fpst); \
}

ADVSIMD_HALFOP(add)
ADVSIMD_HALFOP(sub)
ADVSIMD_HALFOP(mul)
ADVSIMD_HALFOP(div)
ADVSIMD_HALFOP(min)
ADVSIMD_HALFOP(max)
ADVSIMD_HALFOP(minnum)
ADVSIMD_HALFOP(maxnum)

#define ADVSIMD_TWOHALFOP(name) \
uint32_t ADVSIMD_HELPER(name, 2h)(uint32_t two_a, uint32_t two_b, void *fpstp) \
{ \
    float16 a1, a2, b1, b2; \
    uint32_t r1, r2; \
    float_status *fpst = fpstp; \
    a1 = extract32(two_a, 0, 16); \
    a2 = extract32(two_a, 16, 16); \
    b1 = extract32(two_b, 0, 16); \
    b2 = extract32(two_b, 16, 16); \
    r1 = float16_ ## name(a1, b1, fpst); \
    r2 = float16_ ## name(a2, b2, fpst); \
    return deposit32(r1, 16, 16, r2); \
}

ADVSIMD_TWOHALFOP(add)
ADVSIMD_TWOHALFOP(sub)
ADVSIMD_TWOHALFOP(mul)
ADVSIMD_TWOHALFOP(div)
ADVSIMD_TWOHALFOP(min)
ADVSIMD_TWOHALFOP(max)
ADVSIMD_TWOHALFOP(minnum)
ADVSIMD_TWOHALFOP(maxnum)

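/*
 * For reference, ADVSIMD_TWOHALFOP(add) expands to HELPER(advsimd_add2h),
 * which applies float16_add() independently to the low and high 16-bit
 * lanes of the two 32-bit inputs.
 */
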
/* Data processing - scalar floating-point and advanced SIMD */
static float16 float16_mulx(float16 a, float16 b, void *fpstp)
{
    float_status *fpst = fpstp;

    a = float16_squash_input_denormal(a, fpst);
    b = float16_squash_input_denormal(b, fpst);

    if ((float16_is_zero(a) && float16_is_infinity(b)) ||
        (float16_is_infinity(a) && float16_is_zero(b))) {
        /* 2.0 with the sign bit set to sign(A) XOR sign(B) */
        return make_float16((1U << 14) |
                            ((float16_val(a) ^ float16_val(b)) & (1U << 15)));
    }
    return float16_mul(a, b, fpst);
}

ADVSIMD_HALFOP(mulx)
ADVSIMD_TWOHALFOP(mulx)

/* fused multiply-accumulate */
uint32_t HELPER(advsimd_muladdh)(uint32_t a, uint32_t b, uint32_t c,
                                 void *fpstp)
{
    float_status *fpst = fpstp;
    return float16_muladd(a, b, c, 0, fpst);
}

uint32_t HELPER(advsimd_muladd2h)(uint32_t two_a, uint32_t two_b,
                                  uint32_t two_c, void *fpstp)
{
    float_status *fpst = fpstp;
    float16 a1, a2, b1, b2, c1, c2;
    uint32_t r1, r2;

    a1 = extract32(two_a, 0, 16);
    a2 = extract32(two_a, 16, 16);
    b1 = extract32(two_b, 0, 16);
    b2 = extract32(two_b, 16, 16);
    c1 = extract32(two_c, 0, 16);
    c2 = extract32(two_c, 16, 16);
    r1 = float16_muladd(a1, b1, c1, 0, fpst);
    r2 = float16_muladd(a2, b2, c2, 0, fpst);
    return deposit32(r1, 16, 16, r2);
}

/*
 * Floating point comparisons produce an integer result. Softfloat
 * routines return float_relation types which we convert to the 0/-1
 * Neon requires.
 */
#define ADVSIMD_CMPRES(test) (test) ? 0xffff : 0

uint32_t HELPER(advsimd_ceq_f16)(uint32_t a, uint32_t b, void *fpstp)
{
    float_status *fpst = fpstp;
    int compare = float16_compare_quiet(a, b, fpst);
    return ADVSIMD_CMPRES(compare == float_relation_equal);
}

uint32_t HELPER(advsimd_cge_f16)(uint32_t a, uint32_t b, void *fpstp)
{
    float_status *fpst = fpstp;
    int compare = float16_compare(a, b, fpst);
    return ADVSIMD_CMPRES(compare == float_relation_greater ||
                          compare == float_relation_equal);
}

uint32_t HELPER(advsimd_cgt_f16)(uint32_t a, uint32_t b, void *fpstp)
{
    float_status *fpst = fpstp;
    int compare = float16_compare(a, b, fpst);
    return ADVSIMD_CMPRES(compare == float_relation_greater);
}

uint32_t HELPER(advsimd_acge_f16)(uint32_t a, uint32_t b, void *fpstp)
{
    float_status *fpst = fpstp;
    float16 f0 = float16_abs(a);
    float16 f1 = float16_abs(b);
    int compare = float16_compare(f0, f1, fpst);
    return ADVSIMD_CMPRES(compare == float_relation_greater ||
                          compare == float_relation_equal);
}

uint32_t HELPER(advsimd_acgt_f16)(uint32_t a, uint32_t b, void *fpstp)
{
    float_status *fpst = fpstp;
    float16 f0 = float16_abs(a);
    float16 f1 = float16_abs(b);
    int compare = float16_compare(f0, f1, fpst);
    return ADVSIMD_CMPRES(compare == float_relation_greater);
}

/* round to integral */
uint32_t HELPER(advsimd_rinth_exact)(uint32_t x, void *fp_status)
{
    return float16_round_to_int(x, fp_status);
}

uint32_t HELPER(advsimd_rinth)(uint32_t x, void *fp_status)
{
    int old_flags = get_float_exception_flags(fp_status), new_flags;
    float16 ret;

    ret = float16_round_to_int(x, fp_status);

    /* Suppress any inexact exceptions the conversion produced */
    if (!(old_flags & float_flag_inexact)) {
        new_flags = get_float_exception_flags(fp_status);
        set_float_exception_flags(new_flags & ~float_flag_inexact, fp_status);
    }

    return ret;
}

/*
 * Half-precision floating point conversion functions
 *
 * There are a multitude of conversion functions with various
 * different rounding modes. This is dealt with by the calling code
 * setting the mode appropriately before calling the helper.
 */

uint32_t HELPER(advsimd_f16tosinth)(uint32_t a, void *fpstp)
{
    float_status *fpst = fpstp;

    /* Invalid if we are passed a NaN */
    if (float16_is_any_nan(a)) {
        float_raise(float_flag_invalid, fpst);
        return 0;
    }
    return float16_to_int16(a, fpst);
}

uint32_t HELPER(advsimd_f16touinth)(uint32_t a, void *fpstp)
{
    float_status *fpst = fpstp;

    /* Invalid if we are passed a NaN */
    if (float16_is_any_nan(a)) {
        float_raise(float_flag_invalid, fpst);
        return 0;
    }
    return float16_to_uint16(a, fpst);
}

static int el_from_spsr(uint32_t spsr)
{
    /* Return the exception level that this SPSR is requesting a return to,
     * or -1 if it is invalid (an illegal return)
     */
    if (spsr & PSTATE_nRW) {
        switch (spsr & CPSR_M) {
        case ARM_CPU_MODE_USR:
            return 0;
        case ARM_CPU_MODE_HYP:
            return 2;
        case ARM_CPU_MODE_FIQ:
        case ARM_CPU_MODE_IRQ:
        case ARM_CPU_MODE_SVC:
        case ARM_CPU_MODE_ABT:
        case ARM_CPU_MODE_UND:
        case ARM_CPU_MODE_SYS:
            return 1;
        case ARM_CPU_MODE_MON:
            /* Returning to Mon from AArch64 is never possible,
             * so this is an illegal return.
             */
        default:
            return -1;
        }
    } else {
        if (extract32(spsr, 1, 1)) {
            /* Return with reserved M[1] bit set */
            return -1;
        }
        if (extract32(spsr, 0, 4) == 1) {
            /* return to EL0 with M[0] bit set */
            return -1;
        }
        return extract32(spsr, 2, 2);
    }
}

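/*
 * Example: an AArch64 SPSR value with M[3:0] == 0b0101 (EL1h) yields 1,
 * whereas M[3:0] == 0b0001 (EL0 with the SP-select bit set) is rejected
 * as an illegal return, as is anything with the reserved M[1] bit set.
 */
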
static void cpsr_write_from_spsr_elx(CPUARMState *env,
                                     uint32_t val)
{
    uint32_t mask;

    /* Save SPSR_ELx.SS into PSTATE. */
    env->pstate = (env->pstate & ~PSTATE_SS) | (val & PSTATE_SS);
    val &= ~PSTATE_SS;

    /* Move DIT to the correct location for CPSR */
    if (val & PSTATE_DIT) {
        val &= ~PSTATE_DIT;
        val |= CPSR_DIT;
    }

    mask = aarch32_cpsr_valid_mask(env->features,
                                   &env_archcpu(env)->isar);
    cpsr_write(env, val, mask, CPSRWriteRaw);
}

void HELPER(exception_return)(CPUARMState *env, uint64_t new_pc)
{
    int cur_el = arm_current_el(env);
    unsigned int spsr_idx = aarch64_banked_spsr_index(cur_el);
    uint32_t spsr = env->banked_spsr[spsr_idx];
    int new_el;
    bool return_to_aa64 = (spsr & PSTATE_nRW) == 0;

    aarch64_save_sp(env, cur_el);

    arm_clear_exclusive(env);

    /* We must squash the PSTATE.SS bit to zero unless both of the
     * following hold:
     *  1. debug exceptions are currently disabled
     *  2. singlestep will be active in the EL we return to
     * We check 1 here and 2 after we've done the pstate/cpsr write() to
     * transition to the EL we're going to.
     */
    if (arm_generate_debug_exceptions(env)) {
        spsr &= ~PSTATE_SS;
    }

    /*
     * FEAT_RME forbids return from EL3 with an invalid security state.
     * We don't need an explicit check for FEAT_RME here because we enforce
     * in scr_write() that you can't set the NSE bit without it.
     */
    if (cur_el == 3 && (env->cp15.scr_el3 & (SCR_NS | SCR_NSE)) == SCR_NSE) {
        goto illegal_return;
    }

    new_el = el_from_spsr(spsr);
    if (new_el == -1) {
        goto illegal_return;
    }
    if (new_el > cur_el || (new_el == 2 && !arm_is_el2_enabled(env))) {
        /* Disallow return to an EL which is unimplemented or higher
         * than the current one.
         */
        goto illegal_return;
    }

    if (new_el != 0 && arm_el_is_aa64(env, new_el) != return_to_aa64) {
        /* Return to an EL which is configured for a different register width */
        goto illegal_return;
    }

    if (new_el == 1 && (arm_hcr_el2_eff(env) & HCR_TGE)) {
        goto illegal_return;
    }

    qemu_mutex_lock_iothread();
    arm_call_pre_el_change_hook(env_archcpu(env));
    qemu_mutex_unlock_iothread();

    if (!return_to_aa64) {
        env->aarch64 = false;
        /* We do a raw CPSR write because aarch64_sync_64_to_32()
         * will sort the register banks out for us, and we've already
         * caught all the bad-mode cases in el_from_spsr().
         */
        cpsr_write_from_spsr_elx(env, spsr);
        if (!arm_singlestep_active(env)) {
            env->pstate &= ~PSTATE_SS;
        }
        aarch64_sync_64_to_32(env);

        if (spsr & CPSR_T) {
            env->regs[15] = new_pc & ~0x1;
        } else {
            env->regs[15] = new_pc & ~0x3;
        }
        helper_rebuild_hflags_a32(env, new_el);
        qemu_log_mask(CPU_LOG_INT, "Exception return from AArch64 EL%d to "
                      "AArch32 EL%d PC 0x%" PRIx32 "\n",
                      cur_el, new_el, env->regs[15]);
    } else {
        int tbii;

        env->aarch64 = true;
        spsr &= aarch64_pstate_valid_mask(&env_archcpu(env)->isar);
        pstate_write(env, spsr);
        if (!arm_singlestep_active(env)) {
            env->pstate &= ~PSTATE_SS;
        }
        aarch64_restore_sp(env, new_el);
        helper_rebuild_hflags_a64(env, new_el);

        /*
         * Apply TBI to the exception return address. We had to delay this
         * until after we selected the new EL, so that we could select the
         * correct TBI+TBID bits. This is made easier by waiting until after
         * the hflags rebuild, since we can pull the composite TBII field
         * from there.
         */
        tbii = EX_TBFLAG_A64(env->hflags, TBII);
        if ((tbii >> extract64(new_pc, 55, 1)) & 1) {
            /* TBI is enabled. */
            int core_mmu_idx = cpu_mmu_index(env, false);
            if (regime_has_2_ranges(core_to_aa64_mmu_idx(core_mmu_idx))) {
                new_pc = sextract64(new_pc, 0, 56);
            } else {
                new_pc = extract64(new_pc, 0, 56);
            }
        }
        env->pc = new_pc;

        qemu_log_mask(CPU_LOG_INT, "Exception return from AArch64 EL%d to "
                      "AArch64 EL%d PC 0x%" PRIx64 "\n",
                      cur_el, new_el, env->pc);
    }

    /*
     * Note that cur_el can never be 0. If new_el is 0, then
     * el0_a64 is return_to_aa64, else el0_a64 is ignored.
     */
    aarch64_sve_change_el(env, cur_el, new_el, return_to_aa64);

    qemu_mutex_lock_iothread();
    arm_call_el_change_hook(env_archcpu(env));
    qemu_mutex_unlock_iothread();

    return;

illegal_return:
    /* Illegal return events of various kinds have architecturally
     * mandated behaviour:
     * restore NZCV and DAIF from SPSR_ELx
     * otherwise leave pstate unchanged
     * restore PC from ELR_ELx
     * no change to exception level, execution state or stack pointer
     */
    env->pstate |= PSTATE_IL;
    env->pc = new_pc;
    spsr &= PSTATE_NZCV | PSTATE_DAIF;
    spsr |= pstate_read(env) & ~(PSTATE_NZCV | PSTATE_DAIF);
    pstate_write(env, spsr);
    if (!arm_singlestep_active(env)) {
        env->pstate &= ~PSTATE_SS;
    }
    helper_rebuild_hflags_a64(env, cur_el);
    qemu_log_mask(LOG_GUEST_ERROR, "Illegal exception return at EL%d: "
                  "resuming execution at 0x%" PRIx64 "\n", cur_el, env->pc);
}

/*
 * Square Root and Reciprocal square root
 */

uint32_t HELPER(sqrt_f16)(uint32_t a, void *fpstp)
{
    float_status *s = fpstp;

    return float16_sqrt(a, s);
}

void HELPER(dc_zva)(CPUARMState *env, uint64_t vaddr_in)
{
    /*
     * Implement DC ZVA, which zeroes a fixed-length block of memory.
     * Note that we do not implement the (architecturally mandated)
     * alignment fault for attempts to use this on Device memory
     * (which matches the usual QEMU behaviour of not implementing either
     * alignment faults or any memory attribute handling).
     */
    int blocklen = 4 << env_archcpu(env)->dcz_blocksize;
    uint64_t vaddr = vaddr_in & ~(blocklen - 1);
    int mmu_idx = cpu_mmu_index(env, false);
    void *mem;

    /*
     * Trapless lookup. In addition to actual invalid page, may
     * return NULL for I/O, watchpoints, clean pages, etc.
     */
    mem = tlb_vaddr_to_host(env, vaddr, MMU_DATA_STORE, mmu_idx);

#ifndef CONFIG_USER_ONLY
    if (unlikely(!mem)) {
        uintptr_t ra = GETPC();

        /*
         * Trap if accessing an invalid page. DC_ZVA requires that we supply
         * the original pointer for an invalid page. But watchpoints require
         * that we probe the actual space. So do both.
         */
        (void) probe_write(env, vaddr_in, 1, mmu_idx, ra);
        mem = probe_write(env, vaddr, blocklen, mmu_idx, ra);

        if (unlikely(!mem)) {
            /*
             * The only remaining reason for mem == NULL is I/O.
             * Just do a series of byte writes as the architecture demands.
             */
            for (int i = 0; i < blocklen; i++) {
                cpu_stb_mmuidx_ra(env, vaddr + i, 0, mmu_idx, ra);
            }
            return;
        }
    }
#endif

    memset(mem, 0, blocklen);
}

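/*
 * The block length comes from DCZID_EL0.BS (dcz_blocksize is log2 of the
 * block size in words), so the common dcz_blocksize of 4 gives
 * 4 << 4 = 64 bytes per DC ZVA.
 */
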
void HELPER(unaligned_access)(CPUARMState *env, uint64_t addr,
                              uint32_t access_type, uint32_t mmu_idx)
{
    arm_cpu_do_unaligned_access(env_cpu(env), addr, access_type,
                                mmu_idx, GETPC());
}

/* Memory operations (memset, memmove, memcpy) */

/*
 * Return true if the CPY* and SET* insns can execute; compare
 * pseudocode CheckMOPSEnabled(), though we refactor it a little.
 */
static bool mops_enabled(CPUARMState *env)
{
    int el = arm_current_el(env);

    if (el < 2 &&
        (arm_hcr_el2_eff(env) & (HCR_E2H | HCR_TGE)) != (HCR_E2H | HCR_TGE) &&
        !(arm_hcrx_el2_eff(env) & HCRX_MSCEN)) {
        return false;
    }

    if (el == 0) {
        if (!el_is_in_host(env, 0)) {
            return env->cp15.sctlr_el[1] & SCTLR_MSCEN;
        } else {
            return env->cp15.sctlr_el[2] & SCTLR_MSCEN;
        }
    }
    return true;
}

static void check_mops_enabled(CPUARMState *env, uintptr_t ra)
{
    if (!mops_enabled(env)) {
        raise_exception_ra(env, EXCP_UDEF, syn_uncategorized(),
                           exception_target_el(env), ra);
    }
}

/*
 * Return the target exception level for an exception due
 * to mismatched arguments in a FEAT_MOPS copy or set.
 * Compare pseudocode MismatchedCpySetTargetEL()
 */
static int mops_mismatch_exception_target_el(CPUARMState *env)
{
    int el = arm_current_el(env);

    if (el == 2) {
        return 2;
    }
    if (el == 0 && (arm_hcr_el2_eff(env) & HCR_TGE)) {
        return 2;
    }
    if (el == 1 && (arm_hcrx_el2_eff(env) & HCRX_MCE2)) {
        return 2;
    }
    return 1;
}

/*
 * Check whether an M or E instruction was executed with a CF value
 * indicating the wrong option for this implementation.
 * Assumes we are always Option A.
 */
static void check_mops_wrong_option(CPUARMState *env, uint32_t syndrome,
                                    uintptr_t ra)
{
    if (env->CF == 0) {
        syndrome |= 1 << 17; /* Set the wrong-option bit */
        raise_exception_ra(env, EXCP_UDEF, syndrome,
                           mops_mismatch_exception_target_el(env), ra);
    }
}

/*
 * Return the maximum number of bytes we can transfer starting at addr
 * without crossing a page boundary.
 */
static uint64_t page_limit(uint64_t addr)
{
    return TARGET_PAGE_ALIGN(addr + 1) - addr;
}

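/*
 * e.g. with 4 KiB target pages, page_limit(0x1000) is 0x1000 (a whole
 * page is available) but page_limit(0x1ffc) is 4, because only four
 * bytes remain before the next page boundary.
 */
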
/*
 * Perform part of a memory set on an area of guest memory starting at
 * toaddr (a dirty address) and extending for setsize bytes.
 *
 * Returns the number of bytes actually set, which might be less than
 * setsize; the caller should loop until the whole set has been done.
 * The caller should ensure that the guest registers are correct
 * for the possibility that the first byte of the set encounters
 * an exception or watchpoint. We guarantee not to take any faults
 * for bytes other than the first.
 */
static uint64_t set_step(CPUARMState *env, uint64_t toaddr,
                         uint64_t setsize, uint32_t data, int memidx,
                         uint32_t *mtedesc, uintptr_t ra)
{
    void *mem;

    setsize = MIN(setsize, page_limit(toaddr));
    if (*mtedesc) {
        uint64_t mtesize = mte_mops_probe(env, toaddr, setsize, *mtedesc);
        if (mtesize == 0) {
            /* Trap, or not. All CPU state is up to date */
            mte_check_fail(env, *mtedesc, toaddr, ra);
            /* Continue, with no further MTE checks required */
            *mtedesc = 0;
        } else {
            /* Advance to the end, or to the tag mismatch */
            setsize = MIN(setsize, mtesize);
        }
    }

    toaddr = useronly_clean_ptr(toaddr);
    /*
     * Trapless lookup: returns NULL for invalid page, I/O,
     * watchpoints, clean pages, etc.
     */
    mem = tlb_vaddr_to_host(env, toaddr, MMU_DATA_STORE, memidx);

#ifndef CONFIG_USER_ONLY
    if (unlikely(!mem)) {
        /*
         * Slow-path: just do one byte write. This will handle the
         * watchpoint, invalid page, etc handling correctly.
         * For clean code pages, the next iteration will see
         * the page dirty and will use the fast path.
         */
        cpu_stb_mmuidx_ra(env, toaddr, data, memidx, ra);
        return 1;
    }
#endif
    /* Easy case: just memset the host memory */
    memset(mem, data, setsize);
    return setsize;
}

typedef uint64_t StepFn(CPUARMState *env, uint64_t toaddr,
                        uint64_t setsize, uint32_t data,
                        int memidx, uint32_t *mtedesc, uintptr_t ra);

/* Extract register numbers from a MOPS exception syndrome value */
static int mops_destreg(uint32_t syndrome)
{
    return extract32(syndrome, 10, 5);
}

static int mops_srcreg(uint32_t syndrome)
{
    return extract32(syndrome, 5, 5);
}

static int mops_sizereg(uint32_t syndrome)
{
    return extract32(syndrome, 0, 5);
}

/*
 * Return true if TCMA and TBI bits mean we need to do MTE checks.
 * We only need to do this once per MOPS insn, not for every page.
 */
static bool mte_checks_needed(uint64_t ptr, uint32_t desc)
{
    int bit55 = extract64(ptr, 55, 1);

    /*
     * Note that tbi_check() returns true for "access checked" but
     * tcma_check() returns true for "access unchecked".
     */
    if (!tbi_check(desc, bit55)) {
        return false;
    }
    return !tcma_check(desc, bit55, allocation_tag_from_addr(ptr));
}

/*
 * For the Memory Set operation, our implementation chooses
 * always to use "option A", where we update Xd to the final
 * address in the SETP insn, and set Xn to be -(bytes remaining).
 * On SETM and SETE insns we only need update Xn.
 *
 * @env: CPU
 * @syndrome: syndrome value for mismatch exceptions
 * (also contains the register numbers we need to use)
 * @mtedesc: MTE descriptor word
 * @stepfn: function which does a single part of the set operation
 * @is_setg: true if this is the tag-setting SETG variant
 */
static void do_setp(CPUARMState *env, uint32_t syndrome, uint32_t mtedesc,
                    StepFn *stepfn, bool is_setg, uintptr_t ra)
{
    /* Prologue: we choose to do up to the next page boundary */
    int rd = mops_destreg(syndrome);
    int rs = mops_srcreg(syndrome);
    int rn = mops_sizereg(syndrome);
    uint8_t data = env->xregs[rs];
    uint32_t memidx = FIELD_EX32(mtedesc, MTEDESC, MIDX);
    uint64_t toaddr = env->xregs[rd];
    uint64_t setsize = env->xregs[rn];
    uint64_t stagesetsize, step;

    check_mops_enabled(env, ra);

    if (setsize > INT64_MAX) {
        setsize = INT64_MAX;
    }

    if (!mte_checks_needed(toaddr, mtedesc)) {
        mtedesc = 0;
    }

    stagesetsize = MIN(setsize, page_limit(toaddr));
    while (stagesetsize) {
        env->xregs[rd] = toaddr;
        env->xregs[rn] = setsize;
        step = stepfn(env, toaddr, stagesetsize, data, memidx, &mtedesc, ra);
        toaddr += step;
        setsize -= step;
        stagesetsize -= step;
    }
    /* Insn completed, so update registers to the Option A format */
    env->xregs[rd] = toaddr + setsize;
    env->xregs[rn] = -setsize;

    /* Set NZCV = 0000 to indicate we are an Option A implementation */
    env->NF = 0;
    env->ZF = 1; /* our env->ZF encoding is inverted */
    env->CF = 0;
    env->VF = 0;
    return;
}

void HELPER(setp)(CPUARMState *env, uint32_t syndrome, uint32_t mtedesc)
{
    do_setp(env, syndrome, mtedesc, set_step, false, GETPC());
}

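/*
 * Worked example of the Option A convention: SETP with Xd == 0x8000 and
 * Xn == 0x3000 always finishes with Xd == 0xb000 (the final address) and
 * Xn == -0x3000 plus however many bytes the prologue already set, so SETM
 * and SETE only need to advance Xn towards zero.
 */
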
static void do_setm(CPUARMState *env, uint32_t syndrome, uint32_t mtedesc,
                    StepFn *stepfn, bool is_setg, uintptr_t ra)
{
    /* Main: we choose to do all the full-page chunks */
    CPUState *cs = env_cpu(env);
    int rd = mops_destreg(syndrome);
    int rs = mops_srcreg(syndrome);
    int rn = mops_sizereg(syndrome);
    uint8_t data = env->xregs[rs];
    uint64_t toaddr = env->xregs[rd] + env->xregs[rn];
    uint64_t setsize = -env->xregs[rn];
    uint32_t memidx = FIELD_EX32(mtedesc, MTEDESC, MIDX);
    uint64_t step, stagesetsize;

    check_mops_enabled(env, ra);

    /*
     * We're allowed to NOP out "no data to copy" before the consistency
     * checks; we choose to do so.
     */
    if (env->xregs[rn] == 0) {
        return;
    }

    check_mops_wrong_option(env, syndrome, ra);

    /*
     * Our implementation will work fine even if we have an unaligned
     * destination address, and because we update Xn every time around
     * the loop below and the return value from stepfn() may be less
     * than requested, we might find toaddr is unaligned. So we don't
     * have an IMPDEF check for alignment here.
     */

    if (!mte_checks_needed(toaddr, mtedesc)) {
        mtedesc = 0;
    }

    /* Do the actual memset: we leave the last partial page to SETE */
    stagesetsize = setsize & TARGET_PAGE_MASK;
    while (stagesetsize > 0) {
        step = stepfn(env, toaddr, setsize, data, memidx, &mtedesc, ra);
        toaddr += step;
        setsize -= step;
        stagesetsize -= step;
        env->xregs[rn] = -setsize;
        if (stagesetsize > 0 && unlikely(cpu_loop_exit_requested(cs))) {
            cpu_loop_exit_restore(cs, ra);
        }
    }
}

void HELPER(setm)(CPUARMState *env, uint32_t syndrome, uint32_t mtedesc)
{
    do_setm(env, syndrome, mtedesc, set_step, false, GETPC());
}

static void do_sete(CPUARMState *env, uint32_t syndrome, uint32_t mtedesc,
                    StepFn *stepfn, bool is_setg, uintptr_t ra)
{
    /* Epilogue: do the last partial page */
    int rd = mops_destreg(syndrome);
    int rs = mops_srcreg(syndrome);
    int rn = mops_sizereg(syndrome);
    uint8_t data = env->xregs[rs];
    uint64_t toaddr = env->xregs[rd] + env->xregs[rn];
    uint64_t setsize = -env->xregs[rn];
    uint32_t memidx = FIELD_EX32(mtedesc, MTEDESC, MIDX);
    uint64_t step;

    check_mops_enabled(env, ra);

    /*
     * We're allowed to NOP out "no data to copy" before the consistency
     * checks; we choose to do so.
     */
    if (setsize == 0) {
        return;
    }

    check_mops_wrong_option(env, syndrome, ra);

    /*
     * Our implementation has no address alignment requirements, but
     * we do want to enforce the "less than a page" size requirement,
     * so we don't need to have the "check for interrupts" here.
     */
    if (setsize >= TARGET_PAGE_SIZE) {
        raise_exception_ra(env, EXCP_UDEF, syndrome,
                           mops_mismatch_exception_target_el(env), ra);
    }

    if (!mte_checks_needed(toaddr, mtedesc)) {
        mtedesc = 0;
    }

    /* Do the actual memset */
    while (setsize > 0) {
        step = stepfn(env, toaddr, setsize, data, memidx, &mtedesc, ra);
        toaddr += step;
        setsize -= step;
        env->xregs[rn] = -setsize;
    }
}

void HELPER(sete)(CPUARMState *env, uint32_t syndrome, uint32_t mtedesc)
{
    do_sete(env, syndrome, mtedesc, set_step, false, GETPC());
}