2 * MIPS SIMD Architecture Module Instruction emulation helpers for QEMU.
4 * Copyright (c) 2014 Imagination Technologies
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
20 #include "qemu/osdep.h"
22 #include "exec/exec-all.h"
23 #include "exec/helper-proto.h"
/* Data format min and max values.
 * df encodes the element width: 0=byte, 1=half, 2=word, 3=double,
 * so DF_BITS(df) is 8, 16, 32 or 64. */
#define DF_BITS(df) (1 << ((df) + 3))

#define DF_MAX_INT(df) (int64_t)((1LL << (DF_BITS(df) - 1)) - 1)
#define M_MAX_INT(m) (int64_t)((1LL << ((m) - 1)) - 1)

#define DF_MIN_INT(df) (int64_t)(-(1LL << (DF_BITS(df) - 1)))
#define M_MIN_INT(m) (int64_t)(-(1LL << ((m) - 1)))

#define DF_MAX_UINT(df) (uint64_t)(-1ULL >> (64 - DF_BITS(df)))
#define M_MAX_UINT(m) (uint64_t)(-1ULL >> (64 - (m)))

/* Mask x down to the low DF_BITS(df) bits (zero extension). */
#define UNSIGNED(x, df) ((x) & DF_MAX_UINT(df))
/* Sign-extend the low DF_BITS(df) bits of x to 64 bits.  The argument
 * is parenthesized: without it, SIGNED(a + b, df) would cast only 'a'. */
#define SIGNED(x, df) \
    ((((int64_t)(x)) << (64 - DF_BITS(df))) >> (64 - DF_BITS(df)))

/* Element-by-element access macros */
#define DF_ELEMENTS(df) (MSA_WRLEN / DF_BITS(df))
44 static inline void msa_move_v(wr_t *pwd, wr_t *pws)
48 for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) {
49 pwd->d[i] = pws->d[i];
53 #define MSA_FN_IMM8(FUNC, DEST, OPERATION) \
54 void helper_msa_ ## FUNC(CPUMIPSState *env, uint32_t wd, uint32_t ws, \
57 wr_t *pwd = &(env->active_fpu.fpr[wd].wr); \
58 wr_t *pws = &(env->active_fpu.fpr[ws].wr); \
60 for (i = 0; i < DF_ELEMENTS(DF_BYTE); i++) { \
65 MSA_FN_IMM8(andi_b, pwd->b[i], pws->b[i] & i8)
66 MSA_FN_IMM8(ori_b, pwd->b[i], pws->b[i] | i8)
67 MSA_FN_IMM8(nori_b, pwd->b[i], ~(pws->b[i] | i8))
68 MSA_FN_IMM8(xori_b, pwd->b[i], pws->b[i] ^ i8)
70 #define BIT_MOVE_IF_NOT_ZERO(dest, arg1, arg2, df) \
71 UNSIGNED(((dest & (~arg2)) | (arg1 & arg2)), df)
72 MSA_FN_IMM8(bmnzi_b, pwd->b[i],
73 BIT_MOVE_IF_NOT_ZERO(pwd->b[i], pws->b[i], i8, DF_BYTE))
75 #define BIT_MOVE_IF_ZERO(dest, arg1, arg2, df) \
76 UNSIGNED((dest & arg2) | (arg1 & (~arg2)), df)
77 MSA_FN_IMM8(bmzi_b, pwd->b[i],
78 BIT_MOVE_IF_ZERO(pwd->b[i], pws->b[i], i8, DF_BYTE))
80 #define BIT_SELECT(dest, arg1, arg2, df) \
81 UNSIGNED((arg1 & (~dest)) | (arg2 & dest), df)
82 MSA_FN_IMM8(bseli_b, pwd->b[i],
83 BIT_SELECT(pwd->b[i], pws->b[i], i8, DF_BYTE))
87 #define SHF_POS(i, imm) (((i) & 0xfc) + (((imm) >> (2 * ((i) & 0x03))) & 0x03))
89 void helper_msa_shf_df(CPUMIPSState *env, uint32_t df, uint32_t wd,
90 uint32_t ws, uint32_t imm)
92 wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
93 wr_t *pws = &(env->active_fpu.fpr[ws].wr);
99 for (i = 0; i < DF_ELEMENTS(DF_BYTE); i++) {
100 pwx->b[i] = pws->b[SHF_POS(i, imm)];
104 for (i = 0; i < DF_ELEMENTS(DF_HALF); i++) {
105 pwx->h[i] = pws->h[SHF_POS(i, imm)];
109 for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) {
110 pwx->w[i] = pws->w[SHF_POS(i, imm)];
116 msa_move_v(pwd, pwx);
119 #define MSA_FN_VECTOR(FUNC, DEST, OPERATION) \
120 void helper_msa_ ## FUNC(CPUMIPSState *env, uint32_t wd, uint32_t ws, \
123 wr_t *pwd = &(env->active_fpu.fpr[wd].wr); \
124 wr_t *pws = &(env->active_fpu.fpr[ws].wr); \
125 wr_t *pwt = &(env->active_fpu.fpr[wt].wr); \
127 for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) { \
132 MSA_FN_VECTOR(and_v, pwd->d[i], pws->d[i] & pwt->d[i])
133 MSA_FN_VECTOR(or_v, pwd->d[i], pws->d[i] | pwt->d[i])
134 MSA_FN_VECTOR(nor_v, pwd->d[i], ~(pws->d[i] | pwt->d[i]))
135 MSA_FN_VECTOR(xor_v, pwd->d[i], pws->d[i] ^ pwt->d[i])
136 MSA_FN_VECTOR(bmnz_v, pwd->d[i],
137 BIT_MOVE_IF_NOT_ZERO(pwd->d[i], pws->d[i], pwt->d[i], DF_DOUBLE))
138 MSA_FN_VECTOR(bmz_v, pwd->d[i],
139 BIT_MOVE_IF_ZERO(pwd->d[i], pws->d[i], pwt->d[i], DF_DOUBLE))
140 MSA_FN_VECTOR(bsel_v, pwd->d[i],
141 BIT_SELECT(pwd->d[i], pws->d[i], pwt->d[i], DF_DOUBLE))
142 #undef BIT_MOVE_IF_NOT_ZERO
143 #undef BIT_MOVE_IF_ZERO
/* Modulo element-wise add. */
static inline int64_t msa_addv_df(uint32_t df, int64_t arg1, int64_t arg2)
{
    return arg1 + arg2;
}

/* Modulo element-wise subtract. */
static inline int64_t msa_subv_df(uint32_t df, int64_t arg1, int64_t arg2)
{
    return arg1 - arg2;
}

/* Compare equal: all-ones when true, zero otherwise. */
static inline int64_t msa_ceq_df(uint32_t df, int64_t arg1, int64_t arg2)
{
    return arg1 == arg2 ? -1 : 0;
}

/* Signed compare less-than-or-equal: all-ones when true. */
static inline int64_t msa_cle_s_df(uint32_t df, int64_t arg1, int64_t arg2)
{
    return arg1 <= arg2 ? -1 : 0;
}
/* Unsigned compare less-than-or-equal: all-ones when true. */
static inline int64_t msa_cle_u_df(uint32_t df, int64_t arg1, int64_t arg2)
{
    uint64_t u_arg1 = UNSIGNED(arg1, df);
    uint64_t u_arg2 = UNSIGNED(arg2, df);
    return u_arg1 <= u_arg2 ? -1 : 0;
}

/* Signed compare less-than: all-ones when true. */
static inline int64_t msa_clt_s_df(uint32_t df, int64_t arg1, int64_t arg2)
{
    return arg1 < arg2 ? -1 : 0;
}

/* Unsigned compare less-than: all-ones when true. */
static inline int64_t msa_clt_u_df(uint32_t df, int64_t arg1, int64_t arg2)
{
    uint64_t u_arg1 = UNSIGNED(arg1, df);
    uint64_t u_arg2 = UNSIGNED(arg2, df);
    return u_arg1 < u_arg2 ? -1 : 0;
}

/* Signed maximum. */
static inline int64_t msa_max_s_df(uint32_t df, int64_t arg1, int64_t arg2)
{
    return arg1 > arg2 ? arg1 : arg2;
}

/* Unsigned maximum; the original (sign-extended) value is returned. */
static inline int64_t msa_max_u_df(uint32_t df, int64_t arg1, int64_t arg2)
{
    uint64_t u_arg1 = UNSIGNED(arg1, df);
    uint64_t u_arg2 = UNSIGNED(arg2, df);
    return u_arg1 > u_arg2 ? arg1 : arg2;
}

/* Signed minimum. */
static inline int64_t msa_min_s_df(uint32_t df, int64_t arg1, int64_t arg2)
{
    return arg1 < arg2 ? arg1 : arg2;
}

/* Unsigned minimum; the original (sign-extended) value is returned. */
static inline int64_t msa_min_u_df(uint32_t df, int64_t arg1, int64_t arg2)
{
    uint64_t u_arg1 = UNSIGNED(arg1, df);
    uint64_t u_arg2 = UNSIGNED(arg2, df);
    return u_arg1 < u_arg2 ? arg1 : arg2;
}
210 #define MSA_BINOP_IMM_DF(helper, func) \
211 void helper_msa_ ## helper ## _df(CPUMIPSState *env, uint32_t df, \
212 uint32_t wd, uint32_t ws, int32_t u5) \
214 wr_t *pwd = &(env->active_fpu.fpr[wd].wr); \
215 wr_t *pws = &(env->active_fpu.fpr[ws].wr); \
220 for (i = 0; i < DF_ELEMENTS(DF_BYTE); i++) { \
221 pwd->b[i] = msa_ ## func ## _df(df, pws->b[i], u5); \
225 for (i = 0; i < DF_ELEMENTS(DF_HALF); i++) { \
226 pwd->h[i] = msa_ ## func ## _df(df, pws->h[i], u5); \
230 for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) { \
231 pwd->w[i] = msa_ ## func ## _df(df, pws->w[i], u5); \
235 for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) { \
236 pwd->d[i] = msa_ ## func ## _df(df, pws->d[i], u5); \
244 MSA_BINOP_IMM_DF(addvi, addv)
245 MSA_BINOP_IMM_DF(subvi, subv)
246 MSA_BINOP_IMM_DF(ceqi, ceq)
247 MSA_BINOP_IMM_DF(clei_s, cle_s)
248 MSA_BINOP_IMM_DF(clei_u, cle_u)
249 MSA_BINOP_IMM_DF(clti_s, clt_s)
250 MSA_BINOP_IMM_DF(clti_u, clt_u)
251 MSA_BINOP_IMM_DF(maxi_s, max_s)
252 MSA_BINOP_IMM_DF(maxi_u, max_u)
253 MSA_BINOP_IMM_DF(mini_s, min_s)
254 MSA_BINOP_IMM_DF(mini_u, min_u)
255 #undef MSA_BINOP_IMM_DF
257 void helper_msa_ldi_df(CPUMIPSState *env, uint32_t df, uint32_t wd,
260 wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
265 for (i = 0; i < DF_ELEMENTS(DF_BYTE); i++) {
266 pwd->b[i] = (int8_t)s10;
270 for (i = 0; i < DF_ELEMENTS(DF_HALF); i++) {
271 pwd->h[i] = (int16_t)s10;
275 for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) {
276 pwd->w[i] = (int32_t)s10;
280 for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) {
281 pwd->d[i] = (int64_t)s10;
/* Data format bit position and unsigned values */
#define BIT_POSITION(x, df) ((uint64_t)(x) % DF_BITS(df))

/* Shift left logical; shift count is arg2 modulo the element width. */
static inline int64_t msa_sll_df(uint32_t df, int64_t arg1, int64_t arg2)
{
    int32_t b_arg2 = BIT_POSITION(arg2, df);
    return arg1 << b_arg2;
}

/* Shift right arithmetic (operates on the sign-extended value). */
static inline int64_t msa_sra_df(uint32_t df, int64_t arg1, int64_t arg2)
{
    int32_t b_arg2 = BIT_POSITION(arg2, df);
    return arg1 >> b_arg2;
}

/* Shift right logical (operates on the zero-extended value). */
static inline int64_t msa_srl_df(uint32_t df, int64_t arg1, int64_t arg2)
{
    uint64_t u_arg1 = UNSIGNED(arg1, df);
    int32_t b_arg2 = BIT_POSITION(arg2, df);
    return u_arg1 >> b_arg2;
}

/* Clear the bit selected by arg2. */
static inline int64_t msa_bclr_df(uint32_t df, int64_t arg1, int64_t arg2)
{
    int32_t b_arg2 = BIT_POSITION(arg2, df);
    return UNSIGNED(arg1 & (~(1LL << b_arg2)), df);
}

/* Set the bit selected by arg2. */
static inline int64_t msa_bset_df(uint32_t df, int64_t arg1,
                                  int64_t arg2)
{
    int32_t b_arg2 = BIT_POSITION(arg2, df);
    return UNSIGNED(arg1 | (1LL << b_arg2), df);
}

/* Toggle the bit selected by arg2. */
static inline int64_t msa_bneg_df(uint32_t df, int64_t arg1, int64_t arg2)
{
    int32_t b_arg2 = BIT_POSITION(arg2, df);
    return UNSIGNED(arg1 ^ (1LL << b_arg2), df);
}
/* Bit insert left: copy the top sh_d bits of arg1 into dest; the bit
 * count is BIT_POSITION(arg2) + 1. */
static inline int64_t msa_binsl_df(uint32_t df, int64_t dest, int64_t arg1,
                                   int64_t arg2)
{
    uint64_t u_arg1 = UNSIGNED(arg1, df);
    uint64_t u_dest = UNSIGNED(dest, df);
    int32_t sh_d = BIT_POSITION(arg2, df) + 1;
    int32_t sh_a = DF_BITS(df) - sh_d;
    if (sh_d == DF_BITS(df)) {
        /* Whole element replaced; avoids a full-width (UB) shift below. */
        return u_arg1;
    } else {
        return UNSIGNED(UNSIGNED(u_dest << sh_d, df) >> sh_d, df) |
               UNSIGNED(UNSIGNED(u_arg1 >> sh_a, df) << sh_a, df);
    }
}

/* Bit insert right: copy the low sh_d bits of arg1 into dest. */
static inline int64_t msa_binsr_df(uint32_t df, int64_t dest, int64_t arg1,
                                   int64_t arg2)
{
    uint64_t u_arg1 = UNSIGNED(arg1, df);
    uint64_t u_dest = UNSIGNED(dest, df);
    int32_t sh_d = BIT_POSITION(arg2, df) + 1;
    int32_t sh_a = DF_BITS(df) - sh_d;
    if (sh_d == DF_BITS(df)) {
        return u_arg1;
    } else {
        return UNSIGNED(UNSIGNED(u_dest >> sh_d, df) << sh_d, df) |
               UNSIGNED(UNSIGNED(u_arg1 << sh_a, df) >> sh_a, df);
    }
}
/* Saturate to a signed (m+1)-bit range. */
static inline int64_t msa_sat_s_df(uint32_t df, int64_t arg, uint32_t m)
{
    return arg < M_MIN_INT(m + 1) ? M_MIN_INT(m + 1) :
           arg > M_MAX_INT(m + 1) ? M_MAX_INT(m + 1) :
                                    arg;
}

/* Saturate to an unsigned (m+1)-bit range. */
static inline int64_t msa_sat_u_df(uint32_t df, int64_t arg, uint32_t m)
{
    uint64_t u_arg = UNSIGNED(arg, df);
    return u_arg < M_MAX_UINT(m + 1) ? (int64_t)u_arg
                                     : (int64_t)M_MAX_UINT(m + 1);
}

/* Shift right arithmetic with rounding (adds the last bit shifted out). */
static inline int64_t msa_srar_df(uint32_t df, int64_t arg1, int64_t arg2)
{
    int32_t b_arg2 = BIT_POSITION(arg2, df);

    if (b_arg2 == 0) {
        return arg1;
    } else {
        int64_t r_bit = (arg1 >> (b_arg2 - 1)) & 1;
        return (arg1 >> b_arg2) + r_bit;
    }
}

/* Shift right logical with rounding. */
static inline int64_t msa_srlr_df(uint32_t df, int64_t arg1, int64_t arg2)
{
    uint64_t u_arg1 = UNSIGNED(arg1, df);
    int32_t b_arg2 = BIT_POSITION(arg2, df);

    if (b_arg2 == 0) {
        return u_arg1;
    } else {
        uint64_t r_bit = (u_arg1 >> (b_arg2 - 1)) & 1;
        return (u_arg1 >> b_arg2) + r_bit;
    }
}
397 #define MSA_BINOP_IMMU_DF(helper, func) \
398 void helper_msa_ ## helper ## _df(CPUMIPSState *env, uint32_t df, uint32_t wd, \
399 uint32_t ws, uint32_t u5) \
401 wr_t *pwd = &(env->active_fpu.fpr[wd].wr); \
402 wr_t *pws = &(env->active_fpu.fpr[ws].wr); \
407 for (i = 0; i < DF_ELEMENTS(DF_BYTE); i++) { \
408 pwd->b[i] = msa_ ## func ## _df(df, pws->b[i], u5); \
412 for (i = 0; i < DF_ELEMENTS(DF_HALF); i++) { \
413 pwd->h[i] = msa_ ## func ## _df(df, pws->h[i], u5); \
417 for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) { \
418 pwd->w[i] = msa_ ## func ## _df(df, pws->w[i], u5); \
422 for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) { \
423 pwd->d[i] = msa_ ## func ## _df(df, pws->d[i], u5); \
431 MSA_BINOP_IMMU_DF(slli, sll)
432 MSA_BINOP_IMMU_DF(srai, sra)
433 MSA_BINOP_IMMU_DF(srli, srl)
434 MSA_BINOP_IMMU_DF(bclri, bclr)
435 MSA_BINOP_IMMU_DF(bseti, bset)
436 MSA_BINOP_IMMU_DF(bnegi, bneg)
437 MSA_BINOP_IMMU_DF(sat_s, sat_s)
438 MSA_BINOP_IMMU_DF(sat_u, sat_u)
439 MSA_BINOP_IMMU_DF(srari, srar)
440 MSA_BINOP_IMMU_DF(srlri, srlr)
441 #undef MSA_BINOP_IMMU_DF
443 #define MSA_TEROP_IMMU_DF(helper, func) \
444 void helper_msa_ ## helper ## _df(CPUMIPSState *env, uint32_t df, \
445 uint32_t wd, uint32_t ws, uint32_t u5) \
447 wr_t *pwd = &(env->active_fpu.fpr[wd].wr); \
448 wr_t *pws = &(env->active_fpu.fpr[ws].wr); \
453 for (i = 0; i < DF_ELEMENTS(DF_BYTE); i++) { \
454 pwd->b[i] = msa_ ## func ## _df(df, pwd->b[i], pws->b[i], \
459 for (i = 0; i < DF_ELEMENTS(DF_HALF); i++) { \
460 pwd->h[i] = msa_ ## func ## _df(df, pwd->h[i], pws->h[i], \
465 for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) { \
466 pwd->w[i] = msa_ ## func ## _df(df, pwd->w[i], pws->w[i], \
471 for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) { \
472 pwd->d[i] = msa_ ## func ## _df(df, pwd->d[i], pws->d[i], \
481 MSA_TEROP_IMMU_DF(binsli, binsl)
482 MSA_TEROP_IMMU_DF(binsri, binsr)
483 #undef MSA_TEROP_IMMU_DF
/* Maximum by absolute value; returns the original signed argument.
 * Magnitudes are computed with unsigned negation so that
 * INT64_MIN does not trigger signed-overflow UB (-INT64_MIN is UB). */
static inline int64_t msa_max_a_df(uint32_t df, int64_t arg1, int64_t arg2)
{
    uint64_t abs_arg1 = arg1 >= 0 ? (uint64_t)arg1 : -(uint64_t)arg1;
    uint64_t abs_arg2 = arg2 >= 0 ? (uint64_t)arg2 : -(uint64_t)arg2;
    return abs_arg1 > abs_arg2 ? arg1 : arg2;
}

/* Minimum by absolute value; returns the original signed argument. */
static inline int64_t msa_min_a_df(uint32_t df, int64_t arg1, int64_t arg2)
{
    uint64_t abs_arg1 = arg1 >= 0 ? (uint64_t)arg1 : -(uint64_t)arg1;
    uint64_t abs_arg2 = arg2 >= 0 ? (uint64_t)arg2 : -(uint64_t)arg2;
    return abs_arg1 < abs_arg2 ? arg1 : arg2;
}

/* Sum of absolute values (modulo arithmetic, no saturation here). */
static inline int64_t msa_add_a_df(uint32_t df, int64_t arg1, int64_t arg2)
{
    uint64_t abs_arg1 = arg1 >= 0 ? (uint64_t)arg1 : -(uint64_t)arg1;
    uint64_t abs_arg2 = arg2 >= 0 ? (uint64_t)arg2 : -(uint64_t)arg2;
    return abs_arg1 + abs_arg2;
}
/* Saturating sum of absolute values: clamps to the signed maximum. */
static inline int64_t msa_adds_a_df(uint32_t df, int64_t arg1, int64_t arg2)
{
    uint64_t max_int = (uint64_t)DF_MAX_INT(df);
    uint64_t abs_arg1 = arg1 >= 0 ? arg1 : -arg1;
    uint64_t abs_arg2 = arg2 >= 0 ? arg2 : -arg2;
    if (abs_arg1 > max_int || abs_arg2 > max_int) {
        return (int64_t)max_int;
    } else {
        return (abs_arg1 < max_int - abs_arg2) ? abs_arg1 + abs_arg2
                                               : max_int;
    }
}

/* Saturating signed add. */
static inline int64_t msa_adds_s_df(uint32_t df, int64_t arg1, int64_t arg2)
{
    int64_t max_int = DF_MAX_INT(df);
    int64_t min_int = DF_MIN_INT(df);
    if (arg1 < 0) {
        return (min_int - arg1 < arg2) ? arg1 + arg2 : min_int;
    } else {
        return (arg2 < max_int - arg1) ? arg1 + arg2 : max_int;
    }
}

/* Saturating unsigned add. */
static inline uint64_t msa_adds_u_df(uint32_t df, uint64_t arg1, uint64_t arg2)
{
    uint64_t max_uint = DF_MAX_UINT(df);
    uint64_t u_arg1 = UNSIGNED(arg1, df);
    uint64_t u_arg2 = UNSIGNED(arg2, df);
    return (u_arg1 < max_uint - u_arg2) ? u_arg1 + u_arg2 : max_uint;
}
/* Signed average, rounding toward negative infinity; written as
 * (a >> 1) + (b >> 1) + carry to avoid overflowing a + b. */
static inline int64_t msa_ave_s_df(uint32_t df, int64_t arg1, int64_t arg2)
{
    /* signed shift */
    return (arg1 >> 1) + (arg2 >> 1) + (arg1 & arg2 & 1);
}

/* Unsigned average, rounding down. */
static inline uint64_t msa_ave_u_df(uint32_t df, uint64_t arg1, uint64_t arg2)
{
    uint64_t u_arg1 = UNSIGNED(arg1, df);
    uint64_t u_arg2 = UNSIGNED(arg2, df);
    /* unsigned shift */
    return (u_arg1 >> 1) + (u_arg2 >> 1) + (u_arg1 & u_arg2 & 1);
}

/* Signed average with rounding (adds one when either low bit is set). */
static inline int64_t msa_aver_s_df(uint32_t df, int64_t arg1, int64_t arg2)
{
    /* signed shift */
    return (arg1 >> 1) + (arg2 >> 1) + ((arg1 | arg2) & 1);
}

/* Unsigned average with rounding. */
static inline uint64_t msa_aver_u_df(uint32_t df, uint64_t arg1, uint64_t arg2)
{
    uint64_t u_arg1 = UNSIGNED(arg1, df);
    uint64_t u_arg2 = UNSIGNED(arg2, df);
    /* unsigned shift */
    return (u_arg1 >> 1) + (u_arg2 >> 1) + ((u_arg1 | u_arg2) & 1);
}
/* Saturating signed subtract. */
static inline int64_t msa_subs_s_df(uint32_t df, int64_t arg1, int64_t arg2)
{
    int64_t max_int = DF_MAX_INT(df);
    int64_t min_int = DF_MIN_INT(df);
    if (arg2 > 0) {
        return (min_int + arg2 < arg1) ? arg1 - arg2 : min_int;
    } else {
        return (arg1 < max_int + arg2) ? arg1 - arg2 : max_int;
    }
}

/* Saturating unsigned subtract (clamps at zero). */
static inline int64_t msa_subs_u_df(uint32_t df, int64_t arg1, int64_t arg2)
{
    uint64_t u_arg1 = UNSIGNED(arg1, df);
    uint64_t u_arg2 = UNSIGNED(arg2, df);
    return (u_arg1 > u_arg2) ? u_arg1 - u_arg2 : 0;
}

/* Saturating subtract of a signed value from an unsigned value. */
static inline int64_t msa_subsus_u_df(uint32_t df, int64_t arg1, int64_t arg2)
{
    uint64_t u_arg1 = UNSIGNED(arg1, df);
    uint64_t max_uint = DF_MAX_UINT(df);
    if (arg2 >= 0) {
        uint64_t u_arg2 = (uint64_t)arg2;
        return (u_arg1 > u_arg2) ?
            (int64_t)(u_arg1 - u_arg2) :
            0;
    } else {
        uint64_t u_arg2 = (uint64_t)(-arg2);
        return (u_arg1 < max_uint - u_arg2) ?
            (int64_t)(u_arg1 + u_arg2) :
            (int64_t)max_uint;
    }
}
/* Signed-saturating subtract of two unsigned values. */
static inline int64_t msa_subsuu_s_df(uint32_t df, int64_t arg1, int64_t arg2)
{
    uint64_t u_arg1 = UNSIGNED(arg1, df);
    uint64_t u_arg2 = UNSIGNED(arg2, df);
    int64_t max_int = DF_MAX_INT(df);
    int64_t min_int = DF_MIN_INT(df);
    if (u_arg1 > u_arg2) {
        return u_arg1 - u_arg2 < (uint64_t)max_int ?
            (int64_t)(u_arg1 - u_arg2) :
            max_int;
    } else {
        return u_arg2 - u_arg1 < (uint64_t)(-min_int) ?
            (int64_t)(u_arg1 - u_arg2) :
            min_int;
    }
}

/* Absolute difference, signed compare. */
static inline int64_t msa_asub_s_df(uint32_t df, int64_t arg1, int64_t arg2)
{
    /* signed compare */
    return (arg1 < arg2) ?
        (uint64_t)(arg2 - arg1) : (uint64_t)(arg1 - arg2);
}

/* Absolute difference, unsigned compare. */
static inline uint64_t msa_asub_u_df(uint32_t df, uint64_t arg1, uint64_t arg2)
{
    uint64_t u_arg1 = UNSIGNED(arg1, df);
    uint64_t u_arg2 = UNSIGNED(arg2, df);
    /* unsigned compare */
    return (u_arg1 < u_arg2) ?
        (uint64_t)(u_arg2 - u_arg1) : (uint64_t)(u_arg1 - u_arg2);
}
/* Modulo multiply. */
static inline int64_t msa_mulv_df(uint32_t df, int64_t arg1, int64_t arg2)
{
    return arg1 * arg2;
}

/* Signed divide; MIN_INT / -1 and division by zero are special-cased
 * to avoid host-side UB. */
static inline int64_t msa_div_s_df(uint32_t df, int64_t arg1, int64_t arg2)
{
    if (arg1 == DF_MIN_INT(df) && arg2 == -1) {
        return DF_MIN_INT(df);
    }
    return arg2 ? arg1 / arg2 : 0;
}

/* Unsigned divide; division by zero yields 0. */
static inline int64_t msa_div_u_df(uint32_t df, int64_t arg1, int64_t arg2)
{
    uint64_t u_arg1 = UNSIGNED(arg1, df);
    uint64_t u_arg2 = UNSIGNED(arg2, df);
    return u_arg2 ? u_arg1 / u_arg2 : 0;
}

/* Signed modulo; MIN_INT % -1 yields 0 (guarded against host UB). */
static inline int64_t msa_mod_s_df(uint32_t df, int64_t arg1, int64_t arg2)
{
    if (arg1 == DF_MIN_INT(df) && arg2 == -1) {
        return 0;
    }
    return arg2 ? arg1 % arg2 : 0;
}

/* Unsigned modulo; modulo by zero yields 0. */
static inline int64_t msa_mod_u_df(uint32_t df, int64_t arg1, int64_t arg2)
{
    uint64_t u_arg1 = UNSIGNED(arg1, df);
    uint64_t u_arg2 = UNSIGNED(arg2, df);
    return u_arg2 ? u_arg1 % u_arg2 : 0;
}
/* Extract the even (low) half-width element of a, sign/zero extended. */
#define SIGNED_EVEN(a, df) \
        ((((int64_t)(a)) << (64 - DF_BITS(df)/2)) >> (64 - DF_BITS(df)/2))

#define UNSIGNED_EVEN(a, df) \
        ((((uint64_t)(a)) << (64 - DF_BITS(df)/2)) >> (64 - DF_BITS(df)/2))

/* Extract the odd (high) half-width element of a. */
#define SIGNED_ODD(a, df) \
        ((((int64_t)(a)) << (64 - DF_BITS(df))) >> (64 - DF_BITS(df)/2))

#define UNSIGNED_ODD(a, df) \
        ((((uint64_t)(a)) << (64 - DF_BITS(df))) >> (64 - DF_BITS(df)/2))

/* Split a into its even (e) and odd (o) half-width elements. */
#define SIGNED_EXTRACT(e, o, a, df)     \
    do {                                \
        e = SIGNED_EVEN(a, df);         \
        o = SIGNED_ODD(a, df);          \
    } while (0)

#define UNSIGNED_EXTRACT(e, o, a, df)   \
    do {                                \
        e = UNSIGNED_EVEN(a, df);       \
        o = UNSIGNED_ODD(a, df);        \
    } while (0)
/* Signed dot product of the two half-width element pairs. */
static inline int64_t msa_dotp_s_df(uint32_t df, int64_t arg1, int64_t arg2)
{
    int64_t even_arg1;
    int64_t even_arg2;
    int64_t odd_arg1;
    int64_t odd_arg2;
    SIGNED_EXTRACT(even_arg1, odd_arg1, arg1, df);
    SIGNED_EXTRACT(even_arg2, odd_arg2, arg2, df);
    return (even_arg1 * even_arg2) + (odd_arg1 * odd_arg2);
}

/* Unsigned dot product of the two half-width element pairs. */
static inline int64_t msa_dotp_u_df(uint32_t df, int64_t arg1, int64_t arg2)
{
    int64_t even_arg1;
    int64_t even_arg2;
    int64_t odd_arg1;
    int64_t odd_arg2;
    UNSIGNED_EXTRACT(even_arg1, odd_arg1, arg1, df);
    UNSIGNED_EXTRACT(even_arg2, odd_arg2, arg2, df);
    return (even_arg1 * even_arg2) + (odd_arg1 * odd_arg2);
}
714 #define CONCATENATE_AND_SLIDE(s, k) \
716 for (i = 0; i < s; i++) { \
717 v[i] = pws->b[s * k + i]; \
718 v[i + s] = pwd->b[s * k + i]; \
720 for (i = 0; i < s; i++) { \
721 pwd->b[s * k + i] = v[i + n]; \
725 static inline void msa_sld_df(uint32_t df, wr_t *pwd,
726 wr_t *pws, target_ulong rt)
728 uint32_t n = rt % DF_ELEMENTS(df);
734 CONCATENATE_AND_SLIDE(DF_ELEMENTS(DF_BYTE), 0);
737 for (k = 0; k < 2; k++) {
738 CONCATENATE_AND_SLIDE(DF_ELEMENTS(DF_HALF), k);
742 for (k = 0; k < 4; k++) {
743 CONCATENATE_AND_SLIDE(DF_ELEMENTS(DF_WORD), k);
747 for (k = 0; k < 8; k++) {
748 CONCATENATE_AND_SLIDE(DF_ELEMENTS(DF_DOUBLE), k);
/* Horizontal add: odd element of arg1 plus even element of arg2. */
static inline int64_t msa_hadd_s_df(uint32_t df, int64_t arg1, int64_t arg2)
{
    return SIGNED_ODD(arg1, df) + SIGNED_EVEN(arg2, df);
}

static inline int64_t msa_hadd_u_df(uint32_t df, int64_t arg1, int64_t arg2)
{
    return UNSIGNED_ODD(arg1, df) + UNSIGNED_EVEN(arg2, df);
}

/* Horizontal subtract: odd element of arg1 minus even element of arg2. */
static inline int64_t msa_hsub_s_df(uint32_t df, int64_t arg1, int64_t arg2)
{
    return SIGNED_ODD(arg1, df) - SIGNED_EVEN(arg2, df);
}

static inline int64_t msa_hsub_u_df(uint32_t df, int64_t arg1, int64_t arg2)
{
    return UNSIGNED_ODD(arg1, df) - UNSIGNED_EVEN(arg2, df);
}
/* Q-format fixed-point multiply; q_min * q_min saturates to q_max. */
static inline int64_t msa_mul_q_df(uint32_t df, int64_t arg1, int64_t arg2)
{
    int64_t q_min = DF_MIN_INT(df);
    int64_t q_max = DF_MAX_INT(df);

    if (arg1 == q_min && arg2 == q_min) {
        return q_max;
    }
    return (arg1 * arg2) >> (DF_BITS(df) - 1);
}

/* Q-format multiply with rounding.  The rounding constant must be
 * built as 1LL: with a plain int, 1 << (DF_BITS(df) - 2) shifts a
 * 32-bit value by up to 62 bits, which is undefined behavior. */
static inline int64_t msa_mulr_q_df(uint32_t df, int64_t arg1, int64_t arg2)
{
    int64_t q_min = DF_MIN_INT(df);
    int64_t q_max = DF_MAX_INT(df);
    int64_t r_bit = 1LL << (DF_BITS(df) - 2);

    if (arg1 == q_min && arg2 == q_min) {
        return q_max;
    }
    return (arg1 * arg2 + r_bit) >> (DF_BITS(df) - 1);
}
799 #define MSA_BINOP_DF(func) \
800 void helper_msa_ ## func ## _df(CPUMIPSState *env, uint32_t df, \
801 uint32_t wd, uint32_t ws, uint32_t wt) \
803 wr_t *pwd = &(env->active_fpu.fpr[wd].wr); \
804 wr_t *pws = &(env->active_fpu.fpr[ws].wr); \
805 wr_t *pwt = &(env->active_fpu.fpr[wt].wr); \
810 for (i = 0; i < DF_ELEMENTS(DF_BYTE); i++) { \
811 pwd->b[i] = msa_ ## func ## _df(df, pws->b[i], pwt->b[i]); \
815 for (i = 0; i < DF_ELEMENTS(DF_HALF); i++) { \
816 pwd->h[i] = msa_ ## func ## _df(df, pws->h[i], pwt->h[i]); \
820 for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) { \
821 pwd->w[i] = msa_ ## func ## _df(df, pws->w[i], pwt->w[i]); \
825 for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) { \
826 pwd->d[i] = msa_ ## func ## _df(df, pws->d[i], pwt->d[i]); \
863 MSA_BINOP_DF(subsus_u)
864 MSA_BINOP_DF(subsuu_s)
885 void helper_msa_sld_df(CPUMIPSState *env, uint32_t df, uint32_t wd,
886 uint32_t ws, uint32_t rt)
888 wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
889 wr_t *pws = &(env->active_fpu.fpr[ws].wr);
891 msa_sld_df(df, pwd, pws, env->active_tc.gpr[rt]);
/* Multiply-accumulate: dest + arg1 * arg2. */
static inline int64_t msa_maddv_df(uint32_t df, int64_t dest, int64_t arg1,
                                   int64_t arg2)
{
    return dest + arg1 * arg2;
}

/* Multiply-subtract: dest - arg1 * arg2. */
static inline int64_t msa_msubv_df(uint32_t df, int64_t dest, int64_t arg1,
                                   int64_t arg2)
{
    return dest - arg1 * arg2;
}
/* Signed dot-product accumulate. */
static inline int64_t msa_dpadd_s_df(uint32_t df, int64_t dest, int64_t arg1,
                                     int64_t arg2)
{
    int64_t even_arg1;
    int64_t even_arg2;
    int64_t odd_arg1;
    int64_t odd_arg2;
    SIGNED_EXTRACT(even_arg1, odd_arg1, arg1, df);
    SIGNED_EXTRACT(even_arg2, odd_arg2, arg2, df);
    return dest + (even_arg1 * even_arg2) + (odd_arg1 * odd_arg2);
}

/* Unsigned dot-product accumulate. */
static inline int64_t msa_dpadd_u_df(uint32_t df, int64_t dest, int64_t arg1,
                                     int64_t arg2)
{
    int64_t even_arg1;
    int64_t even_arg2;
    int64_t odd_arg1;
    int64_t odd_arg2;
    UNSIGNED_EXTRACT(even_arg1, odd_arg1, arg1, df);
    UNSIGNED_EXTRACT(even_arg2, odd_arg2, arg2, df);
    return dest + (even_arg1 * even_arg2) + (odd_arg1 * odd_arg2);
}

/* Signed dot-product subtract. */
static inline int64_t msa_dpsub_s_df(uint32_t df, int64_t dest, int64_t arg1,
                                     int64_t arg2)
{
    int64_t even_arg1;
    int64_t even_arg2;
    int64_t odd_arg1;
    int64_t odd_arg2;
    SIGNED_EXTRACT(even_arg1, odd_arg1, arg1, df);
    SIGNED_EXTRACT(even_arg2, odd_arg2, arg2, df);
    return dest - ((even_arg1 * even_arg2) + (odd_arg1 * odd_arg2));
}

/* Unsigned dot-product subtract. */
static inline int64_t msa_dpsub_u_df(uint32_t df, int64_t dest, int64_t arg1,
                                     int64_t arg2)
{
    int64_t even_arg1;
    int64_t even_arg2;
    int64_t odd_arg1;
    int64_t odd_arg2;
    UNSIGNED_EXTRACT(even_arg1, odd_arg1, arg1, df);
    UNSIGNED_EXTRACT(even_arg2, odd_arg2, arg2, df);
    return dest - ((even_arg1 * even_arg2) + (odd_arg1 * odd_arg2));
}
/* Q-format multiply-accumulate with saturation. */
static inline int64_t msa_madd_q_df(uint32_t df, int64_t dest, int64_t arg1,
                                    int64_t arg2)
{
    int64_t q_prod, q_ret;

    int64_t q_max = DF_MAX_INT(df);
    int64_t q_min = DF_MIN_INT(df);

    q_prod = arg1 * arg2;
    q_ret = ((dest << (DF_BITS(df) - 1)) + q_prod) >> (DF_BITS(df) - 1);

    return (q_ret < q_min) ? q_min : (q_max < q_ret) ? q_max : q_ret;
}

/* Q-format multiply-subtract with saturation. */
static inline int64_t msa_msub_q_df(uint32_t df, int64_t dest, int64_t arg1,
                                    int64_t arg2)
{
    int64_t q_prod, q_ret;

    int64_t q_max = DF_MAX_INT(df);
    int64_t q_min = DF_MIN_INT(df);

    q_prod = arg1 * arg2;
    q_ret = ((dest << (DF_BITS(df) - 1)) - q_prod) >> (DF_BITS(df) - 1);

    return (q_ret < q_min) ? q_min : (q_max < q_ret) ? q_max : q_ret;
}
/* Q-format multiply-accumulate with rounding and saturation.
 * r_bit is built with 1LL: with a plain int, 1 << (DF_BITS(df) - 2)
 * shifts a 32-bit value by up to 62 bits — undefined behavior. */
static inline int64_t msa_maddr_q_df(uint32_t df, int64_t dest, int64_t arg1,
                                     int64_t arg2)
{
    int64_t q_prod, q_ret;

    int64_t q_max = DF_MAX_INT(df);
    int64_t q_min = DF_MIN_INT(df);
    int64_t r_bit = 1LL << (DF_BITS(df) - 2);

    q_prod = arg1 * arg2;
    q_ret = ((dest << (DF_BITS(df) - 1)) + q_prod + r_bit) >> (DF_BITS(df) - 1);

    return (q_ret < q_min) ? q_min : (q_max < q_ret) ? q_max : q_ret;
}

/* Q-format multiply-subtract with rounding and saturation. */
static inline int64_t msa_msubr_q_df(uint32_t df, int64_t dest, int64_t arg1,
                                     int64_t arg2)
{
    int64_t q_prod, q_ret;

    int64_t q_max = DF_MAX_INT(df);
    int64_t q_min = DF_MIN_INT(df);
    int64_t r_bit = 1LL << (DF_BITS(df) - 2);

    q_prod = arg1 * arg2;
    q_ret = ((dest << (DF_BITS(df) - 1)) - q_prod + r_bit) >> (DF_BITS(df) - 1);

    return (q_ret < q_min) ? q_min : (q_max < q_ret) ? q_max : q_ret;
}
1012 #define MSA_TEROP_DF(func) \
1013 void helper_msa_ ## func ## _df(CPUMIPSState *env, uint32_t df, uint32_t wd, \
1014 uint32_t ws, uint32_t wt) \
1016 wr_t *pwd = &(env->active_fpu.fpr[wd].wr); \
1017 wr_t *pws = &(env->active_fpu.fpr[ws].wr); \
1018 wr_t *pwt = &(env->active_fpu.fpr[wt].wr); \
1023 for (i = 0; i < DF_ELEMENTS(DF_BYTE); i++) { \
1024 pwd->b[i] = msa_ ## func ## _df(df, pwd->b[i], pws->b[i], \
1029 for (i = 0; i < DF_ELEMENTS(DF_HALF); i++) { \
1030 pwd->h[i] = msa_ ## func ## _df(df, pwd->h[i], pws->h[i], \
1035 for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) { \
1036 pwd->w[i] = msa_ ## func ## _df(df, pwd->w[i], pws->w[i], \
1041 for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) { \
1042 pwd->d[i] = msa_ ## func ## _df(df, pwd->d[i], pws->d[i], \
1053 MSA_TEROP_DF(dpadd_s)
1054 MSA_TEROP_DF(dpadd_u)
1055 MSA_TEROP_DF(dpsub_s)
1056 MSA_TEROP_DF(dpsub_u)
1059 MSA_TEROP_DF(madd_q)
1060 MSA_TEROP_DF(msub_q)
1061 MSA_TEROP_DF(maddr_q)
1062 MSA_TEROP_DF(msubr_q)
1065 static inline void msa_splat_df(uint32_t df, wr_t *pwd,
1066 wr_t *pws, target_ulong rt)
1068 uint32_t n = rt % DF_ELEMENTS(df);
1073 for (i = 0; i < DF_ELEMENTS(DF_BYTE); i++) {
1074 pwd->b[i] = pws->b[n];
1078 for (i = 0; i < DF_ELEMENTS(DF_HALF); i++) {
1079 pwd->h[i] = pws->h[n];
1083 for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) {
1084 pwd->w[i] = pws->w[n];
1088 for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) {
1089 pwd->d[i] = pws->d[n];
1097 void helper_msa_splat_df(CPUMIPSState *env, uint32_t df, uint32_t wd,
1098 uint32_t ws, uint32_t rt)
1100 wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
1101 wr_t *pws = &(env->active_fpu.fpr[ws].wr);
1103 msa_splat_df(df, pwd, pws, env->active_tc.gpr[rt]);
1106 #define MSA_DO_B MSA_DO(b)
1107 #define MSA_DO_H MSA_DO(h)
1108 #define MSA_DO_W MSA_DO(w)
1109 #define MSA_DO_D MSA_DO(d)
1111 #define MSA_LOOP_B MSA_LOOP(B)
1112 #define MSA_LOOP_H MSA_LOOP(H)
1113 #define MSA_LOOP_W MSA_LOOP(W)
1114 #define MSA_LOOP_D MSA_LOOP(D)
1116 #define MSA_LOOP_COND_B MSA_LOOP_COND(DF_BYTE)
1117 #define MSA_LOOP_COND_H MSA_LOOP_COND(DF_HALF)
1118 #define MSA_LOOP_COND_W MSA_LOOP_COND(DF_WORD)
1119 #define MSA_LOOP_COND_D MSA_LOOP_COND(DF_DOUBLE)
1121 #define MSA_LOOP(DF) \
1122 for (i = 0; i < (MSA_LOOP_COND_ ## DF) ; i++) { \
1126 #define MSA_FN_DF(FUNC) \
1127 void helper_msa_##FUNC(CPUMIPSState *env, uint32_t df, uint32_t wd, \
1128 uint32_t ws, uint32_t wt) \
1130 wr_t *pwd = &(env->active_fpu.fpr[wd].wr); \
1131 wr_t *pws = &(env->active_fpu.fpr[ws].wr); \
1132 wr_t *pwt = &(env->active_fpu.fpr[wt].wr); \
1133 wr_t wx, *pwx = &wx; \
1151 msa_move_v(pwd, pwx); \
1154 #define MSA_LOOP_COND(DF) \
1155 (DF_ELEMENTS(DF) / 2)
1157 #define Rb(pwr, i) (pwr->b[i])
1158 #define Lb(pwr, i) (pwr->b[i + DF_ELEMENTS(DF_BYTE)/2])
1159 #define Rh(pwr, i) (pwr->h[i])
1160 #define Lh(pwr, i) (pwr->h[i + DF_ELEMENTS(DF_HALF)/2])
1161 #define Rw(pwr, i) (pwr->w[i])
1162 #define Lw(pwr, i) (pwr->w[i + DF_ELEMENTS(DF_WORD)/2])
1163 #define Rd(pwr, i) (pwr->d[i])
1164 #define Ld(pwr, i) (pwr->d[i + DF_ELEMENTS(DF_DOUBLE)/2])
1166 #define MSA_DO(DF) \
1168 R##DF(pwx, i) = pwt->DF[2*i]; \
1169 L##DF(pwx, i) = pws->DF[2*i]; \
1174 #define MSA_DO(DF) \
1176 R##DF(pwx, i) = pwt->DF[2*i+1]; \
1177 L##DF(pwx, i) = pws->DF[2*i+1]; \
1182 #define MSA_DO(DF) \
1184 pwx->DF[2*i] = L##DF(pwt, i); \
1185 pwx->DF[2*i+1] = L##DF(pws, i); \
1190 #define MSA_DO(DF) \
1192 pwx->DF[2*i] = R##DF(pwt, i); \
1193 pwx->DF[2*i+1] = R##DF(pws, i); \
1198 #define MSA_DO(DF) \
1200 pwx->DF[2*i] = pwt->DF[2*i]; \
1201 pwx->DF[2*i+1] = pws->DF[2*i]; \
1206 #define MSA_DO(DF) \
1208 pwx->DF[2*i] = pwt->DF[2*i+1]; \
1209 pwx->DF[2*i+1] = pws->DF[2*i+1]; \
1213 #undef MSA_LOOP_COND
1215 #define MSA_LOOP_COND(DF) \
1218 #define MSA_DO(DF) \
1220 uint32_t n = DF_ELEMENTS(df); \
1221 uint32_t k = (pwd->DF[i] & 0x3f) % (2 * n); \
1223 (pwd->DF[i] & 0xc0) ? 0 : k < n ? pwt->DF[k] : pws->DF[k - n]; \
1227 #undef MSA_LOOP_COND
1230 void helper_msa_sldi_df(CPUMIPSState *env, uint32_t df, uint32_t wd,
1231 uint32_t ws, uint32_t n)
1233 wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
1234 wr_t *pws = &(env->active_fpu.fpr[ws].wr);
1236 msa_sld_df(df, pwd, pws, n);
1239 void helper_msa_splati_df(CPUMIPSState *env, uint32_t df, uint32_t wd,
1240 uint32_t ws, uint32_t n)
1242 wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
1243 wr_t *pws = &(env->active_fpu.fpr[ws].wr);
1245 msa_splat_df(df, pwd, pws, n);
1248 void helper_msa_copy_s_df(CPUMIPSState *env, uint32_t df, uint32_t rd,
1249 uint32_t ws, uint32_t n)
1251 n %= DF_ELEMENTS(df);
1255 env->active_tc.gpr[rd] = (int8_t)env->active_fpu.fpr[ws].wr.b[n];
1258 env->active_tc.gpr[rd] = (int16_t)env->active_fpu.fpr[ws].wr.h[n];
1261 env->active_tc.gpr[rd] = (int32_t)env->active_fpu.fpr[ws].wr.w[n];
1263 #ifdef TARGET_MIPS64
1265 env->active_tc.gpr[rd] = (int64_t)env->active_fpu.fpr[ws].wr.d[n];
1273 void helper_msa_copy_u_df(CPUMIPSState *env, uint32_t df, uint32_t rd,
1274 uint32_t ws, uint32_t n)
1276 n %= DF_ELEMENTS(df);
1280 env->active_tc.gpr[rd] = (uint8_t)env->active_fpu.fpr[ws].wr.b[n];
1283 env->active_tc.gpr[rd] = (uint16_t)env->active_fpu.fpr[ws].wr.h[n];
1286 env->active_tc.gpr[rd] = (uint32_t)env->active_fpu.fpr[ws].wr.w[n];
1288 #ifdef TARGET_MIPS64
1290 env->active_tc.gpr[rd] = (uint64_t)env->active_fpu.fpr[ws].wr.d[n];
1298 void helper_msa_insert_df(CPUMIPSState *env, uint32_t df, uint32_t wd,
1299 uint32_t rs_num, uint32_t n)
1301 wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
1302 target_ulong rs = env->active_tc.gpr[rs_num];
1306 pwd->b[n] = (int8_t)rs;
1309 pwd->h[n] = (int16_t)rs;
1312 pwd->w[n] = (int32_t)rs;
1315 pwd->d[n] = (int64_t)rs;
1322 void helper_msa_insve_df(CPUMIPSState *env, uint32_t df, uint32_t wd,
1323 uint32_t ws, uint32_t n)
1325 wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
1326 wr_t *pws = &(env->active_fpu.fpr[ws].wr);
1330 pwd->b[n] = (int8_t)pws->b[0];
1333 pwd->h[n] = (int16_t)pws->h[0];
1336 pwd->w[n] = (int32_t)pws->w[0];
1339 pwd->d[n] = (int64_t)pws->d[0];
1346 void helper_msa_ctcmsa(CPUMIPSState *env, target_ulong elm, uint32_t cd)
1352 env->active_tc.msacsr = (int32_t)elm & MSACSR_MASK;
1353 restore_msa_fp_status(env);
1354 /* check exception */
1355 if ((GET_FP_ENABLE(env->active_tc.msacsr) | FP_UNIMPLEMENTED)
1356 & GET_FP_CAUSE(env->active_tc.msacsr)) {
1357 do_raise_exception(env, EXCP_MSAFPE, GETPC());
1363 target_ulong helper_msa_cfcmsa(CPUMIPSState *env, uint32_t cs)
1369 return env->active_tc.msacsr & MSACSR_MASK;
1374 void helper_msa_move_v(CPUMIPSState *env, uint32_t wd, uint32_t ws)
1376 wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
1377 wr_t *pws = &(env->active_fpu.fpr[ws].wr);
1379 msa_move_v(pwd, pws);
/* Population count via parallel bit summation. */
static inline int64_t msa_pcnt_df(uint32_t df, int64_t arg)
{
    uint64_t x;

    x = UNSIGNED(arg, df);

    x = (x & 0x5555555555555555ULL) + ((x >> 1) & 0x5555555555555555ULL);
    x = (x & 0x3333333333333333ULL) + ((x >> 2) & 0x3333333333333333ULL);
    x = (x & 0x0F0F0F0F0F0F0F0FULL) + ((x >> 4) & 0x0F0F0F0F0F0F0F0FULL);
    x = (x & 0x00FF00FF00FF00FFULL) + ((x >> 8) & 0x00FF00FF00FF00FFULL);
    x = (x & 0x0000FFFF0000FFFFULL) + ((x >> 16) & 0x0000FFFF0000FFFFULL);
    x = (x & 0x00000000FFFFFFFFULL) + ((x >> 32));

    return x;
}

/* Count leading zeros by binary search over the element width. */
static inline int64_t msa_nlzc_df(uint32_t df, int64_t arg)
{
    uint64_t x, y;
    int n, c;

    x = UNSIGNED(arg, df);
    n = DF_BITS(df);
    c = DF_BITS(df) / 2;

    do {
        y = x >> c;
        if (y != 0) {
            n = n - c;
            x = y;
        }
        c = c >> 1;
    } while (c != 0);

    return n - x;
}

/* Count leading ones: leading zeros of the bitwise complement. */
static inline int64_t msa_nloc_df(uint32_t df, int64_t arg)
{
    return msa_nlzc_df(df, UNSIGNED((~arg), df));
}
1424 void helper_msa_fill_df(CPUMIPSState *env, uint32_t df, uint32_t wd,
1427 wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
1432 for (i = 0; i < DF_ELEMENTS(DF_BYTE); i++) {
1433 pwd->b[i] = (int8_t)env->active_tc.gpr[rs];
1437 for (i = 0; i < DF_ELEMENTS(DF_HALF); i++) {
1438 pwd->h[i] = (int16_t)env->active_tc.gpr[rs];
1442 for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) {
1443 pwd->w[i] = (int32_t)env->active_tc.gpr[rs];
1447 for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) {
1448 pwd->d[i] = (int64_t)env->active_tc.gpr[rs];
/*
 * Generate a helper_msa_<func>_df() that applies the element-wise
 * operation msa_<func>_df() to every element of ws, for each of the
 * four data formats, writing results into wd.
 * (Comments cannot be placed inside the backslash-continued body.)
 */
1456 #define MSA_UNOP_DF(func) \
1457 void helper_msa_ ## func ## _df(CPUMIPSState *env, uint32_t df, \
1458 uint32_t wd, uint32_t ws) \
1460 wr_t *pwd = &(env->active_fpu.fpr[wd].wr); \
1461 wr_t *pws = &(env->active_fpu.fpr[ws].wr); \
1466 for (i = 0; i < DF_ELEMENTS(DF_BYTE); i++) { \
1467 pwd->b[i] = msa_ ## func ## _df(df, pws->b[i]); \
1471 for (i = 0; i < DF_ELEMENTS(DF_HALF); i++) { \
1472 pwd->h[i] = msa_ ## func ## _df(df, pws->h[i]); \
1476 for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) { \
1477 pwd->w[i] = msa_ ## func ## _df(df, pws->w[i]); \
1481 for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) { \
1482 pwd->d[i] = msa_ ## func ## _df(df, pws->d[i]); \
/* Bit patterns for IEEE 754 single/double precision 1.0. */
1495 #define FLOAT_ONE32 make_float32(0x3f8 << 20)
1496 #define FLOAT_ONE64 make_float64(0x3ffULL << 52)
/*
 * Per-width signaling-NaN-style patterns derived by XORing bits of the
 * softfloat default NaN; the low 6 bits are later overwritten with the
 * MSACSR cause bits when an enabled exception fires (see the helpers).
 */
1498 #define FLOAT_SNAN16(s) (float16_default_nan(s) ^ 0x0220)
1500 #define FLOAT_SNAN32(s) (float32_default_nan(s) ^ 0x00400020)
1502 #define FLOAT_SNAN64(s) (float64_default_nan(s) ^ 0x0008000000000020ULL)
1503 /* 0x7ff0000000000020 */
/* Reset the MSACSR Cause field before an FP vector operation begins. */
1505 static inline void clear_msacsr_cause(CPUMIPSState *env)
1507 SET_FP_CAUSE(env->active_tc.msacsr, 0);
/*
 * After an FP vector operation: if none of the accumulated Cause bits is
 * enabled (and Unimplemented is not set), fold them into the sticky Flags
 * field; otherwise raise an MSA floating-point exception at retaddr.
 * (The `else` introducing the raise path falls outside this excerpt.)
 */
1510 static inline void check_msacsr_cause(CPUMIPSState *env, uintptr_t retaddr)
1512 if ((GET_FP_CAUSE(env->active_tc.msacsr) &
1513 (GET_FP_ENABLE(env->active_tc.msacsr) | FP_UNIMPLEMENTED)) == 0) {
1514 UPDATE_FP_FLAGS(env->active_tc.msacsr,
1515 GET_FP_CAUSE(env->active_tc.msacsr));
1517 do_raise_exception(env, EXCP_MSAFPE, retaddr);
1521 /* Flush-to-zero use cases for update_msacsr() */
/* Bit flags for update_msacsr()'s `action` argument. */
1522 #define CLEAR_FS_UNDERFLOW 1
1523 #define CLEAR_IS_INEXACT 2
1524 #define RECIPROCAL_INEXACT 4
/*
 * Translate the softfloat exception flags of the last operation into
 * MIPS MSACSR cause bits, applying flush-to-zero (FS) and action-specific
 * adjustments, and accumulate them into MSACSR Cause.
 * NOTE(review): the return statement is outside this excerpt; callers use
 * the result with get_enabled_exceptions(), so it presumably returns the
 * computed cause bits `c` — confirm against the full source.
 */
1526 static inline int update_msacsr(CPUMIPSState *env, int action, int denormal)
1534 ieee_ex = get_float_exception_flags(&env->active_tc.msa_fp_status);
1536 /* QEMU softfloat does not signal all underflow cases */
1538 ieee_ex |= float_flag_underflow;
1541 c = ieee_ex_to_mips(ieee_ex);
1542 enable = GET_FP_ENABLE(env->active_tc.msacsr) | FP_UNIMPLEMENTED;
1544 /* Set Inexact (I) when flushing inputs to zero */
1545 if ((ieee_ex & float_flag_input_denormal) &&
1546 (env->active_tc.msacsr & MSACSR_FS_MASK) != 0) {
1547 if (action & CLEAR_IS_INEXACT) {
1554 /* Set Inexact (I) and Underflow (U) when flushing outputs to zero */
1555 if ((ieee_ex & float_flag_output_denormal) &&
1556 (env->active_tc.msacsr & MSACSR_FS_MASK) != 0) {
1558 if (action & CLEAR_FS_UNDERFLOW) {
1565 /* Set Inexact (I) when Overflow (O) is not enabled */
1566 if ((c & FP_OVERFLOW) != 0 && (enable & FP_OVERFLOW) == 0) {
1570 /* Clear Exact Underflow when Underflow (U) is not enabled */
1571 if ((c & FP_UNDERFLOW) != 0 && (enable & FP_UNDERFLOW) == 0 &&
1572 (c & FP_INEXACT) == 0) {
1576 /* Reciprocal operations set only Inexact when valid and not
1578 if ((action & RECIPROCAL_INEXACT) &&
1579 (c & (FP_INVALID | FP_DIV0)) == 0) {
1583 cause = c & enable; /* all current enabled exceptions */
1586 /* No enabled exception, update the MSACSR Cause
1587 with all current exceptions */
1588 SET_FP_CAUSE(env->active_tc.msacsr,
1589 (GET_FP_CAUSE(env->active_tc.msacsr) | c));
1591 /* Current exceptions are enabled */
1592 if ((env->active_tc.msacsr & MSACSR_NX_MASK) == 0) {
1593 /* Exception(s) will trap, update MSACSR Cause
1594 with all enabled exceptions */
1595 SET_FP_CAUSE(env->active_tc.msacsr,
1596 (GET_FP_CAUSE(env->active_tc.msacsr) | c));
/*
 * Mask cause bits `c` against the enabled-exception set (Unimplemented is
 * always treated as enabled). The return expression is outside this
 * excerpt; presumably `c & enable` — confirm against the full source.
 */
1603 static inline int get_enabled_exceptions(const CPUMIPSState *env, int c)
1605 int enable = GET_FP_ENABLE(env->active_tc.msacsr) | FP_UNIMPLEMENTED;
/*
 * Narrow a float32 to float16, silencing any NaN, then force the sign
 * bit back on when the source (viewed as a signed integer) was negative.
 */
1609 static inline float16 float16_from_float32(int32_t a, flag ieee,
1610 float_status *status)
1614 f_val = float32_to_float16((float32)a, ieee, status);
1615 f_val = float16_maybe_silence_nan(f_val, status);
1617 return a < 0 ? (f_val | (1 << 15)) : f_val;
/* Narrow float64 -> float32, silencing NaNs; preserve the source sign bit. */
1620 static inline float32 float32_from_float64(int64_t a, float_status *status)
1624 f_val = float64_to_float32((float64)a, status);
1625 f_val = float32_maybe_silence_nan(f_val, status);
1627 return a < 0 ? (f_val | (1 << 31)) : f_val;
/* Widen float16 -> float32, silencing NaNs; preserve the source sign bit. */
1630 static inline float32 float32_from_float16(int16_t a, flag ieee,
1631 float_status *status)
1635 f_val = float16_to_float32((float16)a, ieee, status);
1636 f_val = float32_maybe_silence_nan(f_val, status);
1638 return a < 0 ? (f_val | (1 << 31)) : f_val;
/*
 * Widen float32 -> float64, silencing NaNs; preserve the source sign bit.
 * NOTE(review): the `(float64)a` cast looks like it should be `(float32)a`
 * (cf. the sibling converters above); harmless in practice since both are
 * integer typedefs, but worth confirming upstream.
 */
1641 static inline float64 float64_from_float32(int32_t a, float_status *status)
1645 f_val = float32_to_float64((float64)a, status);
1646 f_val = float64_maybe_silence_nan(f_val, status);
1648 return a < 0 ? (f_val | (1ULL << 63)) : f_val;
/* Convert Q15 fixed-point to float32: integer convert, then scale by 2^-15. */
1651 static inline float32 float32_from_q16(int16_t a, float_status *status)
1655 /* conversion as integer and scaling */
1656 f_val = int32_to_float32(a, status);
1657 f_val = float32_scalbn(f_val, -15, status);
/* Convert Q31 fixed-point to float64: integer convert, then scale by 2^-31. */
1662 static inline float64 float64_from_q32(int32_t a, float_status *status)
1666 /* conversion as integer and scaling */
1667 f_val = int32_to_float64(a, status);
1668 f_val = float64_scalbn(f_val, -31, status);
/*
 * Convert float32 to Q15 fixed-point with saturation: scale by 2^15,
 * convert to int, clamp to [-0x8000, 0x7fff]. NaN raises Invalid;
 * overflow paths raise Overflow|Inexact and return the saturated bound
 * matching the input's sign. Underflow flags from the scaling/convert
 * steps are deliberately stripped.
 */
1673 static inline int16_t float32_to_q16(float32 a, float_status *status)
1676 int32_t q_min = 0xffff8000;
1677 int32_t q_max = 0x00007fff;
1681 if (float32_is_any_nan(a)) {
1682 float_raise(float_flag_invalid, status);
/* Scale so the Q15 fraction becomes an integer. */
1687 a = float32_scalbn(a, 15, status);
1689 ieee_ex = get_float_exception_flags(status);
1690 set_float_exception_flags(ieee_ex & (~float_flag_underflow)
1693 if (ieee_ex & float_flag_overflow) {
1694 float_raise(float_flag_inexact, status);
1695 return (int32_t)a < 0 ? q_min : q_max;
1698 /* conversion to int */
1699 q_val = float32_to_int32(a, status);
1701 ieee_ex = get_float_exception_flags(status);
1702 set_float_exception_flags(ieee_ex & (~float_flag_underflow)
/* Invalid from the int conversion means out-of-range: saturate. */
1705 if (ieee_ex & float_flag_invalid) {
1706 set_float_exception_flags(ieee_ex & (~float_flag_invalid)
1708 float_raise(float_flag_overflow | float_flag_inexact, status);
1709 return (int32_t)a < 0 ? q_min : q_max;
1712 if (q_val < q_min) {
1713 float_raise(float_flag_overflow | float_flag_inexact, status);
1714 return (int16_t)q_min;
1717 if (q_max < q_val) {
1718 float_raise(float_flag_overflow | float_flag_inexact, status);
1719 return (int16_t)q_max;
1722 return (int16_t)q_val;
/*
 * Convert float64 to Q31 fixed-point with saturation: scale by 2^31,
 * convert to int64, clamp to [INT32_MIN, INT32_MAX]. Mirrors
 * float32_to_q16() above, at double width.
 */
1725 static inline int32_t float64_to_q32(float64 a, float_status *status)
1728 int64_t q_min = 0xffffffff80000000LL;
1729 int64_t q_max = 0x000000007fffffffLL;
1733 if (float64_is_any_nan(a)) {
1734 float_raise(float_flag_invalid, status);
/* Scale so the Q31 fraction becomes an integer. */
1739 a = float64_scalbn(a, 31, status);
1741 ieee_ex = get_float_exception_flags(status);
1742 set_float_exception_flags(ieee_ex & (~float_flag_underflow)
1745 if (ieee_ex & float_flag_overflow) {
1746 float_raise(float_flag_inexact, status);
1747 return (int64_t)a < 0 ? q_min : q_max;
1750 /* conversion to integer */
1751 q_val = float64_to_int64(a, status);
1753 ieee_ex = get_float_exception_flags(status);
1754 set_float_exception_flags(ieee_ex & (~float_flag_underflow)
/* Invalid from the int conversion means out-of-range: saturate. */
1757 if (ieee_ex & float_flag_invalid) {
1758 set_float_exception_flags(ieee_ex & (~float_flag_invalid)
1760 float_raise(float_flag_overflow | float_flag_inexact, status);
1761 return (int64_t)a < 0 ? q_min : q_max;
1764 if (q_val < q_min) {
1765 float_raise(float_flag_overflow | float_flag_inexact, status);
1766 return (int32_t)q_min;
1769 if (q_max < q_val) {
1770 float_raise(float_flag_overflow | float_flag_inexact, status);
1771 return (int32_t)q_max;
1774 return (int32_t)q_val;
/*
 * Evaluate a single FP comparison element: runs float<BITS>_<OP> (or its
 * _quiet variant when QUIET), writes all-ones/zero to DEST, then updates
 * MSACSR. If an enabled exception fired, DEST is replaced by the SNAN
 * pattern with the cause bits packed into its low 6 bits.
 */
1777 #define MSA_FLOAT_COND(DEST, OP, ARG1, ARG2, BITS, QUIET) \
1779 float_status *status = &env->active_tc.msa_fp_status; \
1782 set_float_exception_flags(0, status); \
1784 cond = float ## BITS ## _ ## OP(ARG1, ARG2, status); \
1786 cond = float ## BITS ## _ ## OP ## _quiet(ARG1, ARG2, status); \
1788 DEST = cond ? M_MAX_UINT(BITS) : 0; \
1789 c = update_msacsr(env, CLEAR_IS_INEXACT, 0); \
1791 if (get_enabled_exceptions(env, c)) { \
1792 DEST = ((FLOAT_SNAN ## BITS(status) >> 6) << 6) | c; \
/*
 * Composite FP conditions built from the primitive MSA_FLOAT_COND:
 *   AF  = always false (runs eq for side effects, then clears DEST)
 *   UEQ = unordered OR equal
 *   NE  = (a < b) OR (b < a)        -- ordered not-equal
 *   UNE = unordered OR (a<b) OR (b>a)
 *   ULE = unordered OR (a <= b)
 *   ULT = unordered OR (a < b)
 *   OR  = ordered: (a <= b) OR (b <= a)
 * The OR-combining statements between the primitive invocations fall
 * outside this excerpt.
 */
1796 #define MSA_FLOAT_AF(DEST, ARG1, ARG2, BITS, QUIET) \
1798 MSA_FLOAT_COND(DEST, eq, ARG1, ARG2, BITS, QUIET); \
1799 if ((DEST & M_MAX_UINT(BITS)) == M_MAX_UINT(BITS)) { \
1804 #define MSA_FLOAT_UEQ(DEST, ARG1, ARG2, BITS, QUIET) \
1806 MSA_FLOAT_COND(DEST, unordered, ARG1, ARG2, BITS, QUIET); \
1808 MSA_FLOAT_COND(DEST, eq, ARG1, ARG2, BITS, QUIET); \
1812 #define MSA_FLOAT_NE(DEST, ARG1, ARG2, BITS, QUIET) \
1814 MSA_FLOAT_COND(DEST, lt, ARG1, ARG2, BITS, QUIET); \
1816 MSA_FLOAT_COND(DEST, lt, ARG2, ARG1, BITS, QUIET); \
1820 #define MSA_FLOAT_UNE(DEST, ARG1, ARG2, BITS, QUIET) \
1822 MSA_FLOAT_COND(DEST, unordered, ARG1, ARG2, BITS, QUIET); \
1824 MSA_FLOAT_COND(DEST, lt, ARG1, ARG2, BITS, QUIET); \
1826 MSA_FLOAT_COND(DEST, lt, ARG2, ARG1, BITS, QUIET); \
1831 #define MSA_FLOAT_ULE(DEST, ARG1, ARG2, BITS, QUIET) \
1833 MSA_FLOAT_COND(DEST, unordered, ARG1, ARG2, BITS, QUIET); \
1835 MSA_FLOAT_COND(DEST, le, ARG1, ARG2, BITS, QUIET); \
1839 #define MSA_FLOAT_ULT(DEST, ARG1, ARG2, BITS, QUIET) \
1841 MSA_FLOAT_COND(DEST, unordered, ARG1, ARG2, BITS, QUIET); \
1843 MSA_FLOAT_COND(DEST, lt, ARG1, ARG2, BITS, QUIET); \
1847 #define MSA_FLOAT_OR(DEST, ARG1, ARG2, BITS, QUIET) \
1849 MSA_FLOAT_COND(DEST, le, ARG1, ARG2, BITS, QUIET); \
1851 MSA_FLOAT_COND(DEST, le, ARG2, ARG1, BITS, QUIET); \
/*
 * Vector worker for FCAF/FSAF ("always false"): per-element AF compare
 * into a scratch register, then commit to wd after exception checking.
 * All compare_* workers below follow this same scratch/commit pattern.
 */
1855 static inline void compare_af(CPUMIPSState *env, wr_t *pwd, wr_t *pws,
1856 wr_t *pwt, uint32_t df, int quiet,
1859 wr_t wx, *pwx = &wx;
1862 clear_msacsr_cause(env);
1866 for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) {
1867 MSA_FLOAT_AF(pwx->w[i], pws->w[i], pwt->w[i], 32, quiet);
1871 for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) {
1872 MSA_FLOAT_AF(pwx->d[i], pws->d[i], pwt->d[i], 64, quiet);
1879 check_msacsr_cause(env, retaddr);
1881 msa_move_v(pwd, pwx);
/* Vector worker for FCUN/FSUN: true when operands are unordered (NaN). */
1884 static inline void compare_un(CPUMIPSState *env, wr_t *pwd, wr_t *pws,
1885 wr_t *pwt, uint32_t df, int quiet,
1888 wr_t wx, *pwx = &wx;
1891 clear_msacsr_cause(env);
1895 for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) {
1896 MSA_FLOAT_COND(pwx->w[i], unordered, pws->w[i], pwt->w[i], 32,
1901 for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) {
1902 MSA_FLOAT_COND(pwx->d[i], unordered, pws->d[i], pwt->d[i], 64,
1910 check_msacsr_cause(env, retaddr);
1912 msa_move_v(pwd, pwx);
/* Vector worker for FCEQ/FSEQ: ordered equality per element. */
1915 static inline void compare_eq(CPUMIPSState *env, wr_t *pwd, wr_t *pws,
1916 wr_t *pwt, uint32_t df, int quiet,
1919 wr_t wx, *pwx = &wx;
1922 clear_msacsr_cause(env);
1926 for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) {
1927 MSA_FLOAT_COND(pwx->w[i], eq, pws->w[i], pwt->w[i], 32, quiet);
1931 for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) {
1932 MSA_FLOAT_COND(pwx->d[i], eq, pws->d[i], pwt->d[i], 64, quiet);
1939 check_msacsr_cause(env, retaddr);
1941 msa_move_v(pwd, pwx);
/* Vector worker for FCUEQ/FSUEQ: unordered OR equal per element. */
1944 static inline void compare_ueq(CPUMIPSState *env, wr_t *pwd, wr_t *pws,
1945 wr_t *pwt, uint32_t df, int quiet,
1948 wr_t wx, *pwx = &wx;
1951 clear_msacsr_cause(env);
1955 for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) {
1956 MSA_FLOAT_UEQ(pwx->w[i], pws->w[i], pwt->w[i], 32, quiet);
1960 for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) {
1961 MSA_FLOAT_UEQ(pwx->d[i], pws->d[i], pwt->d[i], 64, quiet);
1968 check_msacsr_cause(env, retaddr);
1970 msa_move_v(pwd, pwx);
/* Vector worker for FCLT/FSLT: ordered less-than per element. */
1973 static inline void compare_lt(CPUMIPSState *env, wr_t *pwd, wr_t *pws,
1974 wr_t *pwt, uint32_t df, int quiet,
1977 wr_t wx, *pwx = &wx;
1980 clear_msacsr_cause(env);
1984 for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) {
1985 MSA_FLOAT_COND(pwx->w[i], lt, pws->w[i], pwt->w[i], 32, quiet);
1989 for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) {
1990 MSA_FLOAT_COND(pwx->d[i], lt, pws->d[i], pwt->d[i], 64, quiet);
1997 check_msacsr_cause(env, retaddr);
1999 msa_move_v(pwd, pwx);
/* Vector worker for FCULT/FSULT: unordered OR less-than per element. */
2002 static inline void compare_ult(CPUMIPSState *env, wr_t *pwd, wr_t *pws,
2003 wr_t *pwt, uint32_t df, int quiet,
2006 wr_t wx, *pwx = &wx;
2009 clear_msacsr_cause(env);
2013 for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) {
2014 MSA_FLOAT_ULT(pwx->w[i], pws->w[i], pwt->w[i], 32, quiet);
2018 for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) {
2019 MSA_FLOAT_ULT(pwx->d[i], pws->d[i], pwt->d[i], 64, quiet);
2026 check_msacsr_cause(env, retaddr);
2028 msa_move_v(pwd, pwx);
/* Vector worker for FCLE/FSLE: ordered less-or-equal per element. */
2031 static inline void compare_le(CPUMIPSState *env, wr_t *pwd, wr_t *pws,
2032 wr_t *pwt, uint32_t df, int quiet,
2035 wr_t wx, *pwx = &wx;
2038 clear_msacsr_cause(env);
2042 for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) {
2043 MSA_FLOAT_COND(pwx->w[i], le, pws->w[i], pwt->w[i], 32, quiet);
2047 for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) {
2048 MSA_FLOAT_COND(pwx->d[i], le, pws->d[i], pwt->d[i], 64, quiet);
2055 check_msacsr_cause(env, retaddr);
2057 msa_move_v(pwd, pwx);
/* Vector worker for FCULE/FSULE: unordered OR less-or-equal per element. */
2060 static inline void compare_ule(CPUMIPSState *env, wr_t *pwd, wr_t *pws,
2061 wr_t *pwt, uint32_t df, int quiet,
2064 wr_t wx, *pwx = &wx;
2067 clear_msacsr_cause(env);
2071 for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) {
2072 MSA_FLOAT_ULE(pwx->w[i], pws->w[i], pwt->w[i], 32, quiet);
2076 for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) {
2077 MSA_FLOAT_ULE(pwx->d[i], pws->d[i], pwt->d[i], 64, quiet);
2084 check_msacsr_cause(env, retaddr);
2086 msa_move_v(pwd, pwx);
/* Vector worker for FCOR/FSOR: true when operands are ordered. */
2089 static inline void compare_or(CPUMIPSState *env, wr_t *pwd, wr_t *pws,
2090 wr_t *pwt, uint32_t df, int quiet,
2093 wr_t wx, *pwx = &wx;
2096 clear_msacsr_cause(env);
2100 for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) {
2101 MSA_FLOAT_OR(pwx->w[i], pws->w[i], pwt->w[i], 32, quiet);
2105 for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) {
2106 MSA_FLOAT_OR(pwx->d[i], pws->d[i], pwt->d[i], 64, quiet);
2113 check_msacsr_cause(env, retaddr);
2115 msa_move_v(pwd, pwx);
/* Vector worker for FCUNE/FSUNE: unordered OR not-equal per element. */
2118 static inline void compare_une(CPUMIPSState *env, wr_t *pwd, wr_t *pws,
2119 wr_t *pwt, uint32_t df, int quiet,
2122 wr_t wx, *pwx = &wx;
2125 clear_msacsr_cause(env);
2129 for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) {
2130 MSA_FLOAT_UNE(pwx->w[i], pws->w[i], pwt->w[i], 32, quiet);
2134 for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) {
2135 MSA_FLOAT_UNE(pwx->d[i], pws->d[i], pwt->d[i], 64, quiet);
2142 check_msacsr_cause(env, retaddr);
2144 msa_move_v(pwd, pwx);
/* Vector worker for FCNE/FSNE: ordered not-equal per element. */
2147 static inline void compare_ne(CPUMIPSState *env, wr_t *pwd, wr_t *pws,
2148 wr_t *pwt, uint32_t df, int quiet,
2151 wr_t wx, *pwx = &wx;
2154 clear_msacsr_cause(env);
2158 for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) {
2159 MSA_FLOAT_NE(pwx->w[i], pws->w[i], pwt->w[i], 32, quiet);
2163 for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) {
2164 MSA_FLOAT_NE(pwx->d[i], pws->d[i], pwt->d[i], 64, quiet);
2171 check_msacsr_cause(env, retaddr);
2173 msa_move_v(pwd, pwx);
/*
 * Instruction-level FP compare helpers. Each fetches the wd/ws/wt vector
 * registers and defers to the matching compare_*() worker above.
 * FC* variants pass quiet=1 (non-signaling NaN comparison); FS* variants
 * pass quiet=0 (signal Invalid on NaN operands). GETPC() supplies the
 * host return address for precise exception restart.
 */
2176 void helper_msa_fcaf_df(CPUMIPSState *env, uint32_t df, uint32_t wd,
2177 uint32_t ws, uint32_t wt)
2179 wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
2180 wr_t *pws = &(env->active_fpu.fpr[ws].wr);
2181 wr_t *pwt = &(env->active_fpu.fpr[wt].wr);
2182 compare_af(env, pwd, pws, pwt, df, 1, GETPC());
2185 void helper_msa_fcun_df(CPUMIPSState *env, uint32_t df, uint32_t wd,
2186 uint32_t ws, uint32_t wt)
2188 wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
2189 wr_t *pws = &(env->active_fpu.fpr[ws].wr);
2190 wr_t *pwt = &(env->active_fpu.fpr[wt].wr);
2191 compare_un(env, pwd, pws, pwt, df, 1, GETPC());
2194 void helper_msa_fceq_df(CPUMIPSState *env, uint32_t df, uint32_t wd,
2195 uint32_t ws, uint32_t wt)
2197 wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
2198 wr_t *pws = &(env->active_fpu.fpr[ws].wr);
2199 wr_t *pwt = &(env->active_fpu.fpr[wt].wr);
2200 compare_eq(env, pwd, pws, pwt, df, 1, GETPC());
2203 void helper_msa_fcueq_df(CPUMIPSState *env, uint32_t df, uint32_t wd,
2204 uint32_t ws, uint32_t wt)
2206 wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
2207 wr_t *pws = &(env->active_fpu.fpr[ws].wr);
2208 wr_t *pwt = &(env->active_fpu.fpr[wt].wr);
2209 compare_ueq(env, pwd, pws, pwt, df, 1, GETPC());
2212 void helper_msa_fclt_df(CPUMIPSState *env, uint32_t df, uint32_t wd,
2213 uint32_t ws, uint32_t wt)
2215 wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
2216 wr_t *pws = &(env->active_fpu.fpr[ws].wr);
2217 wr_t *pwt = &(env->active_fpu.fpr[wt].wr);
2218 compare_lt(env, pwd, pws, pwt, df, 1, GETPC());
2221 void helper_msa_fcult_df(CPUMIPSState *env, uint32_t df, uint32_t wd,
2222 uint32_t ws, uint32_t wt)
2224 wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
2225 wr_t *pws = &(env->active_fpu.fpr[ws].wr);
2226 wr_t *pwt = &(env->active_fpu.fpr[wt].wr);
2227 compare_ult(env, pwd, pws, pwt, df, 1, GETPC());
2230 void helper_msa_fcle_df(CPUMIPSState *env, uint32_t df, uint32_t wd,
2231 uint32_t ws, uint32_t wt)
2233 wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
2234 wr_t *pws = &(env->active_fpu.fpr[ws].wr);
2235 wr_t *pwt = &(env->active_fpu.fpr[wt].wr);
2236 compare_le(env, pwd, pws, pwt, df, 1, GETPC());
2239 void helper_msa_fcule_df(CPUMIPSState *env, uint32_t df, uint32_t wd,
2240 uint32_t ws, uint32_t wt)
2242 wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
2243 wr_t *pws = &(env->active_fpu.fpr[ws].wr);
2244 wr_t *pwt = &(env->active_fpu.fpr[wt].wr);
2245 compare_ule(env, pwd, pws, pwt, df, 1, GETPC());
/* Signaling (FS*) variants: quiet=0. */
2248 void helper_msa_fsaf_df(CPUMIPSState *env, uint32_t df, uint32_t wd,
2249 uint32_t ws, uint32_t wt)
2251 wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
2252 wr_t *pws = &(env->active_fpu.fpr[ws].wr);
2253 wr_t *pwt = &(env->active_fpu.fpr[wt].wr);
2254 compare_af(env, pwd, pws, pwt, df, 0, GETPC());
2257 void helper_msa_fsun_df(CPUMIPSState *env, uint32_t df, uint32_t wd,
2258 uint32_t ws, uint32_t wt)
2260 wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
2261 wr_t *pws = &(env->active_fpu.fpr[ws].wr);
2262 wr_t *pwt = &(env->active_fpu.fpr[wt].wr);
2263 compare_un(env, pwd, pws, pwt, df, 0, GETPC());
2266 void helper_msa_fseq_df(CPUMIPSState *env, uint32_t df, uint32_t wd,
2267 uint32_t ws, uint32_t wt)
2269 wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
2270 wr_t *pws = &(env->active_fpu.fpr[ws].wr);
2271 wr_t *pwt = &(env->active_fpu.fpr[wt].wr);
2272 compare_eq(env, pwd, pws, pwt, df, 0, GETPC());
2275 void helper_msa_fsueq_df(CPUMIPSState *env, uint32_t df, uint32_t wd,
2276 uint32_t ws, uint32_t wt)
2278 wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
2279 wr_t *pws = &(env->active_fpu.fpr[ws].wr);
2280 wr_t *pwt = &(env->active_fpu.fpr[wt].wr);
2281 compare_ueq(env, pwd, pws, pwt, df, 0, GETPC());
2284 void helper_msa_fslt_df(CPUMIPSState *env, uint32_t df, uint32_t wd,
2285 uint32_t ws, uint32_t wt)
2287 wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
2288 wr_t *pws = &(env->active_fpu.fpr[ws].wr);
2289 wr_t *pwt = &(env->active_fpu.fpr[wt].wr);
2290 compare_lt(env, pwd, pws, pwt, df, 0, GETPC());
2293 void helper_msa_fsult_df(CPUMIPSState *env, uint32_t df, uint32_t wd,
2294 uint32_t ws, uint32_t wt)
2296 wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
2297 wr_t *pws = &(env->active_fpu.fpr[ws].wr);
2298 wr_t *pwt = &(env->active_fpu.fpr[wt].wr);
2299 compare_ult(env, pwd, pws, pwt, df, 0, GETPC());
2302 void helper_msa_fsle_df(CPUMIPSState *env, uint32_t df, uint32_t wd,
2303 uint32_t ws, uint32_t wt)
2305 wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
2306 wr_t *pws = &(env->active_fpu.fpr[ws].wr);
2307 wr_t *pwt = &(env->active_fpu.fpr[wt].wr);
2308 compare_le(env, pwd, pws, pwt, df, 0, GETPC());
2311 void helper_msa_fsule_df(CPUMIPSState *env, uint32_t df, uint32_t wd,
2312 uint32_t ws, uint32_t wt)
2314 wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
2315 wr_t *pws = &(env->active_fpu.fpr[ws].wr);
2316 wr_t *pwt = &(env->active_fpu.fpr[wt].wr);
2317 compare_ule(env, pwd, pws, pwt, df, 0, GETPC());
/* Ordered / not-equal family. */
2320 void helper_msa_fcor_df(CPUMIPSState *env, uint32_t df, uint32_t wd,
2321 uint32_t ws, uint32_t wt)
2323 wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
2324 wr_t *pws = &(env->active_fpu.fpr[ws].wr);
2325 wr_t *pwt = &(env->active_fpu.fpr[wt].wr);
2326 compare_or(env, pwd, pws, pwt, df, 1, GETPC());
2329 void helper_msa_fcune_df(CPUMIPSState *env, uint32_t df, uint32_t wd,
2330 uint32_t ws, uint32_t wt)
2332 wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
2333 wr_t *pws = &(env->active_fpu.fpr[ws].wr);
2334 wr_t *pwt = &(env->active_fpu.fpr[wt].wr);
2335 compare_une(env, pwd, pws, pwt, df, 1, GETPC());
2338 void helper_msa_fcne_df(CPUMIPSState *env, uint32_t df, uint32_t wd,
2339 uint32_t ws, uint32_t wt)
2341 wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
2342 wr_t *pws = &(env->active_fpu.fpr[ws].wr);
2343 wr_t *pwt = &(env->active_fpu.fpr[wt].wr);
2344 compare_ne(env, pwd, pws, pwt, df, 1, GETPC());
2347 void helper_msa_fsor_df(CPUMIPSState *env, uint32_t df, uint32_t wd,
2348 uint32_t ws, uint32_t wt)
2350 wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
2351 wr_t *pws = &(env->active_fpu.fpr[ws].wr);
2352 wr_t *pwt = &(env->active_fpu.fpr[wt].wr);
2353 compare_or(env, pwd, pws, pwt, df, 0, GETPC());
2356 void helper_msa_fsune_df(CPUMIPSState *env, uint32_t df, uint32_t wd,
2357 uint32_t ws, uint32_t wt)
2359 wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
2360 wr_t *pws = &(env->active_fpu.fpr[ws].wr);
2361 wr_t *pwt = &(env->active_fpu.fpr[wt].wr);
2362 compare_une(env, pwd, pws, pwt, df, 0, GETPC());
2365 void helper_msa_fsne_df(CPUMIPSState *env, uint32_t df, uint32_t wd,
2366 uint32_t ws, uint32_t wt)
2368 wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
2369 wr_t *pws = &(env->active_fpu.fpr[ws].wr);
2370 wr_t *pwt = &(env->active_fpu.fpr[wt].wr);
2371 compare_ne(env, pwd, pws, pwt, df, 0, GETPC());
/*
 * No float16 zero/denormal predicates are used here; stub them to 0 so
 * IS_DENORMAL(..., 16) is constant-false.
 */
2374 #define float16_is_zero(ARG) 0
2375 #define float16_is_zero_or_denormal(ARG) 0
/* True for a nonzero subnormal value of the given width. */
2377 #define IS_DENORMAL(ARG, BITS) \
2378 (!float ## BITS ## _is_zero(ARG) \
2379 && float ## BITS ## _is_zero_or_denormal(ARG))
/*
 * Element-wise binary FP op: clear softfloat flags, compute
 * float<BITS>_<OP>(ARG1, ARG2), update MSACSR (noting a denormal
 * result), and on an enabled exception substitute the SNAN pattern
 * with the cause bits in the low 6 bits of DEST.
 */
2381 #define MSA_FLOAT_BINOP(DEST, OP, ARG1, ARG2, BITS) \
2383 float_status *status = &env->active_tc.msa_fp_status; \
2386 set_float_exception_flags(0, status); \
2387 DEST = float ## BITS ## _ ## OP(ARG1, ARG2, status); \
2388 c = update_msacsr(env, 0, IS_DENORMAL(DEST, BITS)); \
2390 if (get_enabled_exceptions(env, c)) { \
2391 DEST = ((FLOAT_SNAN ## BITS(status) >> 6) << 6) | c; \
/*
 * FADD.df: element-wise FP addition wd = ws + wt (word or doubleword
 * elements), with results staged in a scratch register and committed
 * only after MSACSR exception checking.
 */
2395 void helper_msa_fadd_df(CPUMIPSState *env, uint32_t df, uint32_t wd,
2396 uint32_t ws, uint32_t wt)
2398 wr_t wx, *pwx = &wx;
2399 wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
2400 wr_t *pws = &(env->active_fpu.fpr[ws].wr);
2401 wr_t *pwt = &(env->active_fpu.fpr[wt].wr);
2404 clear_msacsr_cause(env);
2408 for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) {
2409 MSA_FLOAT_BINOP(pwx->w[i], add, pws->w[i], pwt->w[i], 32);
2413 for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) {
2414 MSA_FLOAT_BINOP(pwx->d[i], add, pws->d[i], pwt->d[i], 64);
2421 check_msacsr_cause(env, GETPC());
2422 msa_move_v(pwd, pwx);
/* FSUB.df: element-wise FP subtraction wd = ws - wt (see FADD.df). */
2425 void helper_msa_fsub_df(CPUMIPSState *env, uint32_t df, uint32_t wd,
2426 uint32_t ws, uint32_t wt)
2428 wr_t wx, *pwx = &wx;
2429 wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
2430 wr_t *pws = &(env->active_fpu.fpr[ws].wr);
2431 wr_t *pwt = &(env->active_fpu.fpr[wt].wr);
2434 clear_msacsr_cause(env);
2438 for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) {
2439 MSA_FLOAT_BINOP(pwx->w[i], sub, pws->w[i], pwt->w[i], 32);
2443 for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) {
2444 MSA_FLOAT_BINOP(pwx->d[i], sub, pws->d[i], pwt->d[i], 64);
2451 check_msacsr_cause(env, GETPC());
2452 msa_move_v(pwd, pwx);
/* FMUL.df: element-wise FP multiplication wd = ws * wt (see FADD.df). */
2455 void helper_msa_fmul_df(CPUMIPSState *env, uint32_t df, uint32_t wd,
2456 uint32_t ws, uint32_t wt)
2458 wr_t wx, *pwx = &wx;
2459 wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
2460 wr_t *pws = &(env->active_fpu.fpr[ws].wr);
2461 wr_t *pwt = &(env->active_fpu.fpr[wt].wr);
2464 clear_msacsr_cause(env);
2468 for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) {
2469 MSA_FLOAT_BINOP(pwx->w[i], mul, pws->w[i], pwt->w[i], 32);
2473 for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) {
2474 MSA_FLOAT_BINOP(pwx->d[i], mul, pws->d[i], pwt->d[i], 64);
2481 check_msacsr_cause(env, GETPC());
2483 msa_move_v(pwd, pwx);
/* FDIV.df: element-wise FP division wd = ws / wt (see FADD.df). */
2486 void helper_msa_fdiv_df(CPUMIPSState *env, uint32_t df, uint32_t wd,
2487 uint32_t ws, uint32_t wt)
2489 wr_t wx, *pwx = &wx;
2490 wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
2491 wr_t *pws = &(env->active_fpu.fpr[ws].wr);
2492 wr_t *pwt = &(env->active_fpu.fpr[wt].wr);
2495 clear_msacsr_cause(env);
2499 for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) {
2500 MSA_FLOAT_BINOP(pwx->w[i], div, pws->w[i], pwt->w[i], 32);
2504 for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) {
2505 MSA_FLOAT_BINOP(pwx->d[i], div, pws->d[i], pwt->d[i], 64);
2512 check_msacsr_cause(env, GETPC());
2514 msa_move_v(pwd, pwx);
/*
 * Element-wise fused multiply-add: DEST = ARG2 * ARG3 + ARG1 (note ARG1
 * is the addend — the destination register's old value in FMADD/FMSUB),
 * with NEGATE passed through to softfloat's muladd flags. MSACSR update
 * and enabled-exception substitution as in MSA_FLOAT_BINOP.
 */
2517 #define MSA_FLOAT_MULADD(DEST, ARG1, ARG2, ARG3, NEGATE, BITS) \
2519 float_status *status = &env->active_tc.msa_fp_status; \
2522 set_float_exception_flags(0, status); \
2523 DEST = float ## BITS ## _muladd(ARG2, ARG3, ARG1, NEGATE, status); \
2524 c = update_msacsr(env, 0, IS_DENORMAL(DEST, BITS)); \
2526 if (get_enabled_exceptions(env, c)) { \
2527 DEST = ((FLOAT_SNAN ## BITS(status) >> 6) << 6) | c; \
/* FMADD.df: fused wd = wd + ws * wt per element, scratch/commit pattern. */
2531 void helper_msa_fmadd_df(CPUMIPSState *env, uint32_t df, uint32_t wd,
2532 uint32_t ws, uint32_t wt)
2534 wr_t wx, *pwx = &wx;
2535 wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
2536 wr_t *pws = &(env->active_fpu.fpr[ws].wr);
2537 wr_t *pwt = &(env->active_fpu.fpr[wt].wr);
2540 clear_msacsr_cause(env);
2544 for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) {
2545 MSA_FLOAT_MULADD(pwx->w[i], pwd->w[i],
2546 pws->w[i], pwt->w[i], 0, 32);
2550 for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) {
2551 MSA_FLOAT_MULADD(pwx->d[i], pwd->d[i],
2552 pws->d[i], pwt->d[i], 0, 64);
2559 check_msacsr_cause(env, GETPC());
2561 msa_move_v(pwd, pwx);
/* FMSUB.df: fused wd = wd - ws * wt (product negated) per element. */
2564 void helper_msa_fmsub_df(CPUMIPSState *env, uint32_t df, uint32_t wd,
2565 uint32_t ws, uint32_t wt)
2567 wr_t wx, *pwx = &wx;
2568 wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
2569 wr_t *pws = &(env->active_fpu.fpr[ws].wr);
2570 wr_t *pwt = &(env->active_fpu.fpr[wt].wr);
2573 clear_msacsr_cause(env);
2577 for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) {
2578 MSA_FLOAT_MULADD(pwx->w[i], pwd->w[i],
2579 pws->w[i], pwt->w[i],
2580 float_muladd_negate_product, 32);
2584 for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) {
2585 MSA_FLOAT_MULADD(pwx->d[i], pwd->d[i],
2586 pws->d[i], pwt->d[i],
2587 float_muladd_negate_product, 64);
2594 check_msacsr_cause(env, GETPC());
2596 msa_move_v(pwd, pwx);
/*
 * FEXP2.df: element-wise wd = ws * 2^wt, implemented via scalbn. The
 * integer exponent from wt is clamped (to +/-0x200 for words, +/-0x1000
 * for doublewords) so extreme values saturate rather than feeding an
 * unbounded int into softfloat scalbn.
 */
2599 void helper_msa_fexp2_df(CPUMIPSState *env, uint32_t df, uint32_t wd,
2600 uint32_t ws, uint32_t wt)
2602 wr_t wx, *pwx = &wx;
2603 wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
2604 wr_t *pws = &(env->active_fpu.fpr[ws].wr);
2605 wr_t *pwt = &(env->active_fpu.fpr[wt].wr);
2608 clear_msacsr_cause(env);
2612 for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) {
2613 MSA_FLOAT_BINOP(pwx->w[i], scalbn, pws->w[i],
2614 pwt->w[i] > 0x200 ? 0x200 :
2615 pwt->w[i] < -0x200 ? -0x200 : pwt->w[i],
2620 for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) {
2621 MSA_FLOAT_BINOP(pwx->d[i], scalbn, pws->d[i],
2622 pwt->d[i] > 0x1000 ? 0x1000 :
2623 pwt->d[i] < -0x1000 ? -0x1000 : pwt->d[i],
2631 check_msacsr_cause(env, GETPC());
2633 msa_move_v(pwd, pwx);
/*
 * Element-wise unary FP op: like MSA_FLOAT_BINOP but single-argument
 * (conversions, sqrt, etc.), same MSACSR update and SNAN substitution.
 */
2636 #define MSA_FLOAT_UNOP(DEST, OP, ARG, BITS) \
2638 float_status *status = &env->active_tc.msa_fp_status; \
2641 set_float_exception_flags(0, status); \
2642 DEST = float ## BITS ## _ ## OP(ARG, status); \
2643 c = update_msacsr(env, 0, IS_DENORMAL(DEST, BITS)); \
2645 if (get_enabled_exceptions(env, c)) { \
2646 DEST = ((FLOAT_SNAN ## BITS(status) >> 6) << 6) | c; \
/*
 * FEXDO.df: down-convert elements to half width, interleaving the two
 * source vectors into the left (Lh/Lw) and right (Rh/Rw) halves of each
 * destination element: float32 -> float16 (with IEEE/"ARM" half-float
 * selection) or float64 -> float32.
 */
2650 void helper_msa_fexdo_df(CPUMIPSState *env, uint32_t df, uint32_t wd,
2651 uint32_t ws, uint32_t wt)
2653 wr_t wx, *pwx = &wx;
2654 wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
2655 wr_t *pws = &(env->active_fpu.fpr[ws].wr);
2656 wr_t *pwt = &(env->active_fpu.fpr[wt].wr);
2659 clear_msacsr_cause(env);
2663 for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) {
2664 /* Half precision floats come in two formats: standard
2665 IEEE and "ARM" format. The latter gains extra exponent
2666 range by omitting the NaN/Inf encodings. */
2669 MSA_FLOAT_BINOP(Lh(pwx, i), from_float32, pws->w[i], ieee, 16);
2670 MSA_FLOAT_BINOP(Rh(pwx, i), from_float32, pwt->w[i], ieee, 16);
2674 for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) {
2675 MSA_FLOAT_UNOP(Lw(pwx, i), from_float64, pws->d[i], 32);
2676 MSA_FLOAT_UNOP(Rw(pwx, i), from_float64, pwt->d[i], 32);
2683 check_msacsr_cause(env, GETPC());
2684 msa_move_v(pwd, pwx);
/*
 * Unary FP op whose result is narrower than its source (BITS -> XBITS):
 * on an enabled exception the SNAN pattern of the *destination* width
 * (XBITS) is substituted. Exact underflow is cleared per CLEAR_FS_UNDERFLOW.
 */
2687 #define MSA_FLOAT_UNOP_XD(DEST, OP, ARG, BITS, XBITS) \
2689 float_status *status = &env->active_tc.msa_fp_status; \
2692 set_float_exception_flags(0, status); \
2693 DEST = float ## BITS ## _ ## OP(ARG, status); \
2694 c = update_msacsr(env, CLEAR_FS_UNDERFLOW, 0); \
2696 if (get_enabled_exceptions(env, c)) { \
2697 DEST = ((FLOAT_SNAN ## XBITS(status) >> 6) << 6) | c; \
/*
 * FTQ.df: convert FP elements to fixed-point with saturation
 * (float32 -> Q15, float64 -> Q31), interleaving ws into the left and
 * wt into the right halves of each destination element.
 */
2701 void helper_msa_ftq_df(CPUMIPSState *env, uint32_t df, uint32_t wd,
2702 uint32_t ws, uint32_t wt)
2704 wr_t wx, *pwx = &wx;
2705 wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
2706 wr_t *pws = &(env->active_fpu.fpr[ws].wr);
2707 wr_t *pwt = &(env->active_fpu.fpr[wt].wr);
2710 clear_msacsr_cause(env);
2714 for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) {
2715 MSA_FLOAT_UNOP_XD(Lh(pwx, i), to_q16, pws->w[i], 32, 16);
2716 MSA_FLOAT_UNOP_XD(Rh(pwx, i), to_q16, pwt->w[i], 32, 16);
2720 for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) {
2721 MSA_FLOAT_UNOP_XD(Lw(pwx, i), to_q32, pws->d[i], 64, 32);
2722 MSA_FLOAT_UNOP_XD(Rw(pwx, i), to_q32, pwt->d[i], 64, 32);
2729 check_msacsr_cause(env, GETPC());
2731 msa_move_v(pwd, pwx);
/* True when ARG1 is a number (not NaN) and ARG2 is a quiet NaN. */
2734 #define NUMBER_QNAN_PAIR(ARG1, ARG2, BITS, STATUS) \
2735 !float ## BITS ## _is_any_nan(ARG1) \
2736 && float ## BITS ## _is_quiet_nan(ARG2, STATUS)
/*
 * Element-wise min/max-style binary op: like MSA_FLOAT_BINOP but with
 * no denormal-result tracking (third update_msacsr argument is 0).
 */
2738 #define MSA_FLOAT_MAXOP(DEST, OP, ARG1, ARG2, BITS) \
2740 float_status *status = &env->active_tc.msa_fp_status; \
2743 set_float_exception_flags(0, status); \
2744 DEST = float ## BITS ## _ ## OP(ARG1, ARG2, status); \
2745 c = update_msacsr(env, 0, 0); \
2747 if (get_enabled_exceptions(env, c)) { \
2748 DEST = ((FLOAT_SNAN ## BITS(status) >> 6) << 6) | c; \
/*
 * Magnitude-based select for FMAX_A/FMIN_A: when one operand is a number
 * and the other a quiet NaN, take the number; otherwise compute F and G
 * on the values, F on the absolute values, and pick whichever signed
 * result (xs or xt) matches the magnitude winner xd (xs wins ties).
 */
2752 #define FMAXMIN_A(F, G, X, _S, _T, BITS, STATUS) \
2754 uint## BITS ##_t S = _S, T = _T; \
2755 uint## BITS ##_t as, at, xs, xt, xd; \
2756 if (NUMBER_QNAN_PAIR(S, T, BITS, STATUS)) { \
2759 else if (NUMBER_QNAN_PAIR(T, S, BITS, STATUS)) { \
2762 as = float## BITS ##_abs(S); \
2763 at = float## BITS ##_abs(T); \
2764 MSA_FLOAT_MAXOP(xs, F, S, T, BITS); \
2765 MSA_FLOAT_MAXOP(xt, G, S, T, BITS); \
2766 MSA_FLOAT_MAXOP(xd, F, as, at, BITS); \
2767 X = (as == at || xd == float## BITS ##_abs(xs)) ? xs : xt; \
/*
 * FMIN.df: element-wise minimum. When exactly one operand is a quiet
 * NaN and the other a number, the number is taken (by comparing it with
 * itself) instead of propagating the NaN; otherwise a plain min.
 */
2770 void helper_msa_fmin_df(CPUMIPSState *env, uint32_t df, uint32_t wd,
2771 uint32_t ws, uint32_t wt)
2773 float_status *status = &env->active_tc.msa_fp_status;
2774 wr_t wx, *pwx = &wx;
2775 wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
2776 wr_t *pws = &(env->active_fpu.fpr[ws].wr);
2777 wr_t *pwt = &(env->active_fpu.fpr[wt].wr);
2780 clear_msacsr_cause(env);
2784 for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) {
2785 if (NUMBER_QNAN_PAIR(pws->w[i], pwt->w[i], 32, status)) {
2786 MSA_FLOAT_MAXOP(pwx->w[i], min, pws->w[i], pws->w[i], 32);
2787 } else if (NUMBER_QNAN_PAIR(pwt->w[i], pws->w[i], 32, status)) {
2788 MSA_FLOAT_MAXOP(pwx->w[i], min, pwt->w[i], pwt->w[i], 32);
2790 MSA_FLOAT_MAXOP(pwx->w[i], min, pws->w[i], pwt->w[i], 32);
2795 for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) {
2796 if (NUMBER_QNAN_PAIR(pws->d[i], pwt->d[i], 64, status)) {
2797 MSA_FLOAT_MAXOP(pwx->d[i], min, pws->d[i], pws->d[i], 64);
2798 } else if (NUMBER_QNAN_PAIR(pwt->d[i], pws->d[i], 64, status)) {
2799 MSA_FLOAT_MAXOP(pwx->d[i], min, pwt->d[i], pwt->d[i], 64);
2801 MSA_FLOAT_MAXOP(pwx->d[i], min, pws->d[i], pwt->d[i], 64);
2809 check_msacsr_cause(env, GETPC());
2811 msa_move_v(pwd, pwx);
/* FMIN_A.df: element-wise minimum of absolute values via FMAXMIN_A. */
2814 void helper_msa_fmin_a_df(CPUMIPSState *env, uint32_t df, uint32_t wd,
2815 uint32_t ws, uint32_t wt)
2817 float_status *status = &env->active_tc.msa_fp_status;
2818 wr_t wx, *pwx = &wx;
2819 wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
2820 wr_t *pws = &(env->active_fpu.fpr[ws].wr);
2821 wr_t *pwt = &(env->active_fpu.fpr[wt].wr);
2824 clear_msacsr_cause(env);
2828 for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) {
2829 FMAXMIN_A(min, max, pwx->w[i], pws->w[i], pwt->w[i], 32, status);
2833 for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) {
2834 FMAXMIN_A(min, max, pwx->d[i], pws->d[i], pwt->d[i], 64, status);
2841 check_msacsr_cause(env, GETPC());
2843 msa_move_v(pwd, pwx);
/*
 * FMAX.df: element-wise maximum; same number-over-quiet-NaN preference
 * as FMIN.df above.
 */
2846 void helper_msa_fmax_df(CPUMIPSState *env, uint32_t df, uint32_t wd,
2847 uint32_t ws, uint32_t wt)
2849 float_status *status = &env->active_tc.msa_fp_status;
2850 wr_t wx, *pwx = &wx;
2851 wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
2852 wr_t *pws = &(env->active_fpu.fpr[ws].wr);
2853 wr_t *pwt = &(env->active_fpu.fpr[wt].wr);
2856 clear_msacsr_cause(env);
2860 for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) {
2861 if (NUMBER_QNAN_PAIR(pws->w[i], pwt->w[i], 32, status)) {
2862 MSA_FLOAT_MAXOP(pwx->w[i], max, pws->w[i], pws->w[i], 32);
2863 } else if (NUMBER_QNAN_PAIR(pwt->w[i], pws->w[i], 32, status)) {
2864 MSA_FLOAT_MAXOP(pwx->w[i], max, pwt->w[i], pwt->w[i], 32);
2866 MSA_FLOAT_MAXOP(pwx->w[i], max, pws->w[i], pwt->w[i], 32);
2871 for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) {
2872 if (NUMBER_QNAN_PAIR(pws->d[i], pwt->d[i], 64, status)) {
2873 MSA_FLOAT_MAXOP(pwx->d[i], max, pws->d[i], pws->d[i], 64);
2874 } else if (NUMBER_QNAN_PAIR(pwt->d[i], pws->d[i], 64, status)) {
2875 MSA_FLOAT_MAXOP(pwx->d[i], max, pwt->d[i], pwt->d[i], 64);
2877 MSA_FLOAT_MAXOP(pwx->d[i], max, pws->d[i], pwt->d[i], 64);
2885 check_msacsr_cause(env, GETPC());
2887 msa_move_v(pwd, pwx);
2890 void helper_msa_fmax_a_df(CPUMIPSState *env, uint32_t df, uint32_t wd,
2891 uint32_t ws, uint32_t wt)
2893 float_status *status = &env->active_tc.msa_fp_status;
2894 wr_t wx, *pwx = &wx;
2895 wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
2896 wr_t *pws = &(env->active_fpu.fpr[ws].wr);
2897 wr_t *pwt = &(env->active_fpu.fpr[wt].wr);
2900 clear_msacsr_cause(env);
2904 for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) {
2905 FMAXMIN_A(max, min, pwx->w[i], pws->w[i], pwt->w[i], 32, status);
2909 for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) {
2910 FMAXMIN_A(max, min, pwx->d[i], pws->d[i], pwt->d[i], 64, status);
2917 check_msacsr_cause(env, GETPC());
2919 msa_move_v(pwd, pwx);
2922 void helper_msa_fclass_df(CPUMIPSState *env, uint32_t df,
2923 uint32_t wd, uint32_t ws)
2925 float_status* status = &env->active_tc.msa_fp_status;
2927 wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
2928 wr_t *pws = &(env->active_fpu.fpr[ws].wr);
2929 if (df == DF_WORD) {
2930 pwd->w[0] = float_class_s(pws->w[0], status);
2931 pwd->w[1] = float_class_s(pws->w[1], status);
2932 pwd->w[2] = float_class_s(pws->w[2], status);
2933 pwd->w[3] = float_class_s(pws->w[3], status);
2935 pwd->d[0] = float_class_d(pws->d[0], status);
2936 pwd->d[1] = float_class_d(pws->d[1], status);
/*
 * Unary float op whose result is forced to 0 for NaN inputs
 * (used by the float-to-integer conversions): perform OP, clear any
 * flush-to-zero underflow cause, then either encode the enabled-
 * exception payload or zero the destination when ARG was a NaN.
 * Multi-statement macro, so wrapped in do { } while (0).
 */
#define MSA_FLOAT_UNOP0(DEST, OP, ARG, BITS)                                \
    do {                                                                    \
        float_status *status = &env->active_tc.msa_fp_status;               \
        int c;                                                              \
                                                                            \
        set_float_exception_flags(0, status);                               \
        DEST = float ## BITS ## _ ## OP(ARG, status);                       \
        c = update_msacsr(env, CLEAR_FS_UNDERFLOW, 0);                      \
                                                                            \
        if (get_enabled_exceptions(env, c)) {                               \
            DEST = ((FLOAT_SNAN ## BITS(status) >> 6) << 6) | c;            \
        } else if (float ## BITS ## _is_any_nan(ARG)) {                     \
            DEST = 0;                                                       \
        }                                                                   \
    } while (0)
2956 void helper_msa_ftrunc_s_df(CPUMIPSState *env, uint32_t df, uint32_t wd,
2959 wr_t wx, *pwx = &wx;
2960 wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
2961 wr_t *pws = &(env->active_fpu.fpr[ws].wr);
2964 clear_msacsr_cause(env);
2968 for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) {
2969 MSA_FLOAT_UNOP0(pwx->w[i], to_int32_round_to_zero, pws->w[i], 32);
2973 for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) {
2974 MSA_FLOAT_UNOP0(pwx->d[i], to_int64_round_to_zero, pws->d[i], 64);
2981 check_msacsr_cause(env, GETPC());
2983 msa_move_v(pwd, pwx);
2986 void helper_msa_ftrunc_u_df(CPUMIPSState *env, uint32_t df, uint32_t wd,
2989 wr_t wx, *pwx = &wx;
2990 wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
2991 wr_t *pws = &(env->active_fpu.fpr[ws].wr);
2994 clear_msacsr_cause(env);
2998 for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) {
2999 MSA_FLOAT_UNOP0(pwx->w[i], to_uint32_round_to_zero, pws->w[i], 32);
3003 for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) {
3004 MSA_FLOAT_UNOP0(pwx->d[i], to_uint64_round_to_zero, pws->d[i], 64);
3011 check_msacsr_cause(env, GETPC());
3013 msa_move_v(pwd, pwx);
3016 void helper_msa_fsqrt_df(CPUMIPSState *env, uint32_t df, uint32_t wd,
3019 wr_t wx, *pwx = &wx;
3020 wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
3021 wr_t *pws = &(env->active_fpu.fpr[ws].wr);
3024 clear_msacsr_cause(env);
3028 for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) {
3029 MSA_FLOAT_UNOP(pwx->w[i], sqrt, pws->w[i], 32);
3033 for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) {
3034 MSA_FLOAT_UNOP(pwx->d[i], sqrt, pws->d[i], 64);
3041 check_msacsr_cause(env, GETPC());
3043 msa_move_v(pwd, pwx);
/*
 * Reciprocal helper: DEST = 1/ARG via softfloat divide.  Inexactness
 * is reported as RECIPROCAL_INEXACT unless ARG is an infinity or the
 * result is a quiet NaN; denormal results are flagged through
 * IS_DENORMAL.  Enabled exceptions encode their payload into DEST.
 * Multi-statement macro, so wrapped in do { } while (0).
 */
#define MSA_FLOAT_RECIPROCAL(DEST, ARG, BITS)                               \
    do {                                                                    \
        float_status *status = &env->active_tc.msa_fp_status;               \
        int c;                                                              \
                                                                            \
        set_float_exception_flags(0, status);                               \
        DEST = float ## BITS ## _ ## div(FLOAT_ONE ## BITS, ARG, status);   \
        c = update_msacsr(env, float ## BITS ## _is_infinity(ARG) ||        \
                          float ## BITS ## _is_quiet_nan(DEST, status) ?    \
                          0 : RECIPROCAL_INEXACT,                           \
                          IS_DENORMAL(DEST, BITS));                         \
                                                                            \
        if (get_enabled_exceptions(env, c)) {                               \
            DEST = ((FLOAT_SNAN ## BITS(status) >> 6) << 6) | c;            \
        }                                                                   \
    } while (0)
3063 void helper_msa_frsqrt_df(CPUMIPSState *env, uint32_t df, uint32_t wd,
3066 wr_t wx, *pwx = &wx;
3067 wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
3068 wr_t *pws = &(env->active_fpu.fpr[ws].wr);
3071 clear_msacsr_cause(env);
3075 for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) {
3076 MSA_FLOAT_RECIPROCAL(pwx->w[i], float32_sqrt(pws->w[i],
3077 &env->active_tc.msa_fp_status), 32);
3081 for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) {
3082 MSA_FLOAT_RECIPROCAL(pwx->d[i], float64_sqrt(pws->d[i],
3083 &env->active_tc.msa_fp_status), 64);
3090 check_msacsr_cause(env, GETPC());
3092 msa_move_v(pwd, pwx);
3095 void helper_msa_frcp_df(CPUMIPSState *env, uint32_t df, uint32_t wd,
3098 wr_t wx, *pwx = &wx;
3099 wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
3100 wr_t *pws = &(env->active_fpu.fpr[ws].wr);
3103 clear_msacsr_cause(env);
3107 for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) {
3108 MSA_FLOAT_RECIPROCAL(pwx->w[i], pws->w[i], 32);
3112 for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) {
3113 MSA_FLOAT_RECIPROCAL(pwx->d[i], pws->d[i], 64);
3120 check_msacsr_cause(env, GETPC());
3122 msa_move_v(pwd, pwx);
3125 void helper_msa_frint_df(CPUMIPSState *env, uint32_t df, uint32_t wd,
3128 wr_t wx, *pwx = &wx;
3129 wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
3130 wr_t *pws = &(env->active_fpu.fpr[ws].wr);
3133 clear_msacsr_cause(env);
3137 for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) {
3138 MSA_FLOAT_UNOP(pwx->w[i], round_to_int, pws->w[i], 32);
3142 for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) {
3143 MSA_FLOAT_UNOP(pwx->d[i], round_to_int, pws->d[i], 64);
3150 check_msacsr_cause(env, GETPC());
3152 msa_move_v(pwd, pwx);
/*
 * FLOG2 helper: floor(log2(ARG)).  Forces round-down mode for the
 * log2 and the integral rounding, then restores the rounding mode
 * from MSACSR.RM.  The inexact flag is deliberately discarded since
 * the truncation to an integral value is inherent to the operation.
 * Multi-statement macro, so wrapped in do { } while (0).
 */
#define MSA_FLOAT_LOGB(DEST, ARG, BITS)                                     \
    do {                                                                    \
        float_status *status = &env->active_tc.msa_fp_status;               \
        int c;                                                              \
                                                                            \
        set_float_exception_flags(0, status);                               \
        set_float_rounding_mode(float_round_down, status);                  \
        DEST = float ## BITS ## _ ## log2(ARG, status);                     \
        DEST = float ## BITS ## _ ## round_to_int(DEST, status);            \
        set_float_rounding_mode(ieee_rm[(env->active_tc.msacsr &            \
                                         MSACSR_RM_MASK) >> MSACSR_RM],     \
                                status);                                    \
                                                                            \
        set_float_exception_flags(get_float_exception_flags(status) &       \
                                  (~float_flag_inexact),                    \
                                  status);                                  \
                                                                            \
        c = update_msacsr(env, 0, IS_DENORMAL(DEST, BITS));                 \
                                                                            \
        if (get_enabled_exceptions(env, c)) {                               \
            DEST = ((FLOAT_SNAN ## BITS(status) >> 6) << 6) | c;            \
        }                                                                   \
    } while (0)
3179 void helper_msa_flog2_df(CPUMIPSState *env, uint32_t df, uint32_t wd,
3182 wr_t wx, *pwx = &wx;
3183 wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
3184 wr_t *pws = &(env->active_fpu.fpr[ws].wr);
3187 clear_msacsr_cause(env);
3191 for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) {
3192 MSA_FLOAT_LOGB(pwx->w[i], pws->w[i], 32);
3196 for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) {
3197 MSA_FLOAT_LOGB(pwx->d[i], pws->d[i], 64);
3204 check_msacsr_cause(env, GETPC());
3206 msa_move_v(pwd, pwx);
3209 void helper_msa_fexupl_df(CPUMIPSState *env, uint32_t df, uint32_t wd,
3212 wr_t wx, *pwx = &wx;
3213 wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
3214 wr_t *pws = &(env->active_fpu.fpr[ws].wr);
3217 clear_msacsr_cause(env);
3221 for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) {
3222 /* Half precision floats come in two formats: standard
3223 IEEE and "ARM" format. The latter gains extra exponent
3224 range by omitting the NaN/Inf encodings. */
3227 MSA_FLOAT_BINOP(pwx->w[i], from_float16, Lh(pws, i), ieee, 32);
3231 for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) {
3232 MSA_FLOAT_UNOP(pwx->d[i], from_float32, Lw(pws, i), 64);
3239 check_msacsr_cause(env, GETPC());
3240 msa_move_v(pwd, pwx);
3243 void helper_msa_fexupr_df(CPUMIPSState *env, uint32_t df, uint32_t wd,
3246 wr_t wx, *pwx = &wx;
3247 wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
3248 wr_t *pws = &(env->active_fpu.fpr[ws].wr);
3251 clear_msacsr_cause(env);
3255 for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) {
3256 /* Half precision floats come in two formats: standard
3257 IEEE and "ARM" format. The latter gains extra exponent
3258 range by omitting the NaN/Inf encodings. */
3261 MSA_FLOAT_BINOP(pwx->w[i], from_float16, Rh(pws, i), ieee, 32);
3265 for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) {
3266 MSA_FLOAT_UNOP(pwx->d[i], from_float32, Rw(pws, i), 64);
3273 check_msacsr_cause(env, GETPC());
3274 msa_move_v(pwd, pwx);
3277 void helper_msa_ffql_df(CPUMIPSState *env, uint32_t df, uint32_t wd,
3280 wr_t wx, *pwx = &wx;
3281 wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
3282 wr_t *pws = &(env->active_fpu.fpr[ws].wr);
3287 for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) {
3288 MSA_FLOAT_UNOP(pwx->w[i], from_q16, Lh(pws, i), 32);
3292 for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) {
3293 MSA_FLOAT_UNOP(pwx->d[i], from_q32, Lw(pws, i), 64);
3300 msa_move_v(pwd, pwx);
3303 void helper_msa_ffqr_df(CPUMIPSState *env, uint32_t df, uint32_t wd,
3306 wr_t wx, *pwx = &wx;
3307 wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
3308 wr_t *pws = &(env->active_fpu.fpr[ws].wr);
3313 for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) {
3314 MSA_FLOAT_UNOP(pwx->w[i], from_q16, Rh(pws, i), 32);
3318 for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) {
3319 MSA_FLOAT_UNOP(pwx->d[i], from_q32, Rw(pws, i), 64);
3326 msa_move_v(pwd, pwx);
3329 void helper_msa_ftint_s_df(CPUMIPSState *env, uint32_t df, uint32_t wd,
3332 wr_t wx, *pwx = &wx;
3333 wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
3334 wr_t *pws = &(env->active_fpu.fpr[ws].wr);
3337 clear_msacsr_cause(env);
3341 for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) {
3342 MSA_FLOAT_UNOP0(pwx->w[i], to_int32, pws->w[i], 32);
3346 for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) {
3347 MSA_FLOAT_UNOP0(pwx->d[i], to_int64, pws->d[i], 64);
3354 check_msacsr_cause(env, GETPC());
3356 msa_move_v(pwd, pwx);
3359 void helper_msa_ftint_u_df(CPUMIPSState *env, uint32_t df, uint32_t wd,
3362 wr_t wx, *pwx = &wx;
3363 wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
3364 wr_t *pws = &(env->active_fpu.fpr[ws].wr);
3367 clear_msacsr_cause(env);
3371 for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) {
3372 MSA_FLOAT_UNOP0(pwx->w[i], to_uint32, pws->w[i], 32);
3376 for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) {
3377 MSA_FLOAT_UNOP0(pwx->d[i], to_uint64, pws->d[i], 64);
3384 check_msacsr_cause(env, GETPC());
3386 msa_move_v(pwd, pwx);
/*
 * Map the generic "from_*" operation names used by MSA_FLOAT_UNOP onto
 * the corresponding softfloat conversion functions, so the token-pasted
 * name float##BITS##_from_xxx resolves correctly.
 */
#define float32_from_int32 int32_to_float32
#define float32_from_uint32 uint32_to_float32

#define float64_from_int64 int64_to_float64
#define float64_from_uint64 uint64_to_float64
3395 void helper_msa_ffint_s_df(CPUMIPSState *env, uint32_t df, uint32_t wd,
3398 wr_t wx, *pwx = &wx;
3399 wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
3400 wr_t *pws = &(env->active_fpu.fpr[ws].wr);
3403 clear_msacsr_cause(env);
3407 for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) {
3408 MSA_FLOAT_UNOP(pwx->w[i], from_int32, pws->w[i], 32);
3412 for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) {
3413 MSA_FLOAT_UNOP(pwx->d[i], from_int64, pws->d[i], 64);
3420 check_msacsr_cause(env, GETPC());
3422 msa_move_v(pwd, pwx);
3425 void helper_msa_ffint_u_df(CPUMIPSState *env, uint32_t df, uint32_t wd,
3428 wr_t wx, *pwx = &wx;
3429 wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
3430 wr_t *pws = &(env->active_fpu.fpr[ws].wr);
3433 clear_msacsr_cause(env);
3437 for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) {
3438 MSA_FLOAT_UNOP(pwx->w[i], from_uint32, pws->w[i], 32);
3442 for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) {
3443 MSA_FLOAT_UNOP(pwx->d[i], from_uint64, pws->d[i], 64);
3450 check_msacsr_cause(env, GETPC());
3452 msa_move_v(pwd, pwx);