/*
 * MIPS SIMD Architecture Module Instruction emulation helpers for QEMU.
 *
 * Copyright (c) 2014 Imagination Technologies
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "cpu.h"
#include "exec/helper-proto.h"
/* Data format min and max values */
#define DF_BITS(df) (1 << ((df) + 3))

#define DF_MAX_INT(df)  (int64_t)((1LL << (DF_BITS(df) - 1)) - 1)
#define M_MAX_INT(m)    (int64_t)((1LL << ((m) - 1)) - 1)

#define DF_MIN_INT(df)  (int64_t)(-(1LL << (DF_BITS(df) - 1)))
#define M_MIN_INT(m)    (int64_t)(-(1LL << ((m) - 1)))

#define DF_MAX_UINT(df) (uint64_t)(-1ULL >> (64 - DF_BITS(df)))
#define M_MAX_UINT(m)   (uint64_t)(-1ULL >> (64 - (m)))

#define UNSIGNED(x, df) ((x) & DF_MAX_UINT(df))
#define SIGNED(x, df) \
    ((((int64_t)x) << (64 - DF_BITS(df))) >> (64 - DF_BITS(df)))
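
/*
 * Illustration (not part of the original source): assuming the usual
 * df encodings from the MIPS translation code (DF_BYTE = 0, DF_HALF = 1,
 * DF_WORD = 2, DF_DOUBLE = 3), DF_BITS() yields 8, 16, 32 and 64.
 * For example DF_MAX_INT(DF_HALF) == 0x7fff, DF_MIN_INT(DF_HALF) == -0x8000
 * and DF_MAX_UINT(DF_HALF) == 0xffff; SIGNED(0xff, DF_BYTE) sign-extends
 * to -1 while UNSIGNED(-1, DF_BYTE) truncates to 0xff.
 */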

/* Element-by-element access macros */
#define DF_ELEMENTS(df) (MSA_WRLEN / DF_BITS(df))

static inline void msa_move_v(wr_t *pwd, wr_t *pws)
{
    uint32_t i;

    for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) {
        pwd->d[i] = pws->d[i];
    }
}

#define MSA_FN_IMM8(FUNC, DEST, OPERATION) \
void helper_msa_ ## FUNC(CPUMIPSState *env, uint32_t wd, uint32_t ws, \
        uint32_t i8) \
    wr_t *pwd = &(env->active_fpu.fpr[wd].wr); \
    wr_t *pws = &(env->active_fpu.fpr[ws].wr); \
    for (i = 0; i < DF_ELEMENTS(DF_BYTE); i++) { \

MSA_FN_IMM8(andi_b, pwd->b[i], pws->b[i] & i8)
MSA_FN_IMM8(ori_b, pwd->b[i], pws->b[i] | i8)
MSA_FN_IMM8(nori_b, pwd->b[i], ~(pws->b[i] | i8))
MSA_FN_IMM8(xori_b, pwd->b[i], pws->b[i] ^ i8)

#define BIT_MOVE_IF_NOT_ZERO(dest, arg1, arg2, df) \
            UNSIGNED(((dest & (~arg2)) | (arg1 & arg2)), df)
MSA_FN_IMM8(bmnzi_b, pwd->b[i],
        BIT_MOVE_IF_NOT_ZERO(pwd->b[i], pws->b[i], i8, DF_BYTE))

#define BIT_MOVE_IF_ZERO(dest, arg1, arg2, df) \
            UNSIGNED((dest & arg2) | (arg1 & (~arg2)), df)
MSA_FN_IMM8(bmzi_b, pwd->b[i],
        BIT_MOVE_IF_ZERO(pwd->b[i], pws->b[i], i8, DF_BYTE))

#define BIT_SELECT(dest, arg1, arg2, df) \
            UNSIGNED((arg1 & (~dest)) | (arg2 & dest), df)
MSA_FN_IMM8(bseli_b, pwd->b[i],
        BIT_SELECT(pwd->b[i], pws->b[i], i8, DF_BYTE))

#define SHF_POS(i, imm) (((i) & 0xfc) + (((imm) >> (2 * ((i) & 0x03))) & 0x03))
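
/*
 * Illustration (not from the original source): SHF reorders elements in
 * groups of four.  For a byte vector and imm = 0x1b (0b00011011) the
 * selectors for i = 0..3 are 3, 2, 1, 0, so SHF_POS() picks elements
 * 3, 2, 1 and 0 of each 4-element group, i.e. it reverses every group.
 */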

void helper_msa_shf_df(CPUMIPSState *env, uint32_t df, uint32_t wd,
                       uint32_t ws, uint32_t imm)
    wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
    wr_t *pws = &(env->active_fpu.fpr[ws].wr);
    for (i = 0; i < DF_ELEMENTS(DF_BYTE); i++) {
        pwx->b[i] = pws->b[SHF_POS(i, imm)];
    for (i = 0; i < DF_ELEMENTS(DF_HALF); i++) {
        pwx->h[i] = pws->h[SHF_POS(i, imm)];
    for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) {
        pwx->w[i] = pws->w[SHF_POS(i, imm)];
    msa_move_v(pwd, pwx);

#define MSA_FN_VECTOR(FUNC, DEST, OPERATION) \
void helper_msa_ ## FUNC(CPUMIPSState *env, uint32_t wd, uint32_t ws, \
        uint32_t wt) \
    wr_t *pwd = &(env->active_fpu.fpr[wd].wr); \
    wr_t *pws = &(env->active_fpu.fpr[ws].wr); \
    wr_t *pwt = &(env->active_fpu.fpr[wt].wr); \
    for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) { \

MSA_FN_VECTOR(and_v, pwd->d[i], pws->d[i] & pwt->d[i])
MSA_FN_VECTOR(or_v, pwd->d[i], pws->d[i] | pwt->d[i])
MSA_FN_VECTOR(nor_v, pwd->d[i], ~(pws->d[i] | pwt->d[i]))
MSA_FN_VECTOR(xor_v, pwd->d[i], pws->d[i] ^ pwt->d[i])
MSA_FN_VECTOR(bmnz_v, pwd->d[i],
        BIT_MOVE_IF_NOT_ZERO(pwd->d[i], pws->d[i], pwt->d[i], DF_DOUBLE))
MSA_FN_VECTOR(bmz_v, pwd->d[i],
        BIT_MOVE_IF_ZERO(pwd->d[i], pws->d[i], pwt->d[i], DF_DOUBLE))
MSA_FN_VECTOR(bsel_v, pwd->d[i],
        BIT_SELECT(pwd->d[i], pws->d[i], pwt->d[i], DF_DOUBLE))
#undef BIT_MOVE_IF_NOT_ZERO
#undef BIT_MOVE_IF_ZERO

static inline int64_t msa_addv_df(uint32_t df, int64_t arg1, int64_t arg2)

static inline int64_t msa_subv_df(uint32_t df, int64_t arg1, int64_t arg2)

static inline int64_t msa_ceq_df(uint32_t df, int64_t arg1, int64_t arg2)
    return arg1 == arg2 ? -1 : 0;

static inline int64_t msa_cle_s_df(uint32_t df, int64_t arg1, int64_t arg2)
    return arg1 <= arg2 ? -1 : 0;

static inline int64_t msa_cle_u_df(uint32_t df, int64_t arg1, int64_t arg2)
    uint64_t u_arg1 = UNSIGNED(arg1, df);
    uint64_t u_arg2 = UNSIGNED(arg2, df);
    return u_arg1 <= u_arg2 ? -1 : 0;

static inline int64_t msa_clt_s_df(uint32_t df, int64_t arg1, int64_t arg2)
    return arg1 < arg2 ? -1 : 0;

static inline int64_t msa_clt_u_df(uint32_t df, int64_t arg1, int64_t arg2)
    uint64_t u_arg1 = UNSIGNED(arg1, df);
    uint64_t u_arg2 = UNSIGNED(arg2, df);
    return u_arg1 < u_arg2 ? -1 : 0;

static inline int64_t msa_max_s_df(uint32_t df, int64_t arg1, int64_t arg2)
    return arg1 > arg2 ? arg1 : arg2;

static inline int64_t msa_max_u_df(uint32_t df, int64_t arg1, int64_t arg2)
    uint64_t u_arg1 = UNSIGNED(arg1, df);
    uint64_t u_arg2 = UNSIGNED(arg2, df);
    return u_arg1 > u_arg2 ? arg1 : arg2;

static inline int64_t msa_min_s_df(uint32_t df, int64_t arg1, int64_t arg2)
    return arg1 < arg2 ? arg1 : arg2;

static inline int64_t msa_min_u_df(uint32_t df, int64_t arg1, int64_t arg2)
    uint64_t u_arg1 = UNSIGNED(arg1, df);
    uint64_t u_arg2 = UNSIGNED(arg2, df);
    return u_arg1 < u_arg2 ? arg1 : arg2;

#define MSA_BINOP_IMM_DF(helper, func) \
void helper_msa_ ## helper ## _df(CPUMIPSState *env, uint32_t df, \
        uint32_t wd, uint32_t ws, int32_t u5) \
    wr_t *pwd = &(env->active_fpu.fpr[wd].wr); \
    wr_t *pws = &(env->active_fpu.fpr[ws].wr); \
    for (i = 0; i < DF_ELEMENTS(DF_BYTE); i++) { \
        pwd->b[i] = msa_ ## func ## _df(df, pws->b[i], u5); \
    for (i = 0; i < DF_ELEMENTS(DF_HALF); i++) { \
        pwd->h[i] = msa_ ## func ## _df(df, pws->h[i], u5); \
    for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) { \
        pwd->w[i] = msa_ ## func ## _df(df, pws->w[i], u5); \
    for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) { \
        pwd->d[i] = msa_ ## func ## _df(df, pws->d[i], u5); \

MSA_BINOP_IMM_DF(addvi, addv)
MSA_BINOP_IMM_DF(subvi, subv)
MSA_BINOP_IMM_DF(ceqi, ceq)
MSA_BINOP_IMM_DF(clei_s, cle_s)
MSA_BINOP_IMM_DF(clei_u, cle_u)
MSA_BINOP_IMM_DF(clti_s, clt_s)
MSA_BINOP_IMM_DF(clti_u, clt_u)
MSA_BINOP_IMM_DF(maxi_s, max_s)
MSA_BINOP_IMM_DF(maxi_u, max_u)
MSA_BINOP_IMM_DF(mini_s, min_s)
MSA_BINOP_IMM_DF(mini_u, min_u)
#undef MSA_BINOP_IMM_DF

void helper_msa_ldi_df(CPUMIPSState *env, uint32_t df, uint32_t wd,
                       int32_t s10)
    wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
    for (i = 0; i < DF_ELEMENTS(DF_BYTE); i++) {
        pwd->b[i] = (int8_t)s10;
    for (i = 0; i < DF_ELEMENTS(DF_HALF); i++) {
        pwd->h[i] = (int16_t)s10;
    for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) {
        pwd->w[i] = (int32_t)s10;
    for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) {
        pwd->d[i] = (int64_t)s10;

/* Data format bit position and unsigned values */
#define BIT_POSITION(x, df) ((uint64_t)(x) % DF_BITS(df))
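
/*
 * Illustration (not from the original source): shift and bit counts are
 * taken modulo the element width, so for a word element (DF_BITS == 32)
 * BIT_POSITION(35, DF_WORD) == 3, mirroring how MSA ignores the upper
 * bits of an over-long shift amount.
 */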

static inline int64_t msa_sll_df(uint32_t df, int64_t arg1, int64_t arg2)
    int32_t b_arg2 = BIT_POSITION(arg2, df);
    return arg1 << b_arg2;

static inline int64_t msa_sra_df(uint32_t df, int64_t arg1, int64_t arg2)
    int32_t b_arg2 = BIT_POSITION(arg2, df);
    return arg1 >> b_arg2;

static inline int64_t msa_srl_df(uint32_t df, int64_t arg1, int64_t arg2)
    uint64_t u_arg1 = UNSIGNED(arg1, df);
    int32_t b_arg2 = BIT_POSITION(arg2, df);
    return u_arg1 >> b_arg2;

static inline int64_t msa_bclr_df(uint32_t df, int64_t arg1, int64_t arg2)
    int32_t b_arg2 = BIT_POSITION(arg2, df);
    return UNSIGNED(arg1 & (~(1LL << b_arg2)), df);

static inline int64_t msa_bset_df(uint32_t df, int64_t arg1,
                                  int64_t arg2)
    int32_t b_arg2 = BIT_POSITION(arg2, df);
    return UNSIGNED(arg1 | (1LL << b_arg2), df);

static inline int64_t msa_bneg_df(uint32_t df, int64_t arg1, int64_t arg2)
    int32_t b_arg2 = BIT_POSITION(arg2, df);
    return UNSIGNED(arg1 ^ (1LL << b_arg2), df);

static inline int64_t msa_binsl_df(uint32_t df, int64_t dest, int64_t arg1,
                                   int64_t arg2)
    uint64_t u_arg1 = UNSIGNED(arg1, df);
    uint64_t u_dest = UNSIGNED(dest, df);
    int32_t sh_d = BIT_POSITION(arg2, df) + 1;
    int32_t sh_a = DF_BITS(df) - sh_d;
    if (sh_d == DF_BITS(df)) {
    return UNSIGNED(UNSIGNED(u_dest << sh_d, df) >> sh_d, df) |
           UNSIGNED(UNSIGNED(u_arg1 >> sh_a, df) << sh_a, df);

static inline int64_t msa_binsr_df(uint32_t df, int64_t dest, int64_t arg1,
                                   int64_t arg2)
    uint64_t u_arg1 = UNSIGNED(arg1, df);
    uint64_t u_dest = UNSIGNED(dest, df);
    int32_t sh_d = BIT_POSITION(arg2, df) + 1;
    int32_t sh_a = DF_BITS(df) - sh_d;
    if (sh_d == DF_BITS(df)) {
    return UNSIGNED(UNSIGNED(u_dest >> sh_d, df) << sh_d, df) |
           UNSIGNED(UNSIGNED(u_arg1 << sh_a, df) >> sh_a, df);
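
/*
 * Illustration (not from the original source): BINSL/BINSR copy the
 * (arg2 % DF_BITS(df)) + 1 most- resp. least-significant bits of arg1
 * into dest and keep the remaining bits of dest.  For bytes with
 * arg2 == 2, binsl takes the top 3 bits from arg1 and the low 5 bits
 * from dest.
 */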

static inline int64_t msa_sat_s_df(uint32_t df, int64_t arg, uint32_t m)
    return arg < M_MIN_INT(m+1) ? M_MIN_INT(m+1) :
           arg > M_MAX_INT(m+1) ? M_MAX_INT(m+1) :

static inline int64_t msa_sat_u_df(uint32_t df, int64_t arg, uint32_t m)
    uint64_t u_arg = UNSIGNED(arg, df);
    return u_arg < M_MAX_UINT(m+1) ? u_arg :

static inline int64_t msa_srar_df(uint32_t df, int64_t arg1, int64_t arg2)
    int32_t b_arg2 = BIT_POSITION(arg2, df);
        int64_t r_bit = (arg1 >> (b_arg2 - 1)) & 1;
        return (arg1 >> b_arg2) + r_bit;

static inline int64_t msa_srlr_df(uint32_t df, int64_t arg1, int64_t arg2)
    uint64_t u_arg1 = UNSIGNED(arg1, df);
    int32_t b_arg2 = BIT_POSITION(arg2, df);
        uint64_t r_bit = (u_arg1 >> (b_arg2 - 1)) & 1;
        return (u_arg1 >> b_arg2) + r_bit;

#define MSA_BINOP_IMMU_DF(helper, func) \
void helper_msa_ ## helper ## _df(CPUMIPSState *env, uint32_t df, uint32_t wd, \
        uint32_t ws, uint32_t u5) \
    wr_t *pwd = &(env->active_fpu.fpr[wd].wr); \
    wr_t *pws = &(env->active_fpu.fpr[ws].wr); \
    for (i = 0; i < DF_ELEMENTS(DF_BYTE); i++) { \
        pwd->b[i] = msa_ ## func ## _df(df, pws->b[i], u5); \
    for (i = 0; i < DF_ELEMENTS(DF_HALF); i++) { \
        pwd->h[i] = msa_ ## func ## _df(df, pws->h[i], u5); \
    for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) { \
        pwd->w[i] = msa_ ## func ## _df(df, pws->w[i], u5); \
    for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) { \
        pwd->d[i] = msa_ ## func ## _df(df, pws->d[i], u5); \

MSA_BINOP_IMMU_DF(slli, sll)
MSA_BINOP_IMMU_DF(srai, sra)
MSA_BINOP_IMMU_DF(srli, srl)
MSA_BINOP_IMMU_DF(bclri, bclr)
MSA_BINOP_IMMU_DF(bseti, bset)
MSA_BINOP_IMMU_DF(bnegi, bneg)
MSA_BINOP_IMMU_DF(sat_s, sat_s)
MSA_BINOP_IMMU_DF(sat_u, sat_u)
MSA_BINOP_IMMU_DF(srari, srar)
MSA_BINOP_IMMU_DF(srlri, srlr)
#undef MSA_BINOP_IMMU_DF

#define MSA_TEROP_IMMU_DF(helper, func) \
void helper_msa_ ## helper ## _df(CPUMIPSState *env, uint32_t df, \
        uint32_t wd, uint32_t ws, uint32_t u5) \
    wr_t *pwd = &(env->active_fpu.fpr[wd].wr); \
    wr_t *pws = &(env->active_fpu.fpr[ws].wr); \
    for (i = 0; i < DF_ELEMENTS(DF_BYTE); i++) { \
        pwd->b[i] = msa_ ## func ## _df(df, pwd->b[i], pws->b[i], \
    for (i = 0; i < DF_ELEMENTS(DF_HALF); i++) { \
        pwd->h[i] = msa_ ## func ## _df(df, pwd->h[i], pws->h[i], \
    for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) { \
        pwd->w[i] = msa_ ## func ## _df(df, pwd->w[i], pws->w[i], \
    for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) { \
        pwd->d[i] = msa_ ## func ## _df(df, pwd->d[i], pws->d[i], \

MSA_TEROP_IMMU_DF(binsli, binsl)
MSA_TEROP_IMMU_DF(binsri, binsr)
#undef MSA_TEROP_IMMU_DF

static inline int64_t msa_max_a_df(uint32_t df, int64_t arg1, int64_t arg2)
    uint64_t abs_arg1 = arg1 >= 0 ? arg1 : -arg1;
    uint64_t abs_arg2 = arg2 >= 0 ? arg2 : -arg2;
    return abs_arg1 > abs_arg2 ? arg1 : arg2;

static inline int64_t msa_min_a_df(uint32_t df, int64_t arg1, int64_t arg2)
    uint64_t abs_arg1 = arg1 >= 0 ? arg1 : -arg1;
    uint64_t abs_arg2 = arg2 >= 0 ? arg2 : -arg2;
    return abs_arg1 < abs_arg2 ? arg1 : arg2;

static inline int64_t msa_add_a_df(uint32_t df, int64_t arg1, int64_t arg2)
    uint64_t abs_arg1 = arg1 >= 0 ? arg1 : -arg1;
    uint64_t abs_arg2 = arg2 >= 0 ? arg2 : -arg2;
    return abs_arg1 + abs_arg2;

static inline int64_t msa_adds_a_df(uint32_t df, int64_t arg1, int64_t arg2)
    uint64_t max_int = (uint64_t)DF_MAX_INT(df);
    uint64_t abs_arg1 = arg1 >= 0 ? arg1 : -arg1;
    uint64_t abs_arg2 = arg2 >= 0 ? arg2 : -arg2;
    if (abs_arg1 > max_int || abs_arg2 > max_int) {
        return (int64_t)max_int;
        return (abs_arg1 < max_int - abs_arg2) ? abs_arg1 + abs_arg2 : max_int;

static inline int64_t msa_adds_s_df(uint32_t df, int64_t arg1, int64_t arg2)
    int64_t max_int = DF_MAX_INT(df);
    int64_t min_int = DF_MIN_INT(df);
        return (min_int - arg1 < arg2) ? arg1 + arg2 : min_int;
        return (arg2 < max_int - arg1) ? arg1 + arg2 : max_int;

static inline uint64_t msa_adds_u_df(uint32_t df, uint64_t arg1, uint64_t arg2)
    uint64_t max_uint = DF_MAX_UINT(df);
    uint64_t u_arg1 = UNSIGNED(arg1, df);
    uint64_t u_arg2 = UNSIGNED(arg2, df);
    return (u_arg1 < max_uint - u_arg2) ? u_arg1 + u_arg2 : max_uint;

static inline int64_t msa_ave_s_df(uint32_t df, int64_t arg1, int64_t arg2)
    return (arg1 >> 1) + (arg2 >> 1) + (arg1 & arg2 & 1);

static inline uint64_t msa_ave_u_df(uint32_t df, uint64_t arg1, uint64_t arg2)
    uint64_t u_arg1 = UNSIGNED(arg1, df);
    uint64_t u_arg2 = UNSIGNED(arg2, df);
    return (u_arg1 >> 1) + (u_arg2 >> 1) + (u_arg1 & u_arg2 & 1);

static inline int64_t msa_aver_s_df(uint32_t df, int64_t arg1, int64_t arg2)
    return (arg1 >> 1) + (arg2 >> 1) + ((arg1 | arg2) & 1);

static inline uint64_t msa_aver_u_df(uint32_t df, uint64_t arg1, uint64_t arg2)
    uint64_t u_arg1 = UNSIGNED(arg1, df);
    uint64_t u_arg2 = UNSIGNED(arg2, df);
    return (u_arg1 >> 1) + (u_arg2 >> 1) + ((u_arg1 | u_arg2) & 1);
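
/*
 * Illustration (not from the original source): the averages above avoid
 * computing arg1 + arg2, which could overflow the element range.
 * (a >> 1) + (b >> 1) + (a & b & 1) equals floor((a + b) / 2), while
 * using ((a | b) & 1) as the correction term rounds upwards instead,
 * e.g. ave(3, 4) == 3 but aver(3, 4) == 4.
 */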

static inline int64_t msa_subs_s_df(uint32_t df, int64_t arg1, int64_t arg2)
    int64_t max_int = DF_MAX_INT(df);
    int64_t min_int = DF_MIN_INT(df);
        return (min_int + arg2 < arg1) ? arg1 - arg2 : min_int;
        return (arg1 < max_int + arg2) ? arg1 - arg2 : max_int;

static inline int64_t msa_subs_u_df(uint32_t df, int64_t arg1, int64_t arg2)
    uint64_t u_arg1 = UNSIGNED(arg1, df);
    uint64_t u_arg2 = UNSIGNED(arg2, df);
    return (u_arg1 > u_arg2) ? u_arg1 - u_arg2 : 0;

static inline int64_t msa_subsus_u_df(uint32_t df, int64_t arg1, int64_t arg2)
    uint64_t u_arg1 = UNSIGNED(arg1, df);
    uint64_t max_uint = DF_MAX_UINT(df);
        uint64_t u_arg2 = (uint64_t)arg2;
        return (u_arg1 > u_arg2) ?
            (int64_t)(u_arg1 - u_arg2) :
        uint64_t u_arg2 = (uint64_t)(-arg2);
        return (u_arg1 < max_uint - u_arg2) ?
            (int64_t)(u_arg1 + u_arg2) :

static inline int64_t msa_subsuu_s_df(uint32_t df, int64_t arg1, int64_t arg2)
    uint64_t u_arg1 = UNSIGNED(arg1, df);
    uint64_t u_arg2 = UNSIGNED(arg2, df);
    int64_t max_int = DF_MAX_INT(df);
    int64_t min_int = DF_MIN_INT(df);
    if (u_arg1 > u_arg2) {
        return u_arg1 - u_arg2 < (uint64_t)max_int ?
            (int64_t)(u_arg1 - u_arg2) :
        return u_arg2 - u_arg1 < (uint64_t)(-min_int) ?
            (int64_t)(u_arg1 - u_arg2) :

static inline int64_t msa_asub_s_df(uint32_t df, int64_t arg1, int64_t arg2)
    return (arg1 < arg2) ?
        (uint64_t)(arg2 - arg1) : (uint64_t)(arg1 - arg2);

static inline uint64_t msa_asub_u_df(uint32_t df, uint64_t arg1, uint64_t arg2)
    uint64_t u_arg1 = UNSIGNED(arg1, df);
    uint64_t u_arg2 = UNSIGNED(arg2, df);
    /* unsigned compare */
    return (u_arg1 < u_arg2) ?
        (uint64_t)(u_arg2 - u_arg1) : (uint64_t)(u_arg1 - u_arg2);

static inline int64_t msa_mulv_df(uint32_t df, int64_t arg1, int64_t arg2)

static inline int64_t msa_div_s_df(uint32_t df, int64_t arg1, int64_t arg2)
    if (arg1 == DF_MIN_INT(df) && arg2 == -1) {
        return DF_MIN_INT(df);
    return arg2 ? arg1 / arg2 : 0;

static inline int64_t msa_div_u_df(uint32_t df, int64_t arg1, int64_t arg2)
    uint64_t u_arg1 = UNSIGNED(arg1, df);
    uint64_t u_arg2 = UNSIGNED(arg2, df);
    return u_arg2 ? u_arg1 / u_arg2 : 0;

static inline int64_t msa_mod_s_df(uint32_t df, int64_t arg1, int64_t arg2)
    if (arg1 == DF_MIN_INT(df) && arg2 == -1) {
    return arg2 ? arg1 % arg2 : 0;

static inline int64_t msa_mod_u_df(uint32_t df, int64_t arg1, int64_t arg2)
    uint64_t u_arg1 = UNSIGNED(arg1, df);
    uint64_t u_arg2 = UNSIGNED(arg2, df);
    return u_arg2 ? u_arg1 % u_arg2 : 0;

#define SIGNED_EVEN(a, df) \
        ((((int64_t)(a)) << (64 - DF_BITS(df)/2)) >> (64 - DF_BITS(df)/2))

#define UNSIGNED_EVEN(a, df) \
        ((((uint64_t)(a)) << (64 - DF_BITS(df)/2)) >> (64 - DF_BITS(df)/2))

#define SIGNED_ODD(a, df) \
        ((((int64_t)(a)) << (64 - DF_BITS(df))) >> (64 - DF_BITS(df)/2))

#define UNSIGNED_ODD(a, df) \
        ((((uint64_t)(a)) << (64 - DF_BITS(df))) >> (64 - DF_BITS(df)/2))
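
/*
 * Illustration (not from the original source): these macros split one
 * element into its two half-width sub-elements.  For a DF_HALF element
 * 0xff02, SIGNED_EVEN() yields the low byte sign-extended (2) and
 * SIGNED_ODD() the high byte sign-extended (-1); DOTP below multiplies
 * and sums the corresponding even/odd pairs.
 */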

#define SIGNED_EXTRACT(e, o, a, df) \
        e = SIGNED_EVEN(a, df); \
        o = SIGNED_ODD(a, df); \

#define UNSIGNED_EXTRACT(e, o, a, df) \
        e = UNSIGNED_EVEN(a, df); \
        o = UNSIGNED_ODD(a, df); \

static inline int64_t msa_dotp_s_df(uint32_t df, int64_t arg1, int64_t arg2)
    SIGNED_EXTRACT(even_arg1, odd_arg1, arg1, df);
    SIGNED_EXTRACT(even_arg2, odd_arg2, arg2, df);
    return (even_arg1 * even_arg2) + (odd_arg1 * odd_arg2);

static inline int64_t msa_dotp_u_df(uint32_t df, int64_t arg1, int64_t arg2)
    UNSIGNED_EXTRACT(even_arg1, odd_arg1, arg1, df);
    UNSIGNED_EXTRACT(even_arg2, odd_arg2, arg2, df);
    return (even_arg1 * even_arg2) + (odd_arg1 * odd_arg2);

#define CONCATENATE_AND_SLIDE(s, k) \
        for (i = 0; i < s; i++) { \
            v[i] = pws->b[s * k + i]; \
            v[i + s] = pwd->b[s * k + i]; \
        for (i = 0; i < s; i++) { \
            pwd->b[s * k + i] = v[i + n]; \

static inline void msa_sld_df(uint32_t df, wr_t *pwd,
                              wr_t *pws, target_ulong rt)
    uint32_t n = rt % DF_ELEMENTS(df);
        CONCATENATE_AND_SLIDE(DF_ELEMENTS(DF_BYTE), 0);
        for (k = 0; k < 2; k++) {
            CONCATENATE_AND_SLIDE(DF_ELEMENTS(DF_HALF), k);
        for (k = 0; k < 4; k++) {
            CONCATENATE_AND_SLIDE(DF_ELEMENTS(DF_WORD), k);
        for (k = 0; k < 8; k++) {
            CONCATENATE_AND_SLIDE(DF_ELEMENTS(DF_DOUBLE), k);
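
/*
 * Illustration (not from the original source): for SLD each group of
 * DF_ELEMENTS(df) bytes of ws (low half) and wd (high half) is
 * concatenated into the temporary v[], and the destination bytes are
 * taken starting n bytes in, i.e. the ws bytes slide down by n with wd
 * bytes shifting in from above.
 */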

static inline int64_t msa_hadd_s_df(uint32_t df, int64_t arg1, int64_t arg2)
    return SIGNED_ODD(arg1, df) + SIGNED_EVEN(arg2, df);

static inline int64_t msa_hadd_u_df(uint32_t df, int64_t arg1, int64_t arg2)
    return UNSIGNED_ODD(arg1, df) + UNSIGNED_EVEN(arg2, df);

static inline int64_t msa_hsub_s_df(uint32_t df, int64_t arg1, int64_t arg2)
    return SIGNED_ODD(arg1, df) - SIGNED_EVEN(arg2, df);

static inline int64_t msa_hsub_u_df(uint32_t df, int64_t arg1, int64_t arg2)
    return UNSIGNED_ODD(arg1, df) - UNSIGNED_EVEN(arg2, df);

static inline int64_t msa_mul_q_df(uint32_t df, int64_t arg1, int64_t arg2)
    int64_t q_min = DF_MIN_INT(df);
    int64_t q_max = DF_MAX_INT(df);

    if (arg1 == q_min && arg2 == q_min) {
    return (arg1 * arg2) >> (DF_BITS(df) - 1);

static inline int64_t msa_mulr_q_df(uint32_t df, int64_t arg1, int64_t arg2)
    int64_t q_min = DF_MIN_INT(df);
    int64_t q_max = DF_MAX_INT(df);
    int64_t r_bit = 1 << (DF_BITS(df) - 2);

    if (arg1 == q_min && arg2 == q_min) {
    return (arg1 * arg2 + r_bit) >> (DF_BITS(df) - 1);
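
/*
 * Illustration (not from the original source): the _q helpers treat
 * elements as fixed-point fractions (Q15 for halfwords, Q31 for words).
 * E.g. 0x4000 * 0x4000 = 0x10000000, and shifting right by 15 gives
 * 0x2000, i.e. 0.5 * 0.5 = 0.25.  The q_min * q_min special case is
 * needed because -1.0 * -1.0 would otherwise wrap to q_min instead of
 * saturating to q_max.
 */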

#define MSA_BINOP_DF(func) \
void helper_msa_ ## func ## _df(CPUMIPSState *env, uint32_t df, \
        uint32_t wd, uint32_t ws, uint32_t wt) \
    wr_t *pwd = &(env->active_fpu.fpr[wd].wr); \
    wr_t *pws = &(env->active_fpu.fpr[ws].wr); \
    wr_t *pwt = &(env->active_fpu.fpr[wt].wr); \
    for (i = 0; i < DF_ELEMENTS(DF_BYTE); i++) { \
        pwd->b[i] = msa_ ## func ## _df(df, pws->b[i], pwt->b[i]); \
    for (i = 0; i < DF_ELEMENTS(DF_HALF); i++) { \
        pwd->h[i] = msa_ ## func ## _df(df, pws->h[i], pwt->h[i]); \
    for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) { \
        pwd->w[i] = msa_ ## func ## _df(df, pws->w[i], pwt->w[i]); \
    for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) { \
        pwd->d[i] = msa_ ## func ## _df(df, pws->d[i], pwt->d[i]); \

MSA_BINOP_DF(subsus_u)
MSA_BINOP_DF(subsuu_s)

void helper_msa_sld_df(CPUMIPSState *env, uint32_t df, uint32_t wd,
                       uint32_t ws, uint32_t rt)
    wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
    wr_t *pws = &(env->active_fpu.fpr[ws].wr);
    msa_sld_df(df, pwd, pws, env->active_tc.gpr[rt]);

static inline int64_t msa_maddv_df(uint32_t df, int64_t dest, int64_t arg1,
                                   int64_t arg2)
    return dest + arg1 * arg2;

static inline int64_t msa_msubv_df(uint32_t df, int64_t dest, int64_t arg1,
                                   int64_t arg2)
    return dest - arg1 * arg2;

static inline int64_t msa_dpadd_s_df(uint32_t df, int64_t dest, int64_t arg1,
                                     int64_t arg2)
    SIGNED_EXTRACT(even_arg1, odd_arg1, arg1, df);
    SIGNED_EXTRACT(even_arg2, odd_arg2, arg2, df);
    return dest + (even_arg1 * even_arg2) + (odd_arg1 * odd_arg2);

static inline int64_t msa_dpadd_u_df(uint32_t df, int64_t dest, int64_t arg1,
                                     int64_t arg2)
    UNSIGNED_EXTRACT(even_arg1, odd_arg1, arg1, df);
    UNSIGNED_EXTRACT(even_arg2, odd_arg2, arg2, df);
    return dest + (even_arg1 * even_arg2) + (odd_arg1 * odd_arg2);

static inline int64_t msa_dpsub_s_df(uint32_t df, int64_t dest, int64_t arg1,
                                     int64_t arg2)
    SIGNED_EXTRACT(even_arg1, odd_arg1, arg1, df);
    SIGNED_EXTRACT(even_arg2, odd_arg2, arg2, df);
    return dest - ((even_arg1 * even_arg2) + (odd_arg1 * odd_arg2));

static inline int64_t msa_dpsub_u_df(uint32_t df, int64_t dest, int64_t arg1,
                                     int64_t arg2)
    UNSIGNED_EXTRACT(even_arg1, odd_arg1, arg1, df);
    UNSIGNED_EXTRACT(even_arg2, odd_arg2, arg2, df);
    return dest - ((even_arg1 * even_arg2) + (odd_arg1 * odd_arg2));

static inline int64_t msa_madd_q_df(uint32_t df, int64_t dest, int64_t arg1,
                                    int64_t arg2)
    int64_t q_prod, q_ret;

    int64_t q_max = DF_MAX_INT(df);
    int64_t q_min = DF_MIN_INT(df);

    q_prod = arg1 * arg2;
    q_ret = ((dest << (DF_BITS(df) - 1)) + q_prod) >> (DF_BITS(df) - 1);

    return (q_ret < q_min) ? q_min : (q_max < q_ret) ? q_max : q_ret;

static inline int64_t msa_msub_q_df(uint32_t df, int64_t dest, int64_t arg1,
                                    int64_t arg2)
    int64_t q_prod, q_ret;

    int64_t q_max = DF_MAX_INT(df);
    int64_t q_min = DF_MIN_INT(df);

    q_prod = arg1 * arg2;
    q_ret = ((dest << (DF_BITS(df) - 1)) - q_prod) >> (DF_BITS(df) - 1);

    return (q_ret < q_min) ? q_min : (q_max < q_ret) ? q_max : q_ret;

static inline int64_t msa_maddr_q_df(uint32_t df, int64_t dest, int64_t arg1,
                                     int64_t arg2)
    int64_t q_prod, q_ret;

    int64_t q_max = DF_MAX_INT(df);
    int64_t q_min = DF_MIN_INT(df);
    int64_t r_bit = 1 << (DF_BITS(df) - 2);

    q_prod = arg1 * arg2;
    q_ret = ((dest << (DF_BITS(df) - 1)) + q_prod + r_bit) >> (DF_BITS(df) - 1);

    return (q_ret < q_min) ? q_min : (q_max < q_ret) ? q_max : q_ret;

static inline int64_t msa_msubr_q_df(uint32_t df, int64_t dest, int64_t arg1,
                                     int64_t arg2)
    int64_t q_prod, q_ret;

    int64_t q_max = DF_MAX_INT(df);
    int64_t q_min = DF_MIN_INT(df);
    int64_t r_bit = 1 << (DF_BITS(df) - 2);

    q_prod = arg1 * arg2;
    q_ret = ((dest << (DF_BITS(df) - 1)) - q_prod + r_bit) >> (DF_BITS(df) - 1);

    return (q_ret < q_min) ? q_min : (q_max < q_ret) ? q_max : q_ret;
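
/*
 * Illustration (not from the original source): in the *_q multiply-
 * accumulate helpers the accumulator element is first promoted to the
 * product scale by shifting it left by DF_BITS(df) - 1, the Q-format
 * product is added or subtracted (plus the rounding bit r_bit in the
 * *r_q variants), and the sum is shifted back down and saturated to
 * the [q_min, q_max] range.
 */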

#define MSA_TEROP_DF(func) \
void helper_msa_ ## func ## _df(CPUMIPSState *env, uint32_t df, uint32_t wd, \
        uint32_t ws, uint32_t wt) \
    wr_t *pwd = &(env->active_fpu.fpr[wd].wr); \
    wr_t *pws = &(env->active_fpu.fpr[ws].wr); \
    wr_t *pwt = &(env->active_fpu.fpr[wt].wr); \
    for (i = 0; i < DF_ELEMENTS(DF_BYTE); i++) { \
        pwd->b[i] = msa_ ## func ## _df(df, pwd->b[i], pws->b[i], \
    for (i = 0; i < DF_ELEMENTS(DF_HALF); i++) { \
        pwd->h[i] = msa_ ## func ## _df(df, pwd->h[i], pws->h[i], \
    for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) { \
        pwd->w[i] = msa_ ## func ## _df(df, pwd->w[i], pws->w[i], \
    for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) { \
        pwd->d[i] = msa_ ## func ## _df(df, pwd->d[i], pws->d[i], \

MSA_TEROP_DF(dpadd_s)
MSA_TEROP_DF(dpadd_u)
MSA_TEROP_DF(dpsub_s)
MSA_TEROP_DF(dpsub_u)
MSA_TEROP_DF(madd_q)
MSA_TEROP_DF(msub_q)
MSA_TEROP_DF(maddr_q)
MSA_TEROP_DF(msubr_q)

static inline void msa_splat_df(uint32_t df, wr_t *pwd,
                                wr_t *pws, target_ulong rt)
    uint32_t n = rt % DF_ELEMENTS(df);
    for (i = 0; i < DF_ELEMENTS(DF_BYTE); i++) {
        pwd->b[i] = pws->b[n];
    for (i = 0; i < DF_ELEMENTS(DF_HALF); i++) {
        pwd->h[i] = pws->h[n];
    for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) {
        pwd->w[i] = pws->w[n];
    for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) {
        pwd->d[i] = pws->d[n];

void helper_msa_splat_df(CPUMIPSState *env, uint32_t df, uint32_t wd,
                         uint32_t ws, uint32_t rt)
    wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
    wr_t *pws = &(env->active_fpu.fpr[ws].wr);
    msa_splat_df(df, pwd, pws, env->active_tc.gpr[rt]);

#define MSA_DO_B MSA_DO(b)
#define MSA_DO_H MSA_DO(h)
#define MSA_DO_W MSA_DO(w)
#define MSA_DO_D MSA_DO(d)

#define MSA_LOOP_B MSA_LOOP(B)
#define MSA_LOOP_H MSA_LOOP(H)
#define MSA_LOOP_W MSA_LOOP(W)
#define MSA_LOOP_D MSA_LOOP(D)

#define MSA_LOOP_COND_B MSA_LOOP_COND(DF_BYTE)
#define MSA_LOOP_COND_H MSA_LOOP_COND(DF_HALF)
#define MSA_LOOP_COND_W MSA_LOOP_COND(DF_WORD)
#define MSA_LOOP_COND_D MSA_LOOP_COND(DF_DOUBLE)

#define MSA_LOOP(DF) \
    for (i = 0; i < (MSA_LOOP_COND_ ## DF) ; i++) { \

#define MSA_FN_DF(FUNC) \
void helper_msa_##FUNC(CPUMIPSState *env, uint32_t df, uint32_t wd, \
        uint32_t ws, uint32_t wt) \
    wr_t *pwd = &(env->active_fpu.fpr[wd].wr); \
    wr_t *pws = &(env->active_fpu.fpr[ws].wr); \
    wr_t *pwt = &(env->active_fpu.fpr[wt].wr); \
    wr_t wx, *pwx = &wx; \
    msa_move_v(pwd, pwx); \

#define MSA_LOOP_COND(DF) \
    (DF_ELEMENTS(DF) / 2)

#define Rb(pwr, i) (pwr->b[i])
#define Lb(pwr, i) (pwr->b[i + DF_ELEMENTS(DF_BYTE)/2])
#define Rh(pwr, i) (pwr->h[i])
#define Lh(pwr, i) (pwr->h[i + DF_ELEMENTS(DF_HALF)/2])
#define Rw(pwr, i) (pwr->w[i])
#define Lw(pwr, i) (pwr->w[i + DF_ELEMENTS(DF_WORD)/2])
#define Rd(pwr, i) (pwr->d[i])
#define Ld(pwr, i) (pwr->d[i + DF_ELEMENTS(DF_DOUBLE)/2])

#define MSA_DO(DF) \
        R##DF(pwx, i) = pwt->DF[2*i]; \
        L##DF(pwx, i) = pws->DF[2*i]; \

#define MSA_DO(DF) \
        R##DF(pwx, i) = pwt->DF[2*i+1]; \
        L##DF(pwx, i) = pws->DF[2*i+1]; \

#define MSA_DO(DF) \
        pwx->DF[2*i]   = L##DF(pwt, i); \
        pwx->DF[2*i+1] = L##DF(pws, i); \

#define MSA_DO(DF) \
        pwx->DF[2*i]   = R##DF(pwt, i); \
        pwx->DF[2*i+1] = R##DF(pws, i); \

#define MSA_DO(DF) \
        pwx->DF[2*i]   = pwt->DF[2*i]; \
        pwx->DF[2*i+1] = pws->DF[2*i]; \

#define MSA_DO(DF) \
        pwx->DF[2*i]   = pwt->DF[2*i+1]; \
        pwx->DF[2*i+1] = pws->DF[2*i+1]; \

#undef MSA_LOOP_COND

#define MSA_LOOP_COND(DF) \

#define MSA_DO(DF) \
        uint32_t n = DF_ELEMENTS(df); \
        uint32_t k = (pwd->DF[i] & 0x3f) % (2 * n); \
            (pwd->DF[i] & 0xc0) ? 0 : k < n ? pwt->DF[k] : pws->DF[k - n]; \

#undef MSA_LOOP_COND

void helper_msa_sldi_df(CPUMIPSState *env, uint32_t df, uint32_t wd,
                        uint32_t ws, uint32_t n)
    wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
    wr_t *pws = &(env->active_fpu.fpr[ws].wr);
    msa_sld_df(df, pwd, pws, n);

void helper_msa_splati_df(CPUMIPSState *env, uint32_t df, uint32_t wd,
                          uint32_t ws, uint32_t n)
    wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
    wr_t *pws = &(env->active_fpu.fpr[ws].wr);
    msa_splat_df(df, pwd, pws, n);

void helper_msa_copy_s_df(CPUMIPSState *env, uint32_t df, uint32_t rd,
                          uint32_t ws, uint32_t n)
    n %= DF_ELEMENTS(df);
        env->active_tc.gpr[rd] = (int8_t)env->active_fpu.fpr[ws].wr.b[n];
        env->active_tc.gpr[rd] = (int16_t)env->active_fpu.fpr[ws].wr.h[n];
        env->active_tc.gpr[rd] = (int32_t)env->active_fpu.fpr[ws].wr.w[n];
#ifdef TARGET_MIPS64
        env->active_tc.gpr[rd] = (int64_t)env->active_fpu.fpr[ws].wr.d[n];

void helper_msa_copy_u_df(CPUMIPSState *env, uint32_t df, uint32_t rd,
                          uint32_t ws, uint32_t n)
    n %= DF_ELEMENTS(df);
        env->active_tc.gpr[rd] = (uint8_t)env->active_fpu.fpr[ws].wr.b[n];
        env->active_tc.gpr[rd] = (uint16_t)env->active_fpu.fpr[ws].wr.h[n];
        env->active_tc.gpr[rd] = (uint32_t)env->active_fpu.fpr[ws].wr.w[n];
#ifdef TARGET_MIPS64
        env->active_tc.gpr[rd] = (uint64_t)env->active_fpu.fpr[ws].wr.d[n];

void helper_msa_insert_df(CPUMIPSState *env, uint32_t df, uint32_t wd,
                          uint32_t rs_num, uint32_t n)
    wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
    target_ulong rs = env->active_tc.gpr[rs_num];
        pwd->b[n] = (int8_t)rs;
        pwd->h[n] = (int16_t)rs;
        pwd->w[n] = (int32_t)rs;
        pwd->d[n] = (int64_t)rs;

void helper_msa_insve_df(CPUMIPSState *env, uint32_t df, uint32_t wd,
                         uint32_t ws, uint32_t n)
    wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
    wr_t *pws = &(env->active_fpu.fpr[ws].wr);
        pwd->b[n] = (int8_t)pws->b[0];
        pwd->h[n] = (int16_t)pws->h[0];
        pwd->w[n] = (int32_t)pws->w[0];
        pwd->d[n] = (int64_t)pws->d[0];

void helper_msa_ctcmsa(CPUMIPSState *env, target_ulong elm, uint32_t cd)
        env->active_tc.msacsr = (int32_t)elm & MSACSR_MASK;
        /* set float_status rounding mode */
        set_float_rounding_mode(
            ieee_rm[(env->active_tc.msacsr & MSACSR_RM_MASK) >> MSACSR_RM],
            &env->active_tc.msa_fp_status);
        /* set float_status flush modes */
        set_flush_to_zero(
            (env->active_tc.msacsr & MSACSR_FS_MASK) != 0 ? 1 : 0,
            &env->active_tc.msa_fp_status);
        set_flush_inputs_to_zero(
            (env->active_tc.msacsr & MSACSR_FS_MASK) != 0 ? 1 : 0,
            &env->active_tc.msa_fp_status);
        /* check exception */
        if ((GET_FP_ENABLE(env->active_tc.msacsr) | FP_UNIMPLEMENTED)
            & GET_FP_CAUSE(env->active_tc.msacsr)) {
            helper_raise_exception(env, EXCP_MSAFPE);

target_ulong helper_msa_cfcmsa(CPUMIPSState *env, uint32_t cs)
        return env->active_tc.msacsr & MSACSR_MASK;

void helper_msa_move_v(CPUMIPSState *env, uint32_t wd, uint32_t ws)
    wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
    wr_t *pws = &(env->active_fpu.fpr[ws].wr);
    msa_move_v(pwd, pws);

static inline int64_t msa_pcnt_df(uint32_t df, int64_t arg)
    x = UNSIGNED(arg, df);

    x = (x & 0x5555555555555555ULL) + ((x >>  1) & 0x5555555555555555ULL);
    x = (x & 0x3333333333333333ULL) + ((x >>  2) & 0x3333333333333333ULL);
    x = (x & 0x0F0F0F0F0F0F0F0FULL) + ((x >>  4) & 0x0F0F0F0F0F0F0F0FULL);
    x = (x & 0x00FF00FF00FF00FFULL) + ((x >>  8) & 0x00FF00FF00FF00FFULL);
    x = (x & 0x0000FFFF0000FFFFULL) + ((x >> 16) & 0x0000FFFF0000FFFFULL);
    x = (x & 0x00000000FFFFFFFFULL) + ((x >> 32));

static inline int64_t msa_nlzc_df(uint32_t df, int64_t arg)
    x = UNSIGNED(arg, df);
    c = DF_BITS(df) / 2;

static inline int64_t msa_nloc_df(uint32_t df, int64_t arg)
    return msa_nlzc_df(df, UNSIGNED((~arg), df));
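
/*
 * Note (not from the original source): msa_pcnt_df() is the classic tree
 * population count -- each step sums adjacent 1-, 2-, 4-, ... bit fields,
 * so after six steps x holds the number of set bits.  msa_nlzc_df()
 * counts leading zeros by halving the probed width each round, and NLOC
 * counts leading ones by complementing the argument first.
 */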

void helper_msa_fill_df(CPUMIPSState *env, uint32_t df, uint32_t wd,
                        uint32_t rs)
    wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
    for (i = 0; i < DF_ELEMENTS(DF_BYTE); i++) {
        pwd->b[i] = (int8_t)env->active_tc.gpr[rs];
    for (i = 0; i < DF_ELEMENTS(DF_HALF); i++) {
        pwd->h[i] = (int16_t)env->active_tc.gpr[rs];
    for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) {
        pwd->w[i] = (int32_t)env->active_tc.gpr[rs];
    for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) {
        pwd->d[i] = (int64_t)env->active_tc.gpr[rs];

#define MSA_UNOP_DF(func) \
void helper_msa_ ## func ## _df(CPUMIPSState *env, uint32_t df, \
        uint32_t wd, uint32_t ws) \
    wr_t *pwd = &(env->active_fpu.fpr[wd].wr); \
    wr_t *pws = &(env->active_fpu.fpr[ws].wr); \
    for (i = 0; i < DF_ELEMENTS(DF_BYTE); i++) { \
        pwd->b[i] = msa_ ## func ## _df(df, pws->b[i]); \
    for (i = 0; i < DF_ELEMENTS(DF_HALF); i++) { \
        pwd->h[i] = msa_ ## func ## _df(df, pws->h[i]); \
    for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) { \
        pwd->w[i] = msa_ ## func ## _df(df, pws->w[i]); \
    for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) { \
        pwd->d[i] = msa_ ## func ## _df(df, pws->d[i]); \

#define FLOAT_ONE32 make_float32(0x3f8 << 20)
#define FLOAT_ONE64 make_float64(0x3ffULL << 52)

#define FLOAT_SNAN16 (float16_default_nan ^ 0x0220)
#define FLOAT_SNAN32 (float32_default_nan ^ 0x00400020)
#define FLOAT_SNAN64 (float64_default_nan ^ 0x0008000000000020ULL)
        /* 0x7ff0000000000020 */
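
/*
 * Note (not from the original source): assuming the default NaN values
 * used by QEMU's MIPS softfloat configuration at the time (0x7e00,
 * 0x7fc00000, 0x7ff8000000000000), these XORs flip the quiet bit and set
 * a payload bit, yielding the patterns 0x7c20, 0x7f800020 and
 * 0x7ff0000000000020 that are reported on trapped exceptions below.
 */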

static inline void clear_msacsr_cause(CPUMIPSState *env)
    SET_FP_CAUSE(env->active_tc.msacsr, 0);

static inline void check_msacsr_cause(CPUMIPSState *env)
    if ((GET_FP_CAUSE(env->active_tc.msacsr) &
            (GET_FP_ENABLE(env->active_tc.msacsr) | FP_UNIMPLEMENTED)) == 0) {
        UPDATE_FP_FLAGS(env->active_tc.msacsr,
                GET_FP_CAUSE(env->active_tc.msacsr));
        helper_raise_exception(env, EXCP_MSAFPE);

/* Flush-to-zero use cases for update_msacsr() */
#define CLEAR_FS_UNDERFLOW 1
#define CLEAR_IS_INEXACT   2
#define RECIPROCAL_INEXACT 4

static inline int update_msacsr(CPUMIPSState *env, int action, int denormal)
    ieee_ex = get_float_exception_flags(&env->active_tc.msa_fp_status);

    /* QEMU softfloat does not signal all underflow cases */
        ieee_ex |= float_flag_underflow;

    c = ieee_ex_to_mips(ieee_ex);
    enable = GET_FP_ENABLE(env->active_tc.msacsr) | FP_UNIMPLEMENTED;

    /* Set Inexact (I) when flushing inputs to zero */
    if ((ieee_ex & float_flag_input_denormal) &&
            (env->active_tc.msacsr & MSACSR_FS_MASK) != 0) {
        if (action & CLEAR_IS_INEXACT) {

    /* Set Inexact (I) and Underflow (U) when flushing outputs to zero */
    if ((ieee_ex & float_flag_output_denormal) &&
            (env->active_tc.msacsr & MSACSR_FS_MASK) != 0) {
        if (action & CLEAR_FS_UNDERFLOW) {

    /* Set Inexact (I) when Overflow (O) is not enabled */
    if ((c & FP_OVERFLOW) != 0 && (enable & FP_OVERFLOW) == 0) {

    /* Clear Exact Underflow when Underflow (U) is not enabled */
    if ((c & FP_UNDERFLOW) != 0 && (enable & FP_UNDERFLOW) == 0 &&
            (c & FP_INEXACT) == 0) {

    /* Reciprocal operations set only Inexact when valid and not
       divide by zero */
    if ((action & RECIPROCAL_INEXACT) &&
            (c & (FP_INVALID | FP_DIV0)) == 0) {

    cause = c & enable;    /* all current enabled exceptions */

        /* No enabled exception, update the MSACSR Cause
           with all current exceptions */
        SET_FP_CAUSE(env->active_tc.msacsr,
                (GET_FP_CAUSE(env->active_tc.msacsr) | c));
        /* Current exceptions are enabled */
        if ((env->active_tc.msacsr & MSACSR_NX_MASK) == 0) {
            /* Exception(s) will trap, update MSACSR Cause
               with all enabled exceptions */
            SET_FP_CAUSE(env->active_tc.msacsr,
                    (GET_FP_CAUSE(env->active_tc.msacsr) | c));
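
/*
 * Note (not from the original source): update_msacsr() folds the
 * softfloat exception flags into MIPS cause bits, applies the
 * flush-to-zero adjustments selected by 'action', records them in
 * MSACSR.Cause, and returns the accumulated cause bits so that callers
 * can decide via get_enabled_exceptions() whether the result must be
 * replaced by the SNAN/cause pattern.
 */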

static inline int get_enabled_exceptions(const CPUMIPSState *env, int c)
    int enable = GET_FP_ENABLE(env->active_tc.msacsr) | FP_UNIMPLEMENTED;

static inline float16 float16_from_float32(int32 a, flag ieee STATUS_PARAM)
    f_val = float32_to_float16((float32)a, ieee STATUS_VAR);
    f_val = float16_maybe_silence_nan(f_val);

    return a < 0 ? (f_val | (1 << 15)) : f_val;

static inline float32 float32_from_float64(int64 a STATUS_PARAM)
    f_val = float64_to_float32((float64)a STATUS_VAR);
    f_val = float32_maybe_silence_nan(f_val);

    return a < 0 ? (f_val | (1 << 31)) : f_val;

static inline float32 float32_from_float16(int16_t a, flag ieee STATUS_PARAM)
    f_val = float16_to_float32((float16)a, ieee STATUS_VAR);
    f_val = float32_maybe_silence_nan(f_val);

    return a < 0 ? (f_val | (1 << 31)) : f_val;

static inline float64 float64_from_float32(int32 a STATUS_PARAM)
    f_val = float32_to_float64((float64)a STATUS_VAR);
    f_val = float64_maybe_silence_nan(f_val);

    return a < 0 ? (f_val | (1ULL << 63)) : f_val;

static inline float32 float32_from_q16(int16_t a STATUS_PARAM)
    /* conversion as integer and scaling */
    f_val = int32_to_float32(a STATUS_VAR);
    f_val = float32_scalbn(f_val, -15 STATUS_VAR);

static inline float64 float64_from_q32(int32 a STATUS_PARAM)
    /* conversion as integer and scaling */
    f_val = int32_to_float64(a STATUS_VAR);
    f_val = float64_scalbn(f_val, -31 STATUS_VAR);
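
/*
 * Illustration (not from the original source): the Q16/Q32 fixed-point
 * conversions treat the element as an integer and then scale it by 2^-15
 * or 2^-31, so the Q16 value 0x4000 becomes 16384 * 2^-15 = 0.5f.  The
 * opposite direction below scales up first and saturates on overflow.
 */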

static inline int16_t float32_to_q16(float32 a STATUS_PARAM)
    int32 q_min = 0xffff8000;
    int32 q_max = 0x00007fff;

    if (float32_is_any_nan(a)) {
        float_raise(float_flag_invalid STATUS_VAR);

    a = float32_scalbn(a, 15 STATUS_VAR);

    ieee_ex = get_float_exception_flags(status);
    set_float_exception_flags(ieee_ex & (~float_flag_underflow)

    if (ieee_ex & float_flag_overflow) {
        float_raise(float_flag_inexact STATUS_VAR);
        return (int32)a < 0 ? q_min : q_max;

    /* conversion to int */
    q_val = float32_to_int32(a STATUS_VAR);

    ieee_ex = get_float_exception_flags(status);
    set_float_exception_flags(ieee_ex & (~float_flag_underflow)

    if (ieee_ex & float_flag_invalid) {
        set_float_exception_flags(ieee_ex & (~float_flag_invalid)
        float_raise(float_flag_overflow | float_flag_inexact STATUS_VAR);
        return (int32)a < 0 ? q_min : q_max;

    if (q_val < q_min) {
        float_raise(float_flag_overflow | float_flag_inexact STATUS_VAR);
        return (int16_t)q_min;

    if (q_max < q_val) {
        float_raise(float_flag_overflow | float_flag_inexact STATUS_VAR);
        return (int16_t)q_max;

    return (int16_t)q_val;

static inline int32 float64_to_q32(float64 a STATUS_PARAM)
    int64 q_min = 0xffffffff80000000LL;
    int64 q_max = 0x000000007fffffffLL;

    if (float64_is_any_nan(a)) {
        float_raise(float_flag_invalid STATUS_VAR);

    a = float64_scalbn(a, 31 STATUS_VAR);

    ieee_ex = get_float_exception_flags(status);
    set_float_exception_flags(ieee_ex & (~float_flag_underflow)

    if (ieee_ex & float_flag_overflow) {
        float_raise(float_flag_inexact STATUS_VAR);
        return (int64)a < 0 ? q_min : q_max;

    /* conversion to integer */
    q_val = float64_to_int64(a STATUS_VAR);

    ieee_ex = get_float_exception_flags(status);
    set_float_exception_flags(ieee_ex & (~float_flag_underflow)

    if (ieee_ex & float_flag_invalid) {
        set_float_exception_flags(ieee_ex & (~float_flag_invalid)
        float_raise(float_flag_overflow | float_flag_inexact STATUS_VAR);
        return (int64)a < 0 ? q_min : q_max;

    if (q_val < q_min) {
        float_raise(float_flag_overflow | float_flag_inexact STATUS_VAR);
        return (int32)q_min;

    if (q_max < q_val) {
        float_raise(float_flag_overflow | float_flag_inexact STATUS_VAR);
        return (int32)q_max;

    return (int32)q_val;

#define MSA_FLOAT_COND(DEST, OP, ARG1, ARG2, BITS, QUIET) \
        set_float_exception_flags(0, &env->active_tc.msa_fp_status); \
            cond = float ## BITS ## _ ## OP(ARG1, ARG2, \
                    &env->active_tc.msa_fp_status); \
            cond = float ## BITS ## _ ## OP ## _quiet(ARG1, ARG2, \
                    &env->active_tc.msa_fp_status); \
        DEST = cond ? M_MAX_UINT(BITS) : 0; \
        c = update_msacsr(env, CLEAR_IS_INEXACT, 0); \
        if (get_enabled_exceptions(env, c)) { \
            DEST = ((FLOAT_SNAN ## BITS >> 6) << 6) | c; \

#define MSA_FLOAT_AF(DEST, ARG1, ARG2, BITS, QUIET) \
        MSA_FLOAT_COND(DEST, eq, ARG1, ARG2, BITS, QUIET); \
        if ((DEST & M_MAX_UINT(BITS)) == M_MAX_UINT(BITS)) { \

#define MSA_FLOAT_UEQ(DEST, ARG1, ARG2, BITS, QUIET) \
        MSA_FLOAT_COND(DEST, unordered, ARG1, ARG2, BITS, QUIET); \
            MSA_FLOAT_COND(DEST, eq, ARG1, ARG2, BITS, QUIET); \

#define MSA_FLOAT_NE(DEST, ARG1, ARG2, BITS, QUIET) \
        MSA_FLOAT_COND(DEST, lt, ARG1, ARG2, BITS, QUIET); \
            MSA_FLOAT_COND(DEST, lt, ARG2, ARG1, BITS, QUIET); \

#define MSA_FLOAT_UNE(DEST, ARG1, ARG2, BITS, QUIET) \
        MSA_FLOAT_COND(DEST, unordered, ARG1, ARG2, BITS, QUIET); \
            MSA_FLOAT_COND(DEST, lt, ARG1, ARG2, BITS, QUIET); \
                MSA_FLOAT_COND(DEST, lt, ARG2, ARG1, BITS, QUIET); \

#define MSA_FLOAT_ULE(DEST, ARG1, ARG2, BITS, QUIET) \
        MSA_FLOAT_COND(DEST, unordered, ARG1, ARG2, BITS, QUIET); \
            MSA_FLOAT_COND(DEST, le, ARG1, ARG2, BITS, QUIET); \

#define MSA_FLOAT_ULT(DEST, ARG1, ARG2, BITS, QUIET) \
        MSA_FLOAT_COND(DEST, unordered, ARG1, ARG2, BITS, QUIET); \
            MSA_FLOAT_COND(DEST, lt, ARG1, ARG2, BITS, QUIET); \

#define MSA_FLOAT_OR(DEST, ARG1, ARG2, BITS, QUIET) \
        MSA_FLOAT_COND(DEST, le, ARG1, ARG2, BITS, QUIET); \
            MSA_FLOAT_COND(DEST, le, ARG2, ARG1, BITS, QUIET); \

static inline void compare_af(CPUMIPSState *env, wr_t *pwd, wr_t *pws,
                              wr_t *pwt, uint32_t df, int quiet)
    wr_t wx, *pwx = &wx;

    clear_msacsr_cause(env);

    for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) {
        MSA_FLOAT_AF(pwx->w[i], pws->w[i], pwt->w[i], 32, quiet);
    for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) {
        MSA_FLOAT_AF(pwx->d[i], pws->d[i], pwt->d[i], 64, quiet);

    check_msacsr_cause(env);

    msa_move_v(pwd, pwx);
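
/*
 * Note (not from the original source): every compare_*() helper below
 * follows the same pattern -- clear MSACSR.Cause, evaluate the predicate
 * per element (all-ones for true, all-zeros for false, or the SNAN/cause
 * pattern when a trapped exception occurred), raise the exception if
 * needed via check_msacsr_cause(), and only then commit the temporary
 * vector to wd.
 */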

static inline void compare_un(CPUMIPSState *env, wr_t *pwd, wr_t *pws,
                              wr_t *pwt, uint32_t df, int quiet)
    wr_t wx, *pwx = &wx;

    clear_msacsr_cause(env);

    for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) {
        MSA_FLOAT_COND(pwx->w[i], unordered, pws->w[i], pwt->w[i], 32,
                quiet);
    for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) {
        MSA_FLOAT_COND(pwx->d[i], unordered, pws->d[i], pwt->d[i], 64,
                quiet);

    check_msacsr_cause(env);

    msa_move_v(pwd, pwx);

static inline void compare_eq(CPUMIPSState *env, wr_t *pwd, wr_t *pws,
                              wr_t *pwt, uint32_t df, int quiet)
    wr_t wx, *pwx = &wx;

    clear_msacsr_cause(env);

    for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) {
        MSA_FLOAT_COND(pwx->w[i], eq, pws->w[i], pwt->w[i], 32, quiet);
    for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) {
        MSA_FLOAT_COND(pwx->d[i], eq, pws->d[i], pwt->d[i], 64, quiet);

    check_msacsr_cause(env);

    msa_move_v(pwd, pwx);

static inline void compare_ueq(CPUMIPSState *env, wr_t *pwd, wr_t *pws,
                               wr_t *pwt, uint32_t df, int quiet)
    wr_t wx, *pwx = &wx;

    clear_msacsr_cause(env);

    for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) {
        MSA_FLOAT_UEQ(pwx->w[i], pws->w[i], pwt->w[i], 32, quiet);
    for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) {
        MSA_FLOAT_UEQ(pwx->d[i], pws->d[i], pwt->d[i], 64, quiet);

    check_msacsr_cause(env);

    msa_move_v(pwd, pwx);

static inline void compare_lt(CPUMIPSState *env, wr_t *pwd, wr_t *pws,
                              wr_t *pwt, uint32_t df, int quiet)
    wr_t wx, *pwx = &wx;

    clear_msacsr_cause(env);

    for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) {
        MSA_FLOAT_COND(pwx->w[i], lt, pws->w[i], pwt->w[i], 32, quiet);
    for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) {
        MSA_FLOAT_COND(pwx->d[i], lt, pws->d[i], pwt->d[i], 64, quiet);

    check_msacsr_cause(env);

    msa_move_v(pwd, pwx);

static inline void compare_ult(CPUMIPSState *env, wr_t *pwd, wr_t *pws,
                               wr_t *pwt, uint32_t df, int quiet)
    wr_t wx, *pwx = &wx;

    clear_msacsr_cause(env);

    for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) {
        MSA_FLOAT_ULT(pwx->w[i], pws->w[i], pwt->w[i], 32, quiet);
    for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) {
        MSA_FLOAT_ULT(pwx->d[i], pws->d[i], pwt->d[i], 64, quiet);

    check_msacsr_cause(env);

    msa_move_v(pwd, pwx);

static inline void compare_le(CPUMIPSState *env, wr_t *pwd, wr_t *pws,
                              wr_t *pwt, uint32_t df, int quiet)
    wr_t wx, *pwx = &wx;

    clear_msacsr_cause(env);

    for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) {
        MSA_FLOAT_COND(pwx->w[i], le, pws->w[i], pwt->w[i], 32, quiet);
    for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) {
        MSA_FLOAT_COND(pwx->d[i], le, pws->d[i], pwt->d[i], 64, quiet);

    check_msacsr_cause(env);

    msa_move_v(pwd, pwx);

static inline void compare_ule(CPUMIPSState *env, wr_t *pwd, wr_t *pws,
                               wr_t *pwt, uint32_t df, int quiet)
    wr_t wx, *pwx = &wx;

    clear_msacsr_cause(env);

    for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) {
        MSA_FLOAT_ULE(pwx->w[i], pws->w[i], pwt->w[i], 32, quiet);
    for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) {
        MSA_FLOAT_ULE(pwx->d[i], pws->d[i], pwt->d[i], 64, quiet);

    check_msacsr_cause(env);

    msa_move_v(pwd, pwx);

static inline void compare_or(CPUMIPSState *env, wr_t *pwd, wr_t *pws,
                              wr_t *pwt, uint32_t df, int quiet)
    wr_t wx, *pwx = &wx;

    clear_msacsr_cause(env);

    for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) {
        MSA_FLOAT_OR(pwx->w[i], pws->w[i], pwt->w[i], 32, quiet);
    for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) {
        MSA_FLOAT_OR(pwx->d[i], pws->d[i], pwt->d[i], 64, quiet);

    check_msacsr_cause(env);

    msa_move_v(pwd, pwx);

static inline void compare_une(CPUMIPSState *env, wr_t *pwd, wr_t *pws,
                               wr_t *pwt, uint32_t df, int quiet)
    wr_t wx, *pwx = &wx;

    clear_msacsr_cause(env);

    for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) {
        MSA_FLOAT_UNE(pwx->w[i], pws->w[i], pwt->w[i], 32, quiet);
    for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) {
        MSA_FLOAT_UNE(pwx->d[i], pws->d[i], pwt->d[i], 64, quiet);

    check_msacsr_cause(env);

    msa_move_v(pwd, pwx);

static inline void compare_ne(CPUMIPSState *env, wr_t *pwd, wr_t *pws,
                              wr_t *pwt, uint32_t df, int quiet) {
    wr_t wx, *pwx = &wx;

    clear_msacsr_cause(env);

    for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) {
        MSA_FLOAT_NE(pwx->w[i], pws->w[i], pwt->w[i], 32, quiet);
    for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) {
        MSA_FLOAT_NE(pwx->d[i], pws->d[i], pwt->d[i], 64, quiet);

    check_msacsr_cause(env);

    msa_move_v(pwd, pwx);

void helper_msa_fcaf_df(CPUMIPSState *env, uint32_t df, uint32_t wd,
                        uint32_t ws, uint32_t wt)
    wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
    wr_t *pws = &(env->active_fpu.fpr[ws].wr);
    wr_t *pwt = &(env->active_fpu.fpr[wt].wr);
    compare_af(env, pwd, pws, pwt, df, 1);

void helper_msa_fcun_df(CPUMIPSState *env, uint32_t df, uint32_t wd,
                        uint32_t ws, uint32_t wt)
    wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
    wr_t *pws = &(env->active_fpu.fpr[ws].wr);
    wr_t *pwt = &(env->active_fpu.fpr[wt].wr);
    compare_un(env, pwd, pws, pwt, df, 1);

void helper_msa_fceq_df(CPUMIPSState *env, uint32_t df, uint32_t wd,
                        uint32_t ws, uint32_t wt)
    wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
    wr_t *pws = &(env->active_fpu.fpr[ws].wr);
    wr_t *pwt = &(env->active_fpu.fpr[wt].wr);
    compare_eq(env, pwd, pws, pwt, df, 1);

void helper_msa_fcueq_df(CPUMIPSState *env, uint32_t df, uint32_t wd,
                         uint32_t ws, uint32_t wt)
    wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
    wr_t *pws = &(env->active_fpu.fpr[ws].wr);
    wr_t *pwt = &(env->active_fpu.fpr[wt].wr);
    compare_ueq(env, pwd, pws, pwt, df, 1);

void helper_msa_fclt_df(CPUMIPSState *env, uint32_t df, uint32_t wd,
                        uint32_t ws, uint32_t wt)
    wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
    wr_t *pws = &(env->active_fpu.fpr[ws].wr);
    wr_t *pwt = &(env->active_fpu.fpr[wt].wr);
    compare_lt(env, pwd, pws, pwt, df, 1);

void helper_msa_fcult_df(CPUMIPSState *env, uint32_t df, uint32_t wd,
                         uint32_t ws, uint32_t wt)
    wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
    wr_t *pws = &(env->active_fpu.fpr[ws].wr);
    wr_t *pwt = &(env->active_fpu.fpr[wt].wr);
    compare_ult(env, pwd, pws, pwt, df, 1);

void helper_msa_fcle_df(CPUMIPSState *env, uint32_t df, uint32_t wd,
                        uint32_t ws, uint32_t wt)
    wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
    wr_t *pws = &(env->active_fpu.fpr[ws].wr);
    wr_t *pwt = &(env->active_fpu.fpr[wt].wr);
    compare_le(env, pwd, pws, pwt, df, 1);

void helper_msa_fcule_df(CPUMIPSState *env, uint32_t df, uint32_t wd,
                         uint32_t ws, uint32_t wt)
    wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
    wr_t *pws = &(env->active_fpu.fpr[ws].wr);
    wr_t *pwt = &(env->active_fpu.fpr[wt].wr);
    compare_ule(env, pwd, pws, pwt, df, 1);

void helper_msa_fsaf_df(CPUMIPSState *env, uint32_t df, uint32_t wd,
                        uint32_t ws, uint32_t wt)
    wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
    wr_t *pws = &(env->active_fpu.fpr[ws].wr);
    wr_t *pwt = &(env->active_fpu.fpr[wt].wr);
    compare_af(env, pwd, pws, pwt, df, 0);

void helper_msa_fsun_df(CPUMIPSState *env, uint32_t df, uint32_t wd,
                        uint32_t ws, uint32_t wt)
    wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
    wr_t *pws = &(env->active_fpu.fpr[ws].wr);
    wr_t *pwt = &(env->active_fpu.fpr[wt].wr);
    compare_un(env, pwd, pws, pwt, df, 0);

void helper_msa_fseq_df(CPUMIPSState *env, uint32_t df, uint32_t wd,
                        uint32_t ws, uint32_t wt)
    wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
    wr_t *pws = &(env->active_fpu.fpr[ws].wr);
    wr_t *pwt = &(env->active_fpu.fpr[wt].wr);
    compare_eq(env, pwd, pws, pwt, df, 0);

void helper_msa_fsueq_df(CPUMIPSState *env, uint32_t df, uint32_t wd,
                         uint32_t ws, uint32_t wt)
    wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
    wr_t *pws = &(env->active_fpu.fpr[ws].wr);
    wr_t *pwt = &(env->active_fpu.fpr[wt].wr);
    compare_ueq(env, pwd, pws, pwt, df, 0);

void helper_msa_fslt_df(CPUMIPSState *env, uint32_t df, uint32_t wd,
                        uint32_t ws, uint32_t wt)
    wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
    wr_t *pws = &(env->active_fpu.fpr[ws].wr);
    wr_t *pwt = &(env->active_fpu.fpr[wt].wr);
    compare_lt(env, pwd, pws, pwt, df, 0);

void helper_msa_fsult_df(CPUMIPSState *env, uint32_t df, uint32_t wd,
                         uint32_t ws, uint32_t wt)
    wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
    wr_t *pws = &(env->active_fpu.fpr[ws].wr);
    wr_t *pwt = &(env->active_fpu.fpr[wt].wr);
    compare_ult(env, pwd, pws, pwt, df, 0);

void helper_msa_fsle_df(CPUMIPSState *env, uint32_t df, uint32_t wd,
                        uint32_t ws, uint32_t wt)
    wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
    wr_t *pws = &(env->active_fpu.fpr[ws].wr);
    wr_t *pwt = &(env->active_fpu.fpr[wt].wr);
    compare_le(env, pwd, pws, pwt, df, 0);

void helper_msa_fsule_df(CPUMIPSState *env, uint32_t df, uint32_t wd,
                         uint32_t ws, uint32_t wt)
    wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
    wr_t *pws = &(env->active_fpu.fpr[ws].wr);
    wr_t *pwt = &(env->active_fpu.fpr[wt].wr);
    compare_ule(env, pwd, pws, pwt, df, 0);

void helper_msa_fcor_df(CPUMIPSState *env, uint32_t df, uint32_t wd,
                        uint32_t ws, uint32_t wt)
    wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
    wr_t *pws = &(env->active_fpu.fpr[ws].wr);
    wr_t *pwt = &(env->active_fpu.fpr[wt].wr);
    compare_or(env, pwd, pws, pwt, df, 1);

void helper_msa_fcune_df(CPUMIPSState *env, uint32_t df, uint32_t wd,
                         uint32_t ws, uint32_t wt)
    wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
    wr_t *pws = &(env->active_fpu.fpr[ws].wr);
    wr_t *pwt = &(env->active_fpu.fpr[wt].wr);
    compare_une(env, pwd, pws, pwt, df, 1);

void helper_msa_fcne_df(CPUMIPSState *env, uint32_t df, uint32_t wd,
                        uint32_t ws, uint32_t wt)
    wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
    wr_t *pws = &(env->active_fpu.fpr[ws].wr);
    wr_t *pwt = &(env->active_fpu.fpr[wt].wr);
    compare_ne(env, pwd, pws, pwt, df, 1);

void helper_msa_fsor_df(CPUMIPSState *env, uint32_t df, uint32_t wd,
                        uint32_t ws, uint32_t wt)
    wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
    wr_t *pws = &(env->active_fpu.fpr[ws].wr);
    wr_t *pwt = &(env->active_fpu.fpr[wt].wr);
    compare_or(env, pwd, pws, pwt, df, 0);

void helper_msa_fsune_df(CPUMIPSState *env, uint32_t df, uint32_t wd,
                         uint32_t ws, uint32_t wt)
    wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
    wr_t *pws = &(env->active_fpu.fpr[ws].wr);
    wr_t *pwt = &(env->active_fpu.fpr[wt].wr);
    compare_une(env, pwd, pws, pwt, df, 0);

void helper_msa_fsne_df(CPUMIPSState *env, uint32_t df, uint32_t wd,
                        uint32_t ws, uint32_t wt)
    wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
    wr_t *pws = &(env->active_fpu.fpr[ws].wr);
    wr_t *pwt = &(env->active_fpu.fpr[wt].wr);
    compare_ne(env, pwd, pws, pwt, df, 0);

#define float16_is_zero(ARG) 0
#define float16_is_zero_or_denormal(ARG) 0

#define IS_DENORMAL(ARG, BITS) \
    (!float ## BITS ## _is_zero(ARG) \
    && float ## BITS ## _is_zero_or_denormal(ARG))

#define MSA_FLOAT_BINOP(DEST, OP, ARG1, ARG2, BITS) \
        set_float_exception_flags(0, &env->active_tc.msa_fp_status); \
        DEST = float ## BITS ## _ ## OP(ARG1, ARG2, \
                &env->active_tc.msa_fp_status); \
        c = update_msacsr(env, 0, IS_DENORMAL(DEST, BITS)); \
        if (get_enabled_exceptions(env, c)) { \
            DEST = ((FLOAT_SNAN ## BITS >> 6) << 6) | c; \

void helper_msa_fadd_df(CPUMIPSState *env, uint32_t df, uint32_t wd,
                        uint32_t ws, uint32_t wt)
    wr_t wx, *pwx = &wx;
    wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
    wr_t *pws = &(env->active_fpu.fpr[ws].wr);
    wr_t *pwt = &(env->active_fpu.fpr[wt].wr);

    clear_msacsr_cause(env);

    for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) {
        MSA_FLOAT_BINOP(pwx->w[i], add, pws->w[i], pwt->w[i], 32);
    for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) {
        MSA_FLOAT_BINOP(pwx->d[i], add, pws->d[i], pwt->d[i], 64);

    check_msacsr_cause(env);
    msa_move_v(pwd, pwx);

void helper_msa_fsub_df(CPUMIPSState *env, uint32_t df, uint32_t wd,
                        uint32_t ws, uint32_t wt)
    wr_t wx, *pwx = &wx;
    wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
    wr_t *pws = &(env->active_fpu.fpr[ws].wr);
    wr_t *pwt = &(env->active_fpu.fpr[wt].wr);

    clear_msacsr_cause(env);

    for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) {
        MSA_FLOAT_BINOP(pwx->w[i], sub, pws->w[i], pwt->w[i], 32);
    for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) {
        MSA_FLOAT_BINOP(pwx->d[i], sub, pws->d[i], pwt->d[i], 64);

    check_msacsr_cause(env);
    msa_move_v(pwd, pwx);

void helper_msa_fmul_df(CPUMIPSState *env, uint32_t df, uint32_t wd,
                        uint32_t ws, uint32_t wt)
    wr_t wx, *pwx = &wx;
    wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
    wr_t *pws = &(env->active_fpu.fpr[ws].wr);
    wr_t *pwt = &(env->active_fpu.fpr[wt].wr);

    clear_msacsr_cause(env);

    for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) {
        MSA_FLOAT_BINOP(pwx->w[i], mul, pws->w[i], pwt->w[i], 32);
    for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) {
        MSA_FLOAT_BINOP(pwx->d[i], mul, pws->d[i], pwt->d[i], 64);

    check_msacsr_cause(env);

    msa_move_v(pwd, pwx);

void helper_msa_fdiv_df(CPUMIPSState *env, uint32_t df, uint32_t wd,
                        uint32_t ws, uint32_t wt)
    wr_t wx, *pwx = &wx;
    wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
    wr_t *pws = &(env->active_fpu.fpr[ws].wr);
    wr_t *pwt = &(env->active_fpu.fpr[wt].wr);

    clear_msacsr_cause(env);

    for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) {
        MSA_FLOAT_BINOP(pwx->w[i], div, pws->w[i], pwt->w[i], 32);
    for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) {
        MSA_FLOAT_BINOP(pwx->d[i], div, pws->d[i], pwt->d[i], 64);

    check_msacsr_cause(env);

    msa_move_v(pwd, pwx);
2512 #define MSA_FLOAT_MULADD(DEST, ARG1, ARG2, ARG3, NEGATE, BITS) \
2516 set_float_exception_flags(0, &env->active_tc.msa_fp_status); \
2517 DEST = float ## BITS ## _muladd(ARG2, ARG3, ARG1, NEGATE, \
2518 &env->active_tc.msa_fp_status); \
2519 c = update_msacsr(env, 0, IS_DENORMAL(DEST, BITS)); \
2521 if (get_enabled_exceptions(env, c)) { \
2522 DEST = ((FLOAT_SNAN ## BITS >> 6) << 6) | c; \
2526 void helper_msa_fmadd_df(CPUMIPSState *env, uint32_t df, uint32_t wd,
2527 uint32_t ws, uint32_t wt)
2529 wr_t wx, *pwx = &wx;
2530 wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
2531 wr_t *pws = &(env->active_fpu.fpr[ws].wr);
2532 wr_t *pwt = &(env->active_fpu.fpr[wt].wr);
2535 clear_msacsr_cause(env);
2539 for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) {
2540 MSA_FLOAT_MULADD(pwx->w[i], pwd->w[i],
2541 pws->w[i], pwt->w[i], 0, 32);
2545 for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) {
2546 MSA_FLOAT_MULADD(pwx->d[i], pwd->d[i],
2547 pws->d[i], pwt->d[i], 0, 64);
2554 check_msacsr_cause(env);
2556 msa_move_v(pwd, pwx);
void helper_msa_fmsub_df(CPUMIPSState *env, uint32_t df, uint32_t wd,
                         uint32_t ws, uint32_t wt)
{
    wr_t wx, *pwx = &wx;
    wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
    wr_t *pws = &(env->active_fpu.fpr[ws].wr);
    wr_t *pwt = &(env->active_fpu.fpr[wt].wr);
    uint32_t i;

    clear_msacsr_cause(env);

    switch (df) {
    case DF_WORD:
        for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) {
            MSA_FLOAT_MULADD(pwx->w[i], pwd->w[i],
                             pws->w[i], pwt->w[i],
                             float_muladd_negate_product, 32);
        }
        break;
    case DF_DOUBLE:
        for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) {
            MSA_FLOAT_MULADD(pwx->d[i], pwd->d[i],
                             pws->d[i], pwt->d[i],
                             float_muladd_negate_product, 64);
        }
        break;
    default:
        assert(0);
    }

    check_msacsr_cause(env);
    msa_move_v(pwd, pwx);
}

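/*
 * FEXP2.df scales each element of ws by 2^wt using softfloat's scalbn.
 * The exponent operand is clamped (to +/-0x200 for words, +/-0x1000 for
 * doubles) so the int passed to scalbn cannot overflow; exponents that
 * large are well outside the representable range in any case.
 */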
void helper_msa_fexp2_df(CPUMIPSState *env, uint32_t df, uint32_t wd,
                         uint32_t ws, uint32_t wt)
{
    wr_t wx, *pwx = &wx;
    wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
    wr_t *pws = &(env->active_fpu.fpr[ws].wr);
    wr_t *pwt = &(env->active_fpu.fpr[wt].wr);
    uint32_t i;

    clear_msacsr_cause(env);

    switch (df) {
    case DF_WORD:
        for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) {
            MSA_FLOAT_BINOP(pwx->w[i], scalbn, pws->w[i],
                            pwt->w[i] >  0x200 ?  0x200 :
                            pwt->w[i] < -0x200 ? -0x200 : pwt->w[i],
                            32);
        }
        break;
    case DF_DOUBLE:
        for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) {
            MSA_FLOAT_BINOP(pwx->d[i], scalbn, pws->d[i],
                            pwt->d[i] >  0x1000 ?  0x1000 :
                            pwt->d[i] < -0x1000 ? -0x1000 : pwt->d[i],
                            64);
        }
        break;
    default:
        assert(0);
    }

    check_msacsr_cause(env);
    msa_move_v(pwd, pwx);
}

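/*
 * Common shape of the element-wise wrappers in this file: clear the
 * softfloat exception flags, perform the operation, fold the resulting
 * flags into MSACSR via update_msacsr(), and, roughly speaking, if the
 * exception is enabled replace the result with a marker built from the
 * signalling-NaN pattern plus the cause bits; the trap itself is raised
 * later by check_msacsr_cause().
 */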
#define MSA_FLOAT_UNOP(DEST, OP, ARG, BITS)                                 \
    do {                                                                    \
        int c;                                                              \
        set_float_exception_flags(0, &env->active_tc.msa_fp_status);        \
        DEST = float ## BITS ## _ ## OP(ARG, &env->active_tc.msa_fp_status);\
        c = update_msacsr(env, 0, IS_DENORMAL(DEST, BITS));                 \
        if (get_enabled_exceptions(env, c)) {                               \
            DEST = ((FLOAT_SNAN ## BITS >> 6) << 6) | c;                    \
        }                                                                   \
    } while (0)

void helper_msa_fexdo_df(CPUMIPSState *env, uint32_t df, uint32_t wd,
                         uint32_t ws, uint32_t wt)
{
    wr_t wx, *pwx = &wx;
    wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
    wr_t *pws = &(env->active_fpu.fpr[ws].wr);
    wr_t *pwt = &(env->active_fpu.fpr[wt].wr);
    uint32_t i;

    switch (df) {
    case DF_WORD:
        for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) {
            /* Half precision floats come in two formats: standard
               IEEE and "ARM" format.  The latter gains extra exponent
               range by omitting the NaN/Inf encodings.  */
            flag ieee = 1;

            MSA_FLOAT_BINOP(Lh(pwx, i), from_float32, pws->w[i], ieee, 16);
            MSA_FLOAT_BINOP(Rh(pwx, i), from_float32, pwt->w[i], ieee, 16);
        }
        break;
    case DF_DOUBLE:
        for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) {
            MSA_FLOAT_UNOP(Lw(pwx, i), from_float64, pws->d[i], 32);
            MSA_FLOAT_UNOP(Rw(pwx, i), from_float64, pwt->d[i], 32);
        }
        break;
    default:
        assert(0);
    }

    check_msacsr_cause(env);
    msa_move_v(pwd, pwx);
}

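/*
 * Variant of MSA_FLOAT_UNOP for narrowing conversions: the operation runs
 * at BITS precision but the destination element is only XBITS wide, so the
 * error marker is built from the XBITS signalling-NaN pattern.  Used by
 * FTQ.df below.
 */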
#define MSA_FLOAT_UNOP_XD(DEST, OP, ARG, BITS, XBITS)                       \
    do {                                                                    \
        int c;                                                              \
        set_float_exception_flags(0, &env->active_tc.msa_fp_status);        \
        DEST = float ## BITS ## _ ## OP(ARG, &env->active_tc.msa_fp_status);\
        c = update_msacsr(env, CLEAR_FS_UNDERFLOW, 0);                      \
        if (get_enabled_exceptions(env, c)) {                               \
            DEST = ((FLOAT_SNAN ## XBITS >> 6) << 6) | c;                   \
        }                                                                   \
    } while (0)

void helper_msa_ftq_df(CPUMIPSState *env, uint32_t df, uint32_t wd,
                       uint32_t ws, uint32_t wt)
{
    wr_t wx, *pwx = &wx;
    wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
    wr_t *pws = &(env->active_fpu.fpr[ws].wr);
    wr_t *pwt = &(env->active_fpu.fpr[wt].wr);
    uint32_t i;

    clear_msacsr_cause(env);

    switch (df) {
    case DF_WORD:
        for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) {
            MSA_FLOAT_UNOP_XD(Lh(pwx, i), to_q16, pws->w[i], 32, 16);
            MSA_FLOAT_UNOP_XD(Rh(pwx, i), to_q16, pwt->w[i], 32, 16);
        }
        break;
    case DF_DOUBLE:
        for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) {
            MSA_FLOAT_UNOP_XD(Lw(pwx, i), to_q32, pws->d[i], 64, 32);
            MSA_FLOAT_UNOP_XD(Rw(pwx, i), to_q32, pwt->d[i], 64, 32);
        }
        break;
    default:
        assert(0);
    }

    check_msacsr_cause(env);
    msa_move_v(pwd, pwx);
}

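/*
 * True when ARG1 is an ordinary number and ARG2 a quiet NaN.  FMIN/FMAX
 * use this so a quiet NaN in one operand does not propagate when the other
 * operand is a number: the numeric operand is returned instead.
 */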
#define NUMBER_QNAN_PAIR(ARG1, ARG2, BITS)                                  \
    !float ## BITS ## _is_any_nan(ARG1)                                     \
    && float ## BITS ## _is_quiet_nan(ARG2)

#define MSA_FLOAT_MAXOP(DEST, OP, ARG1, ARG2, BITS)                         \
    do {                                                                    \
        int c;                                                              \
        set_float_exception_flags(0, &env->active_tc.msa_fp_status);        \
        DEST = float ## BITS ## _ ## OP(ARG1, ARG2,                         \
                                        &env->active_tc.msa_fp_status);     \
        c = update_msacsr(env, 0, 0);                                       \
        if (get_enabled_exceptions(env, c)) {                               \
            DEST = ((FLOAT_SNAN ## BITS >> 6) << 6) | c;                    \
        }                                                                   \
    } while (0)

#define FMAXMIN_A(F, G, X, _S, _T, BITS)                                    \
    do {                                                                    \
        uint## BITS ##_t S = _S, T = _T;                                    \
        uint## BITS ##_t as, at, xs, xt, xd;                                \
        if (NUMBER_QNAN_PAIR(S, T, BITS)) {                                 \
            T = S;                                                          \
        }                                                                   \
        else if (NUMBER_QNAN_PAIR(T, S, BITS)) {                            \
            S = T;                                                          \
        }                                                                   \
        as = float## BITS ##_abs(S);                                        \
        at = float## BITS ##_abs(T);                                        \
        MSA_FLOAT_MAXOP(xs, F, S, T, BITS);                                 \
        MSA_FLOAT_MAXOP(xt, G, S, T, BITS);                                 \
        MSA_FLOAT_MAXOP(xd, F, as, at, BITS);                               \
        X = (as == at || xd == float## BITS ##_abs(xs)) ? xs : xt;          \
    } while (0)

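/*
 * FMIN_A/FMAX_A select by magnitude: F is the wanted comparison (min or
 * max) and G the opposite one.  xs and xt hold both candidate results and
 * xd the F-comparison of the absolute values; X picks xs when the
 * magnitude comparison agrees with it (or the magnitudes tie), xt
 * otherwise.  QNaN-vs-number pairs are first collapsed to the numeric
 * operand, matching the FMIN/FMAX NaN handling.
 */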
void helper_msa_fmin_df(CPUMIPSState *env, uint32_t df, uint32_t wd,
                        uint32_t ws, uint32_t wt)
{
    wr_t wx, *pwx = &wx;
    wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
    wr_t *pws = &(env->active_fpu.fpr[ws].wr);
    wr_t *pwt = &(env->active_fpu.fpr[wt].wr);
    uint32_t i;

    clear_msacsr_cause(env);

    switch (df) {
    case DF_WORD:
        for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) {
            if (NUMBER_QNAN_PAIR(pws->w[i], pwt->w[i], 32)) {
                MSA_FLOAT_MAXOP(pwx->w[i], min, pws->w[i], pws->w[i], 32);
            } else if (NUMBER_QNAN_PAIR(pwt->w[i], pws->w[i], 32)) {
                MSA_FLOAT_MAXOP(pwx->w[i], min, pwt->w[i], pwt->w[i], 32);
            } else {
                MSA_FLOAT_MAXOP(pwx->w[i], min, pws->w[i], pwt->w[i], 32);
            }
        }
        break;
    case DF_DOUBLE:
        for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) {
            if (NUMBER_QNAN_PAIR(pws->d[i], pwt->d[i], 64)) {
                MSA_FLOAT_MAXOP(pwx->d[i], min, pws->d[i], pws->d[i], 64);
            } else if (NUMBER_QNAN_PAIR(pwt->d[i], pws->d[i], 64)) {
                MSA_FLOAT_MAXOP(pwx->d[i], min, pwt->d[i], pwt->d[i], 64);
            } else {
                MSA_FLOAT_MAXOP(pwx->d[i], min, pws->d[i], pwt->d[i], 64);
            }
        }
        break;
    default:
        assert(0);
    }

    check_msacsr_cause(env);
    msa_move_v(pwd, pwx);
}

void helper_msa_fmin_a_df(CPUMIPSState *env, uint32_t df, uint32_t wd,
                          uint32_t ws, uint32_t wt)
{
    wr_t wx, *pwx = &wx;
    wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
    wr_t *pws = &(env->active_fpu.fpr[ws].wr);
    wr_t *pwt = &(env->active_fpu.fpr[wt].wr);
    uint32_t i;

    clear_msacsr_cause(env);

    switch (df) {
    case DF_WORD:
        for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) {
            FMAXMIN_A(min, max, pwx->w[i], pws->w[i], pwt->w[i], 32);
        }
        break;
    case DF_DOUBLE:
        for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) {
            FMAXMIN_A(min, max, pwx->d[i], pws->d[i], pwt->d[i], 64);
        }
        break;
    default:
        assert(0);
    }

    check_msacsr_cause(env);
    msa_move_v(pwd, pwx);
}

void helper_msa_fmax_df(CPUMIPSState *env, uint32_t df, uint32_t wd,
                        uint32_t ws, uint32_t wt)
{
    wr_t wx, *pwx = &wx;
    wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
    wr_t *pws = &(env->active_fpu.fpr[ws].wr);
    wr_t *pwt = &(env->active_fpu.fpr[wt].wr);
    uint32_t i;

    clear_msacsr_cause(env);

    switch (df) {
    case DF_WORD:
        for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) {
            if (NUMBER_QNAN_PAIR(pws->w[i], pwt->w[i], 32)) {
                MSA_FLOAT_MAXOP(pwx->w[i], max, pws->w[i], pws->w[i], 32);
            } else if (NUMBER_QNAN_PAIR(pwt->w[i], pws->w[i], 32)) {
                MSA_FLOAT_MAXOP(pwx->w[i], max, pwt->w[i], pwt->w[i], 32);
            } else {
                MSA_FLOAT_MAXOP(pwx->w[i], max, pws->w[i], pwt->w[i], 32);
            }
        }
        break;
    case DF_DOUBLE:
        for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) {
            if (NUMBER_QNAN_PAIR(pws->d[i], pwt->d[i], 64)) {
                MSA_FLOAT_MAXOP(pwx->d[i], max, pws->d[i], pws->d[i], 64);
            } else if (NUMBER_QNAN_PAIR(pwt->d[i], pws->d[i], 64)) {
                MSA_FLOAT_MAXOP(pwx->d[i], max, pwt->d[i], pwt->d[i], 64);
            } else {
                MSA_FLOAT_MAXOP(pwx->d[i], max, pws->d[i], pwt->d[i], 64);
            }
        }
        break;
    default:
        assert(0);
    }

    check_msacsr_cause(env);
    msa_move_v(pwd, pwx);
}

void helper_msa_fmax_a_df(CPUMIPSState *env, uint32_t df, uint32_t wd,
                          uint32_t ws, uint32_t wt)
{
    wr_t wx, *pwx = &wx;
    wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
    wr_t *pws = &(env->active_fpu.fpr[ws].wr);
    wr_t *pwt = &(env->active_fpu.fpr[wt].wr);
    uint32_t i;

    clear_msacsr_cause(env);

    switch (df) {
    case DF_WORD:
        for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) {
            FMAXMIN_A(max, min, pwx->w[i], pws->w[i], pwt->w[i], 32);
        }
        break;
    case DF_DOUBLE:
        for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) {
            FMAXMIN_A(max, min, pwx->d[i], pws->d[i], pwt->d[i], 64);
        }
        break;
    default:
        assert(0);
    }

    check_msacsr_cause(env);
    msa_move_v(pwd, pwx);
}

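/*
 * FCLASS.df classifies each element with the existing scalar FPU helpers
 * (helper_float_class_s/_d); it neither clears nor checks the MSACSR cause
 * bits, since classification cannot raise exceptions.
 */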
void helper_msa_fclass_df(CPUMIPSState *env, uint32_t df,
                          uint32_t wd, uint32_t ws)
{
    wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
    wr_t *pws = &(env->active_fpu.fpr[ws].wr);

    if (df == DF_WORD) {
        pwd->w[0] = helper_float_class_s(pws->w[0]);
        pwd->w[1] = helper_float_class_s(pws->w[1]);
        pwd->w[2] = helper_float_class_s(pws->w[2]);
        pwd->w[3] = helper_float_class_s(pws->w[3]);
    } else {
        pwd->d[0] = helper_float_class_d(pws->d[0]);
        pwd->d[1] = helper_float_class_d(pws->d[1]);
    }
}

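/*
 * Like MSA_FLOAT_UNOP but for float-to-integer/fixed conversions: it
 * passes CLEAR_FS_UNDERFLOW to update_msacsr() (presumably so the
 * flush-to-zero underflow handling is not applied to integer results), and
 * a NaN input that does not trap produces 0 rather than a NaN-derived
 * pattern.
 */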
#define MSA_FLOAT_UNOP0(DEST, OP, ARG, BITS)                                \
    do {                                                                    \
        int c;                                                              \
        set_float_exception_flags(0, &env->active_tc.msa_fp_status);        \
        DEST = float ## BITS ## _ ## OP(ARG, &env->active_tc.msa_fp_status);\
        c = update_msacsr(env, CLEAR_FS_UNDERFLOW, 0);                      \
        if (get_enabled_exceptions(env, c)) {                               \
            DEST = ((FLOAT_SNAN ## BITS >> 6) << 6) | c;                    \
        } else if (float ## BITS ## _is_any_nan(ARG)) {                     \
            DEST = 0;                                                       \
        }                                                                   \
    } while (0)

void helper_msa_ftrunc_s_df(CPUMIPSState *env, uint32_t df, uint32_t wd,
                            uint32_t ws)
{
    wr_t wx, *pwx = &wx;
    wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
    wr_t *pws = &(env->active_fpu.fpr[ws].wr);
    uint32_t i;

    clear_msacsr_cause(env);

    switch (df) {
    case DF_WORD:
        for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) {
            MSA_FLOAT_UNOP0(pwx->w[i], to_int32_round_to_zero, pws->w[i], 32);
        }
        break;
    case DF_DOUBLE:
        for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) {
            MSA_FLOAT_UNOP0(pwx->d[i], to_int64_round_to_zero, pws->d[i], 64);
        }
        break;
    default:
        assert(0);
    }

    check_msacsr_cause(env);
    msa_move_v(pwd, pwx);
}

void helper_msa_ftrunc_u_df(CPUMIPSState *env, uint32_t df, uint32_t wd,
                            uint32_t ws)
{
    wr_t wx, *pwx = &wx;
    wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
    wr_t *pws = &(env->active_fpu.fpr[ws].wr);
    uint32_t i;

    clear_msacsr_cause(env);

    switch (df) {
    case DF_WORD:
        for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) {
            MSA_FLOAT_UNOP0(pwx->w[i], to_uint32_round_to_zero, pws->w[i], 32);
        }
        break;
    case DF_DOUBLE:
        for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) {
            MSA_FLOAT_UNOP0(pwx->d[i], to_uint64_round_to_zero, pws->d[i], 64);
        }
        break;
    default:
        assert(0);
    }

    check_msacsr_cause(env);
    msa_move_v(pwd, pwx);
}

void helper_msa_fsqrt_df(CPUMIPSState *env, uint32_t df, uint32_t wd,
                         uint32_t ws)
{
    wr_t wx, *pwx = &wx;
    wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
    wr_t *pws = &(env->active_fpu.fpr[ws].wr);
    uint32_t i;

    clear_msacsr_cause(env);

    switch (df) {
    case DF_WORD:
        for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) {
            MSA_FLOAT_UNOP(pwx->w[i], sqrt, pws->w[i], 32);
        }
        break;
    case DF_DOUBLE:
        for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) {
            MSA_FLOAT_UNOP(pwx->d[i], sqrt, pws->d[i], 64);
        }
        break;
    default:
        assert(0);
    }

    check_msacsr_cause(env);
    msa_move_v(pwd, pwx);
}

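/*
 * FRCP.df and FRSQRT.df are emulated with a full-precision 1/ARG division
 * rather than a low-precision hardware-style estimate.  RECIPROCAL_INEXACT
 * is reported to update_msacsr() except when the operand is an infinity or
 * the result a quiet NaN, presumably because those cases need no inexact
 * accounting.
 */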
#define MSA_FLOAT_RECIPROCAL(DEST, ARG, BITS)                               \
    do {                                                                    \
        int c;                                                              \
        set_float_exception_flags(0, &env->active_tc.msa_fp_status);        \
        DEST = float ## BITS ## _ ## div(FLOAT_ONE ## BITS, ARG,            \
                                         &env->active_tc.msa_fp_status);    \
        c = update_msacsr(env, float ## BITS ## _is_infinity(ARG) ||        \
                          float ## BITS ## _is_quiet_nan(DEST) ?            \
                          0 : RECIPROCAL_INEXACT,                           \
                          IS_DENORMAL(DEST, BITS));                         \
        if (get_enabled_exceptions(env, c)) {                               \
            DEST = ((FLOAT_SNAN ## BITS >> 6) << 6) | c;                    \
        }                                                                   \
    } while (0)

void helper_msa_frsqrt_df(CPUMIPSState *env, uint32_t df, uint32_t wd,
                          uint32_t ws)
{
    wr_t wx, *pwx = &wx;
    wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
    wr_t *pws = &(env->active_fpu.fpr[ws].wr);
    uint32_t i;

    clear_msacsr_cause(env);

    switch (df) {
    case DF_WORD:
        for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) {
            MSA_FLOAT_RECIPROCAL(pwx->w[i], float32_sqrt(pws->w[i],
                    &env->active_tc.msa_fp_status), 32);
        }
        break;
    case DF_DOUBLE:
        for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) {
            MSA_FLOAT_RECIPROCAL(pwx->d[i], float64_sqrt(pws->d[i],
                    &env->active_tc.msa_fp_status), 64);
        }
        break;
    default:
        assert(0);
    }

    check_msacsr_cause(env);
    msa_move_v(pwd, pwx);
}

void helper_msa_frcp_df(CPUMIPSState *env, uint32_t df, uint32_t wd,
                        uint32_t ws)
{
    wr_t wx, *pwx = &wx;
    wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
    wr_t *pws = &(env->active_fpu.fpr[ws].wr);
    uint32_t i;

    clear_msacsr_cause(env);

    switch (df) {
    case DF_WORD:
        for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) {
            MSA_FLOAT_RECIPROCAL(pwx->w[i], pws->w[i], 32);
        }
        break;
    case DF_DOUBLE:
        for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) {
            MSA_FLOAT_RECIPROCAL(pwx->d[i], pws->d[i], 64);
        }
        break;
    default:
        assert(0);
    }

    check_msacsr_cause(env);
    msa_move_v(pwd, pwx);
}

void helper_msa_frint_df(CPUMIPSState *env, uint32_t df, uint32_t wd,
                         uint32_t ws)
{
    wr_t wx, *pwx = &wx;
    wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
    wr_t *pws = &(env->active_fpu.fpr[ws].wr);
    uint32_t i;

    clear_msacsr_cause(env);

    switch (df) {
    case DF_WORD:
        for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) {
            MSA_FLOAT_UNOP(pwx->w[i], round_to_int, pws->w[i], 32);
        }
        break;
    case DF_DOUBLE:
        for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) {
            MSA_FLOAT_UNOP(pwx->d[i], round_to_int, pws->d[i], 64);
        }
        break;
    default:
        assert(0);
    }

    check_msacsr_cause(env);
    msa_move_v(pwd, pwx);
}

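/*
 * FLOG2.df returns floor(log2(x)) as a float: the log2 is computed and
 * rounded to an integer with the rounding mode forced to round-down, the
 * rounding mode configured in MSACSR is then restored, and the inexact
 * flag raised by the truncation is masked out before MSACSR is updated.
 */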
#define MSA_FLOAT_LOGB(DEST, ARG, BITS)                                     \
    do {                                                                    \
        int c;                                                              \
        set_float_exception_flags(0, &env->active_tc.msa_fp_status);        \
        set_float_rounding_mode(float_round_down,                           \
                                &env->active_tc.msa_fp_status);             \
        DEST = float ## BITS ## _ ## log2(ARG,                              \
                                          &env->active_tc.msa_fp_status);   \
        DEST = float ## BITS ## _ ## round_to_int(DEST,                     \
                                           &env->active_tc.msa_fp_status);  \
        set_float_rounding_mode(ieee_rm[(env->active_tc.msacsr &            \
                                         MSACSR_RM_MASK) >> MSACSR_RM],     \
                                &env->active_tc.msa_fp_status);             \
        set_float_exception_flags(                                          \
            get_float_exception_flags(&env->active_tc.msa_fp_status)        \
            & (~float_flag_inexact),                                        \
            &env->active_tc.msa_fp_status);                                 \
        c = update_msacsr(env, 0, IS_DENORMAL(DEST, BITS));                 \
        if (get_enabled_exceptions(env, c)) {                               \
            DEST = ((FLOAT_SNAN ## BITS >> 6) << 6) | c;                    \
        }                                                                   \
    } while (0)

void helper_msa_flog2_df(CPUMIPSState *env, uint32_t df, uint32_t wd,
                         uint32_t ws)
{
    wr_t wx, *pwx = &wx;
    wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
    wr_t *pws = &(env->active_fpu.fpr[ws].wr);
    uint32_t i;

    clear_msacsr_cause(env);

    switch (df) {
    case DF_WORD:
        for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) {
            MSA_FLOAT_LOGB(pwx->w[i], pws->w[i], 32);
        }
        break;
    case DF_DOUBLE:
        for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) {
            MSA_FLOAT_LOGB(pwx->d[i], pws->d[i], 64);
        }
        break;
    default:
        assert(0);
    }

    check_msacsr_cause(env);
    msa_move_v(pwd, pwx);
}

void helper_msa_fexupl_df(CPUMIPSState *env, uint32_t df, uint32_t wd,
                          uint32_t ws)
{
    wr_t wx, *pwx = &wx;
    wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
    wr_t *pws = &(env->active_fpu.fpr[ws].wr);
    uint32_t i;

    switch (df) {
    case DF_WORD:
        for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) {
            /* Half precision floats come in two formats: standard
               IEEE and "ARM" format.  The latter gains extra exponent
               range by omitting the NaN/Inf encodings.  */
            flag ieee = 1;

            MSA_FLOAT_BINOP(pwx->w[i], from_float16, Lh(pws, i), ieee, 32);
        }
        break;
    case DF_DOUBLE:
        for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) {
            MSA_FLOAT_UNOP(pwx->d[i], from_float32, Lw(pws, i), 64);
        }
        break;
    default:
        assert(0);
    }

    check_msacsr_cause(env);
    msa_move_v(pwd, pwx);
}

void helper_msa_fexupr_df(CPUMIPSState *env, uint32_t df, uint32_t wd,
                          uint32_t ws)
{
    wr_t wx, *pwx = &wx;
    wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
    wr_t *pws = &(env->active_fpu.fpr[ws].wr);
    uint32_t i;

    switch (df) {
    case DF_WORD:
        for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) {
            /* Half precision floats come in two formats: standard
               IEEE and "ARM" format.  The latter gains extra exponent
               range by omitting the NaN/Inf encodings.  */
            flag ieee = 1;

            MSA_FLOAT_BINOP(pwx->w[i], from_float16, Rh(pws, i), ieee, 32);
        }
        break;
    case DF_DOUBLE:
        for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) {
            MSA_FLOAT_UNOP(pwx->d[i], from_float32, Rw(pws, i), 64);
        }
        break;
    default:
        assert(0);
    }

    check_msacsr_cause(env);
    msa_move_v(pwd, pwx);
}

void helper_msa_ffql_df(CPUMIPSState *env, uint32_t df, uint32_t wd,
                        uint32_t ws)
{
    wr_t wx, *pwx = &wx;
    wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
    wr_t *pws = &(env->active_fpu.fpr[ws].wr);
    uint32_t i;

    switch (df) {
    case DF_WORD:
        for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) {
            MSA_FLOAT_UNOP(pwx->w[i], from_q16, Lh(pws, i), 32);
        }
        break;
    case DF_DOUBLE:
        for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) {
            MSA_FLOAT_UNOP(pwx->d[i], from_q32, Lw(pws, i), 64);
        }
        break;
    default:
        assert(0);
    }

    msa_move_v(pwd, pwx);
}

void helper_msa_ffqr_df(CPUMIPSState *env, uint32_t df, uint32_t wd,
                        uint32_t ws)
{
    wr_t wx, *pwx = &wx;
    wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
    wr_t *pws = &(env->active_fpu.fpr[ws].wr);
    uint32_t i;

    switch (df) {
    case DF_WORD:
        for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) {
            MSA_FLOAT_UNOP(pwx->w[i], from_q16, Rh(pws, i), 32);
        }
        break;
    case DF_DOUBLE:
        for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) {
            MSA_FLOAT_UNOP(pwx->d[i], from_q32, Rw(pws, i), 64);
        }
        break;
    default:
        assert(0);
    }

    msa_move_v(pwd, pwx);
}

void helper_msa_ftint_s_df(CPUMIPSState *env, uint32_t df, uint32_t wd,
                           uint32_t ws)
{
    wr_t wx, *pwx = &wx;
    wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
    wr_t *pws = &(env->active_fpu.fpr[ws].wr);
    uint32_t i;

    clear_msacsr_cause(env);

    switch (df) {
    case DF_WORD:
        for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) {
            MSA_FLOAT_UNOP0(pwx->w[i], to_int32, pws->w[i], 32);
        }
        break;
    case DF_DOUBLE:
        for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) {
            MSA_FLOAT_UNOP0(pwx->d[i], to_int64, pws->d[i], 64);
        }
        break;
    default:
        assert(0);
    }

    check_msacsr_cause(env);
    msa_move_v(pwd, pwx);
}

void helper_msa_ftint_u_df(CPUMIPSState *env, uint32_t df, uint32_t wd,
                           uint32_t ws)
{
    wr_t wx, *pwx = &wx;
    wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
    wr_t *pws = &(env->active_fpu.fpr[ws].wr);
    uint32_t i;

    clear_msacsr_cause(env);

    switch (df) {
    case DF_WORD:
        for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) {
            MSA_FLOAT_UNOP0(pwx->w[i], to_uint32, pws->w[i], 32);
        }
        break;
    case DF_DOUBLE:
        for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) {
            MSA_FLOAT_UNOP0(pwx->d[i], to_uint64, pws->d[i], 64);
        }
        break;
    default:
        assert(0);
    }

    check_msacsr_cause(env);
    msa_move_v(pwd, pwx);
}

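/*
 * The FFINT helpers below spell their conversions as float##BITS##_from_*
 * so they fit the MSA_FLOAT_UNOP token-pasting scheme; these aliases map
 * those names onto the softfloat int-to-float entry points, e.g.
 * MSA_FLOAT_UNOP(pwx->w[i], from_int32, pws->w[i], 32) ends up calling
 * int32_to_float32().
 */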
#define float32_from_int32 int32_to_float32
#define float32_from_uint32 uint32_to_float32

#define float64_from_int64 int64_to_float64
#define float64_from_uint64 uint64_to_float64

void helper_msa_ffint_s_df(CPUMIPSState *env, uint32_t df, uint32_t wd,
                           uint32_t ws)
{
    wr_t wx, *pwx = &wx;
    wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
    wr_t *pws = &(env->active_fpu.fpr[ws].wr);
    uint32_t i;

    clear_msacsr_cause(env);

    switch (df) {
    case DF_WORD:
        for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) {
            MSA_FLOAT_UNOP(pwx->w[i], from_int32, pws->w[i], 32);
        }
        break;
    case DF_DOUBLE:
        for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) {
            MSA_FLOAT_UNOP(pwx->d[i], from_int64, pws->d[i], 64);
        }
        break;
    default:
        assert(0);
    }

    check_msacsr_cause(env);
    msa_move_v(pwd, pwx);
}

void helper_msa_ffint_u_df(CPUMIPSState *env, uint32_t df, uint32_t wd,
                           uint32_t ws)
{
    wr_t wx, *pwx = &wx;
    wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
    wr_t *pws = &(env->active_fpu.fpr[ws].wr);
    uint32_t i;

    clear_msacsr_cause(env);

    switch (df) {
    case DF_WORD:
        for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) {
            MSA_FLOAT_UNOP(pwx->w[i], from_uint32, pws->w[i], 32);
        }
        break;
    case DF_DOUBLE:
        for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) {
            MSA_FLOAT_UNOP(pwx->d[i], from_uint64, pws->d[i], 64);
        }
        break;
    default:
        assert(0);
    }

    check_msacsr_cause(env);
    msa_move_v(pwd, pwx);
}