2 * MIPS emulation helpers for qemu.
4 * Copyright (c) 2004-2005 Jocelyn Mayer
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
21 #include "host-utils.h"
25 #if !defined(CONFIG_USER_ONLY)
26 #include "softmmu_exec.h"
27 #endif /* !defined(CONFIG_USER_ONLY) */
29 #ifndef CONFIG_USER_ONLY
30 static inline void cpu_mips_tlb_flush (CPUMIPSState *env, int flush_global);
33 /*****************************************************************************/
34 /* Exceptions processing helpers */
/* NOTE(review): this file is a garbled extraction of QEMU's target-mips
   op_helper.c -- many lines (braces, declarations, #else/#endif) are
   missing and each surviving line carries a stray leading number.  The
   comments below annotate the surviving fragments; do not assume the
   code compiles as-is. */
/* Raise a MIPS exception with an error code; never returns.  Logs the
   exception number, records it in the CPU state, and -- when raised from
   translated code -- restores guest state from the TB before unwinding. */
36 static inline void QEMU_NORETURN do_raise_exception_err(CPUMIPSState *env,
43     if (exception < 0x100)
44         qemu_log("%s: %d %d\n", __func__, exception, error_code);
46     env->exception_index = exception;
47     env->error_code = error_code;
50     /* now we have a real cpu fault */
53         /* the PC is inside the translated code. It means that we have
54            a virtual CPU fault */
55         cpu_restore_state(tb, env, pc);
/* Convenience wrapper: raise an exception with error code 0. */
62 static inline void QEMU_NORETURN do_raise_exception(CPUMIPSState *env,
66     do_raise_exception_err(env, exception, 0, pc);
/* TCG helper entry points callable from generated code (pc passed as 0). */
69 void helper_raise_exception_err(CPUMIPSState *env, uint32_t exception,
72     do_raise_exception_err(env, exception, error_code, 0);
75 void helper_raise_exception(CPUMIPSState *env, uint32_t exception)
77     do_raise_exception(env, exception, 0);
80 #if defined(CONFIG_USER_ONLY)
/* User-only build: loads go through the flat *_raw accessors (the
   mem_idx argument is unused in this configuration). */
81 #define HELPER_LD(name, insn, type) \
82 static inline type do_##name(CPUMIPSState *env, target_ulong addr, \
85     return (type) insn##_raw(addr); \
/* System build: dispatch on mem_idx (0=kernel, 1=supervisor, 2=user)
   to the matching softmmu accessor.  (#else line lost in extraction.) */
88 #define HELPER_LD(name, insn, type) \
89 static inline type do_##name(CPUMIPSState *env, target_ulong addr, \
94     case 0: return (type) cpu_##insn##_kernel(env, addr); break; \
95     case 1: return (type) cpu_##insn##_super(env, addr); break; \
97     case 2: return (type) cpu_##insn##_user(env, addr); break; \
/* Instantiate unsigned-byte, signed-word and doubleword load helpers. */
101 HELPER_LD(lbu, ldub, uint8_t)
102 HELPER_LD(lw, ldl, int32_t)
104 HELPER_LD(ld, ldq, int64_t)
108 #if defined(CONFIG_USER_ONLY)
/* Store counterpart of HELPER_LD: user-only build writes via *_raw. */
109 #define HELPER_ST(name, insn, type) \
110 static inline void do_##name(CPUMIPSState *env, target_ulong addr, \
111                              type val, int mem_idx) \
113     insn##_raw(addr, val); \
/* System build: select kernel/supervisor/user store accessor by mem_idx. */
116 #define HELPER_ST(name, insn, type) \
117 static inline void do_##name(CPUMIPSState *env, target_ulong addr, \
118                              type val, int mem_idx) \
122     case 0: cpu_##insn##_kernel(env, addr, val); break; \
123     case 1: cpu_##insn##_super(env, addr, val); break; \
125     case 2: cpu_##insn##_user(env, addr, val); break; \
/* Instantiate byte, word and doubleword store helpers. */
129 HELPER_ST(sb, stb, uint8_t)
130 HELPER_ST(sw, stl, uint32_t)
132 HELPER_ST(sd, stq, uint64_t)
/* Count-leading-ones / count-leading-zeros helpers (CLO/CLZ and the
   64-bit DCLO/DCLZ variants).  Bodies were lost in extraction --
   presumably clo32/clz32/clo64/clz64 on arg1; TODO confirm upstream. */
136 target_ulong helper_clo (target_ulong arg1)
141 target_ulong helper_clz (target_ulong arg1)
146 #if defined(TARGET_MIPS64)
147 target_ulong helper_dclo (target_ulong arg1)
152 target_ulong helper_dclz (target_ulong arg1)
156 #endif /* TARGET_MIPS64 */
158 /* 64 bits arithmetic for 32 bits hosts */
/* Read the HI:LO accumulator pair of the active TC as one 64-bit value. */
159 static inline uint64_t get_HILO(CPUMIPSState *env)
161     return ((uint64_t)(env->active_tc.HI[0]) << 32) | (uint32_t)env->active_tc.LO[0];
/* Split a 64-bit value into HI/LO; the HI half is captured in tmp,
   presumably returned as T0 (return line lost in extraction). */
164 static inline target_ulong set_HIT0_LO(CPUMIPSState *env, uint64_t HILO)
167     env->active_tc.LO[0] = (int32_t)(HILO & 0xFFFFFFFF);
168     tmp = env->active_tc.HI[0] = (int32_t)(HILO >> 32);
/* Split a 64-bit value into HI/LO; the LO half is captured in tmp,
   presumably returned as T0 (return line lost in extraction). */
172 static inline target_ulong set_HI_LOT0(CPUMIPSState *env, uint64_t HILO)
174     target_ulong tmp = env->active_tc.LO[0] = (int32_t)(HILO & 0xFFFFFFFF);
175     env->active_tc.HI[0] = (int32_t)(HILO >> 32);
179 /* Multiplication variants of the vr54xx. */
/* muls/mulsu: negated 32x32->64 product (signed / unsigned operands). */
180 target_ulong helper_muls(CPUMIPSState *env, target_ulong arg1,
183     return set_HI_LOT0(env, 0 - ((int64_t)(int32_t)arg1 *
184                                  (int64_t)(int32_t)arg2));
187 target_ulong helper_mulsu(CPUMIPSState *env, target_ulong arg1,
190     return set_HI_LOT0(env, 0 - (uint64_t)(uint32_t)arg1 *
191                        (uint64_t)(uint32_t)arg2);
/* macc family: HILO += arg1 * arg2; "hi" variants return the HI half
   (set_HIT0_LO), the others return LO (set_HI_LOT0); "u" = unsigned. */
194 target_ulong helper_macc(CPUMIPSState *env, target_ulong arg1,
197     return set_HI_LOT0(env, (int64_t)get_HILO(env) + (int64_t)(int32_t)arg1 *
198                        (int64_t)(int32_t)arg2);
201 target_ulong helper_macchi(CPUMIPSState *env, target_ulong arg1,
204     return set_HIT0_LO(env, (int64_t)get_HILO(env) + (int64_t)(int32_t)arg1 *
205                        (int64_t)(int32_t)arg2);
208 target_ulong helper_maccu(CPUMIPSState *env, target_ulong arg1,
211     return set_HI_LOT0(env, (uint64_t)get_HILO(env) +
212                        (uint64_t)(uint32_t)arg1 * (uint64_t)(uint32_t)arg2);
215 target_ulong helper_macchiu(CPUMIPSState *env, target_ulong arg1,
218     return set_HIT0_LO(env, (uint64_t)get_HILO(env) +
219                        (uint64_t)(uint32_t)arg1 * (uint64_t)(uint32_t)arg2);
/* msac family: HILO -= arg1 * arg2 (same hi/lo and signedness scheme). */
222 target_ulong helper_msac(CPUMIPSState *env, target_ulong arg1,
225     return set_HI_LOT0(env, (int64_t)get_HILO(env) - (int64_t)(int32_t)arg1 *
226                        (int64_t)(int32_t)arg2);
229 target_ulong helper_msachi(CPUMIPSState *env, target_ulong arg1,
232     return set_HIT0_LO(env, (int64_t)get_HILO(env) - (int64_t)(int32_t)arg1 *
233                        (int64_t)(int32_t)arg2);
236 target_ulong helper_msacu(CPUMIPSState *env, target_ulong arg1,
239     return set_HI_LOT0(env, (uint64_t)get_HILO(env) -
240                        (uint64_t)(uint32_t)arg1 * (uint64_t)(uint32_t)arg2);
243 target_ulong helper_msachiu(CPUMIPSState *env, target_ulong arg1,
246     return set_HIT0_LO(env, (uint64_t)get_HILO(env) -
247                        (uint64_t)(uint32_t)arg1 * (uint64_t)(uint32_t)arg2);
/* mulhi family: plain (or negated, "s") product; returns the HI half. */
250 target_ulong helper_mulhi(CPUMIPSState *env, target_ulong arg1,
253     return set_HIT0_LO(env, (int64_t)(int32_t)arg1 * (int64_t)(int32_t)arg2);
256 target_ulong helper_mulhiu(CPUMIPSState *env, target_ulong arg1,
259     return set_HIT0_LO(env, (uint64_t)(uint32_t)arg1 *
260                        (uint64_t)(uint32_t)arg2);
263 target_ulong helper_mulshi(CPUMIPSState *env, target_ulong arg1,
266     return set_HIT0_LO(env, 0 - (int64_t)(int32_t)arg1 *
267                        (int64_t)(int32_t)arg2);
270 target_ulong helper_mulshiu(CPUMIPSState *env, target_ulong arg1,
273     return set_HIT0_LO(env, 0 - (uint64_t)(uint32_t)arg1 *
274                        (uint64_t)(uint32_t)arg2);
/* DMULT/DMULTU: 64x64 -> 128-bit multiply; host-utils muls64/mulu64
   write the low half into LO[0] and the high half into HI[0]. */
278 void helper_dmult(CPUMIPSState *env, target_ulong arg1, target_ulong arg2)
280     muls64(&(env->active_tc.LO[0]), &(env->active_tc.HI[0]), arg1, arg2);
283 void helper_dmultu(CPUMIPSState *env, target_ulong arg1, target_ulong arg2)
285     mulu64(&(env->active_tc.LO[0]), &(env->active_tc.HI[0]), arg1, arg2);
289 #ifndef CONFIG_USER_ONLY
/* Translate a guest virtual address to a physical address for LL/SC
   tracking; cpu_mips_translate_address returning -1 indicates a
   translation fault (the fault-handling lines were lost in extraction). */
291 static inline hwaddr do_translate_address(CPUMIPSState *env,
292                                           target_ulong address,
297     lladdr = cpu_mips_translate_address(env, address, rw);
299     if (lladdr == -1LL) {
/* Load-linked (LL/LLD): record the translated address and the loaded
   value so a later SC/SCD can detect intervening modification. */
306 #define HELPER_LD_ATOMIC(name, insn) \
307 target_ulong helper_##name(CPUMIPSState *env, target_ulong arg, int mem_idx) \
309     env->lladdr = do_translate_address(env, arg, 0); \
310     env->llval = do_##insn(env, arg, mem_idx); \
313 HELPER_LD_ATOMIC(ll, lw)
315 HELPER_LD_ATOMIC(lld, ld)
317 #undef HELPER_LD_ATOMIC
/* Store-conditional (SC/SCD): a misaligned address (almask) raises
   AdES with BadVAddr set; otherwise the store succeeds only if the
   address still translates to the linked physical address and memory
   still holds the value saved at LL time. */
319 #define HELPER_ST_ATOMIC(name, ld_insn, st_insn, almask) \
320 target_ulong helper_##name(CPUMIPSState *env, target_ulong arg1, \
321                            target_ulong arg2, int mem_idx) \
325     if (arg2 & almask) { \
326         env->CP0_BadVAddr = arg2; \
327         helper_raise_exception(env, EXCP_AdES); \
329     if (do_translate_address(env, arg2, 1) == env->lladdr) { \
330         tmp = do_##ld_insn(env, arg2, mem_idx); \
331         if (tmp == env->llval) { \
332             do_##st_insn(env, arg2, arg1, mem_idx); \
/* SC requires 4-byte alignment, SCD 8-byte. */
338 HELPER_ST_ATOMIC(sc, lw, sw, 0x3)
340 HELPER_ST_ATOMIC(scd, ld, sd, 0x7)
342 #undef HELPER_ST_ATOMIC
345 #ifdef TARGET_WORDS_BIGENDIAN
/* Byte lane of an address within a 32-bit word (big-endian order);
   GET_OFFSET steps toward higher addresses. */
346 #define GET_LMASK(v) ((v) & 3)
347 #define GET_OFFSET(addr, offset) (addr + (offset))
/* Little-endian: mirror the lane index and step toward lower addresses
   (the #else line was lost in extraction). */
349 #define GET_LMASK(v) (((v) & 3) ^ 3)
350 #define GET_OFFSET(addr, offset) (addr - (offset))
/* LWL: load word left.  Merges 1..4 bytes from the unaligned address
   arg2 into the high-order end of arg1, byte by byte, depending on the
   byte lane of arg2; result is sign-extended to target_ulong. */
353 target_ulong helper_lwl(CPUMIPSState *env, target_ulong arg1,
354                         target_ulong arg2, int mem_idx)
358     tmp = do_lbu(env, arg2, mem_idx);
359     arg1 = (arg1 & 0x00FFFFFF) | (tmp << 24);
361     if (GET_LMASK(arg2) <= 2) {
362         tmp = do_lbu(env, GET_OFFSET(arg2, 1), mem_idx);
363         arg1 = (arg1 & 0xFF00FFFF) | (tmp << 16);
366     if (GET_LMASK(arg2) <= 1) {
367         tmp = do_lbu(env, GET_OFFSET(arg2, 2), mem_idx);
368         arg1 = (arg1 & 0xFFFF00FF) | (tmp << 8);
371     if (GET_LMASK(arg2) == 0) {
372         tmp = do_lbu(env, GET_OFFSET(arg2, 3), mem_idx);
373         arg1 = (arg1 & 0xFFFFFF00) | tmp;
375     return (int32_t)arg1;
/* LWR: load word right.  Companion of LWL -- merges 1..4 bytes into the
   low-order end of arg1, walking away from arg2 in the opposite
   direction; result is sign-extended. */
378 target_ulong helper_lwr(CPUMIPSState *env, target_ulong arg1,
379                         target_ulong arg2, int mem_idx)
383     tmp = do_lbu(env, arg2, mem_idx);
384     arg1 = (arg1 & 0xFFFFFF00) | tmp;
386     if (GET_LMASK(arg2) >= 1) {
387         tmp = do_lbu(env, GET_OFFSET(arg2, -1), mem_idx);
388         arg1 = (arg1 & 0xFFFF00FF) | (tmp << 8);
391     if (GET_LMASK(arg2) >= 2) {
392         tmp = do_lbu(env, GET_OFFSET(arg2, -2), mem_idx);
393         arg1 = (arg1 & 0xFF00FFFF) | (tmp << 16);
396     if (GET_LMASK(arg2) == 3) {
397         tmp = do_lbu(env, GET_OFFSET(arg2, -3), mem_idx);
398         arg1 = (arg1 & 0x00FFFFFF) | (tmp << 24);
400     return (int32_t)arg1;
/* SWL: store word left.  Stores the 1..4 high-order bytes of arg1 to
   the unaligned address arg2, one byte at a time by byte lane. */
403 void helper_swl(CPUMIPSState *env, target_ulong arg1, target_ulong arg2,
406     do_sb(env, arg2, (uint8_t)(arg1 >> 24), mem_idx);
408     if (GET_LMASK(arg2) <= 2)
409         do_sb(env, GET_OFFSET(arg2, 1), (uint8_t)(arg1 >> 16), mem_idx);
411     if (GET_LMASK(arg2) <= 1)
412         do_sb(env, GET_OFFSET(arg2, 2), (uint8_t)(arg1 >> 8), mem_idx);
414     if (GET_LMASK(arg2) == 0)
415         do_sb(env, GET_OFFSET(arg2, 3), (uint8_t)arg1, mem_idx);
/* SWR: store word right.  Companion of SWL -- stores the 1..4 low-order
   bytes of arg1, walking addresses in the opposite direction. */
418 void helper_swr(CPUMIPSState *env, target_ulong arg1, target_ulong arg2,
421     do_sb(env, arg2, (uint8_t)arg1, mem_idx);
423     if (GET_LMASK(arg2) >= 1)
424         do_sb(env, GET_OFFSET(arg2, -1), (uint8_t)(arg1 >> 8), mem_idx);
426     if (GET_LMASK(arg2) >= 2)
427         do_sb(env, GET_OFFSET(arg2, -2), (uint8_t)(arg1 >> 16), mem_idx);
429     if (GET_LMASK(arg2) == 3)
430         do_sb(env, GET_OFFSET(arg2, -3), (uint8_t)(arg1 >> 24), mem_idx);
433 #if defined(TARGET_MIPS64)
434 /* "half" load and stores. We must do the memory access inline,
435    or fault handling won't work. */
437 #ifdef TARGET_WORDS_BIGENDIAN
/* Byte lane within a 64-bit doubleword; little-endian variant mirrors
   the lane (the #else line was lost in extraction). */
438 #define GET_LMASK64(v) ((v) & 7)
440 #define GET_LMASK64(v) (((v) & 7) ^ 7)
/* LDL: 64-bit load doubleword left -- the 8-byte analogue of LWL,
   merging 1..8 bytes into the high-order end of arg1 by byte lane. */
443 target_ulong helper_ldl(CPUMIPSState *env, target_ulong arg1,
444                         target_ulong arg2, int mem_idx)
448     tmp = do_lbu(env, arg2, mem_idx);
449     arg1 = (arg1 & 0x00FFFFFFFFFFFFFFULL) | (tmp << 56);
451     if (GET_LMASK64(arg2) <= 6) {
452         tmp = do_lbu(env, GET_OFFSET(arg2, 1), mem_idx);
453         arg1 = (arg1 & 0xFF00FFFFFFFFFFFFULL) | (tmp << 48);
456     if (GET_LMASK64(arg2) <= 5) {
457         tmp = do_lbu(env, GET_OFFSET(arg2, 2), mem_idx);
458         arg1 = (arg1 & 0xFFFF00FFFFFFFFFFULL) | (tmp << 40);
461     if (GET_LMASK64(arg2) <= 4) {
462         tmp = do_lbu(env, GET_OFFSET(arg2, 3), mem_idx);
463         arg1 = (arg1 & 0xFFFFFF00FFFFFFFFULL) | (tmp << 32);
466     if (GET_LMASK64(arg2) <= 3) {
467         tmp = do_lbu(env, GET_OFFSET(arg2, 4), mem_idx);
468         arg1 = (arg1 & 0xFFFFFFFF00FFFFFFULL) | (tmp << 24);
471     if (GET_LMASK64(arg2) <= 2) {
472         tmp = do_lbu(env, GET_OFFSET(arg2, 5), mem_idx);
473         arg1 = (arg1 & 0xFFFFFFFFFF00FFFFULL) | (tmp << 16);
476     if (GET_LMASK64(arg2) <= 1) {
477         tmp = do_lbu(env, GET_OFFSET(arg2, 6), mem_idx);
478         arg1 = (arg1 & 0xFFFFFFFFFFFF00FFULL) | (tmp << 8);
481     if (GET_LMASK64(arg2) == 0) {
482         tmp = do_lbu(env, GET_OFFSET(arg2, 7), mem_idx);
483         arg1 = (arg1 & 0xFFFFFFFFFFFFFF00ULL) | tmp;
/* LDR: 64-bit load doubleword right -- the 8-byte analogue of LWR,
   merging 1..8 bytes into the low-order end of arg1. */
489 target_ulong helper_ldr(CPUMIPSState *env, target_ulong arg1,
490                         target_ulong arg2, int mem_idx)
494     tmp = do_lbu(env, arg2, mem_idx);
495     arg1 = (arg1 & 0xFFFFFFFFFFFFFF00ULL) | tmp;
497     if (GET_LMASK64(arg2) >= 1) {
498         tmp = do_lbu(env, GET_OFFSET(arg2, -1), mem_idx);
499         arg1 = (arg1 & 0xFFFFFFFFFFFF00FFULL) | (tmp << 8);
502     if (GET_LMASK64(arg2) >= 2) {
503         tmp = do_lbu(env, GET_OFFSET(arg2, -2), mem_idx);
504         arg1 = (arg1 & 0xFFFFFFFFFF00FFFFULL) | (tmp << 16);
507     if (GET_LMASK64(arg2) >= 3) {
508         tmp = do_lbu(env, GET_OFFSET(arg2, -3), mem_idx);
509         arg1 = (arg1 & 0xFFFFFFFF00FFFFFFULL) | (tmp << 24);
512     if (GET_LMASK64(arg2) >= 4) {
513         tmp = do_lbu(env, GET_OFFSET(arg2, -4), mem_idx);
514         arg1 = (arg1 & 0xFFFFFF00FFFFFFFFULL) | (tmp << 32);
517     if (GET_LMASK64(arg2) >= 5) {
518         tmp = do_lbu(env, GET_OFFSET(arg2, -5), mem_idx);
519         arg1 = (arg1 & 0xFFFF00FFFFFFFFFFULL) | (tmp << 40);
522     if (GET_LMASK64(arg2) >= 6) {
523         tmp = do_lbu(env, GET_OFFSET(arg2, -6), mem_idx);
524         arg1 = (arg1 & 0xFF00FFFFFFFFFFFFULL) | (tmp << 48);
527     if (GET_LMASK64(arg2) == 7) {
528         tmp = do_lbu(env, GET_OFFSET(arg2, -7), mem_idx);
529         arg1 = (arg1 & 0x00FFFFFFFFFFFFFFULL) | (tmp << 56);
/* SDL: store doubleword left -- stores the 1..8 high-order bytes of
   arg1 to the unaligned address arg2, one byte per lane. */
535 void helper_sdl(CPUMIPSState *env, target_ulong arg1, target_ulong arg2,
538     do_sb(env, arg2, (uint8_t)(arg1 >> 56), mem_idx);
540     if (GET_LMASK64(arg2) <= 6)
541         do_sb(env, GET_OFFSET(arg2, 1), (uint8_t)(arg1 >> 48), mem_idx);
543     if (GET_LMASK64(arg2) <= 5)
544         do_sb(env, GET_OFFSET(arg2, 2), (uint8_t)(arg1 >> 40), mem_idx);
546     if (GET_LMASK64(arg2) <= 4)
547         do_sb(env, GET_OFFSET(arg2, 3), (uint8_t)(arg1 >> 32), mem_idx);
549     if (GET_LMASK64(arg2) <= 3)
550         do_sb(env, GET_OFFSET(arg2, 4), (uint8_t)(arg1 >> 24), mem_idx);
552     if (GET_LMASK64(arg2) <= 2)
553         do_sb(env, GET_OFFSET(arg2, 5), (uint8_t)(arg1 >> 16), mem_idx);
555     if (GET_LMASK64(arg2) <= 1)
556         do_sb(env, GET_OFFSET(arg2, 6), (uint8_t)(arg1 >> 8), mem_idx);
558     if (GET_LMASK64(arg2) <= 0)
559         do_sb(env, GET_OFFSET(arg2, 7), (uint8_t)arg1, mem_idx);
/* SDR: store doubleword right -- stores the 1..8 low-order bytes of
   arg1, walking addresses in the opposite direction of SDL. */
562 void helper_sdr(CPUMIPSState *env, target_ulong arg1, target_ulong arg2,
565     do_sb(env, arg2, (uint8_t)arg1, mem_idx);
567     if (GET_LMASK64(arg2) >= 1)
568         do_sb(env, GET_OFFSET(arg2, -1), (uint8_t)(arg1 >> 8), mem_idx);
570     if (GET_LMASK64(arg2) >= 2)
571         do_sb(env, GET_OFFSET(arg2, -2), (uint8_t)(arg1 >> 16), mem_idx);
573     if (GET_LMASK64(arg2) >= 3)
574         do_sb(env, GET_OFFSET(arg2, -3), (uint8_t)(arg1 >> 24), mem_idx);
576     if (GET_LMASK64(arg2) >= 4)
577         do_sb(env, GET_OFFSET(arg2, -4), (uint8_t)(arg1 >> 32), mem_idx);
579     if (GET_LMASK64(arg2) >= 5)
580         do_sb(env, GET_OFFSET(arg2, -5), (uint8_t)(arg1 >> 40), mem_idx);
582     if (GET_LMASK64(arg2) >= 6)
583         do_sb(env, GET_OFFSET(arg2, -6), (uint8_t)(arg1 >> 48), mem_idx);
585     if (GET_LMASK64(arg2) == 7)
586         do_sb(env, GET_OFFSET(arg2, -7), (uint8_t)(arg1 >> 56), mem_idx);
588 #endif /* TARGET_MIPS64 */
/* GPR numbers targeted by the microMIPS LWM/SWM/LDM/SDM multi-register
   instructions: s0-s7 (16..23) and s8/fp (30). */
590 static const int multiple_regs[] = { 16, 17, 18, 19, 20, 21, 22, 23, 30 };
/* LWM: load the first (reglist & 0xf) registers of multiple_regs from
   consecutive words at addr; bit 4 of reglist additionally loads ra
   (r31).  Accessor selected by mem_idx on system builds. */
592 void helper_lwm(CPUMIPSState *env, target_ulong addr, target_ulong reglist,
595     target_ulong base_reglist = reglist & 0xf;
596     target_ulong do_r31 = reglist & 0x10;
597 #ifdef CONFIG_USER_ONLY
599 #define ldfun(env, addr) ldl_raw(addr)
601     uint32_t (*ldfun)(CPUMIPSState *env, target_ulong);
605     case 0: ldfun = cpu_ldl_kernel; break;
606     case 1: ldfun = cpu_ldl_super; break;
608     case 2: ldfun = cpu_ldl_user; break;
612     if (base_reglist > 0 && base_reglist <= ARRAY_SIZE (multiple_regs)) {
615         for (i = 0; i < base_reglist; i++) {
616             env->active_tc.gpr[multiple_regs[i]] = (target_long)ldfun(env, addr);
622         env->active_tc.gpr[31] = (target_long)ldfun(env, addr);
/* SWM: store counterpart of helper_lwm -- writes the selected registers
   (and optionally r31) to consecutive words at addr. */
626 void helper_swm(CPUMIPSState *env, target_ulong addr, target_ulong reglist,
629     target_ulong base_reglist = reglist & 0xf;
630     target_ulong do_r31 = reglist & 0x10;
631 #ifdef CONFIG_USER_ONLY
633 #define stfun(env, addr, val) stl_raw(addr, val)
635     void (*stfun)(CPUMIPSState *env, target_ulong, uint32_t);
639     case 0: stfun = cpu_stl_kernel; break;
640     case 1: stfun = cpu_stl_super; break;
642     case 2: stfun = cpu_stl_user; break;
646     if (base_reglist > 0 && base_reglist <= ARRAY_SIZE (multiple_regs)) {
649         for (i = 0; i < base_reglist; i++) {
650             stfun(env, addr, env->active_tc.gpr[multiple_regs[i]]);
656     stfun(env, addr, env->active_tc.gpr[31]);
660 #if defined(TARGET_MIPS64)
/* LDM: 64-bit variant of helper_lwm -- loads doublewords instead of
   sign-extended words. */
661 void helper_ldm(CPUMIPSState *env, target_ulong addr, target_ulong reglist,
664     target_ulong base_reglist = reglist & 0xf;
665     target_ulong do_r31 = reglist & 0x10;
666 #ifdef CONFIG_USER_ONLY
668 #define ldfun(env, addr) ldq_raw(addr)
670     uint64_t (*ldfun)(CPUMIPSState *env, target_ulong);
674     case 0: ldfun = cpu_ldq_kernel; break;
675     case 1: ldfun = cpu_ldq_super; break;
677     case 2: ldfun = cpu_ldq_user; break;
681     if (base_reglist > 0 && base_reglist <= ARRAY_SIZE (multiple_regs)) {
684         for (i = 0; i < base_reglist; i++) {
685             env->active_tc.gpr[multiple_regs[i]] = ldfun(env, addr);
691         env->active_tc.gpr[31] = ldfun(env, addr);
/* SDM: 64-bit variant of helper_swm -- stores doublewords. */
695 void helper_sdm(CPUMIPSState *env, target_ulong addr, target_ulong reglist,
698     target_ulong base_reglist = reglist & 0xf;
699     target_ulong do_r31 = reglist & 0x10;
700 #ifdef CONFIG_USER_ONLY
702 #define stfun(env, addr, val) stq_raw(addr, val)
704     void (*stfun)(CPUMIPSState *env, target_ulong, uint64_t);
708     case 0: stfun = cpu_stq_kernel; break;
709     case 1: stfun = cpu_stq_super; break;
711     case 2: stfun = cpu_stq_user; break;
715     if (base_reglist > 0 && base_reglist <= ARRAY_SIZE (multiple_regs)) {
718         for (i = 0; i < base_reglist; i++) {
719             stfun(env, addr, env->active_tc.gpr[multiple_regs[i]]);
725     stfun(env, addr, env->active_tc.gpr[31]);
730 #ifndef CONFIG_USER_ONLY
/* True when the VPE is halted while still administratively active,
   i.e. it is sitting in wait-for-interrupt. */
732 static bool mips_vpe_is_wfi(MIPSCPU *c)
734     CPUMIPSState *env = &c->env;
736     /* If the VPE is halted but otherwise active, it means it's waiting for
738     return env->halted && mips_vpe_active(env);
/* Request a wakeup via the interrupt machinery rather than clearing
   halted directly. */
741 static inline void mips_vpe_wake(CPUMIPSState *c)
743     /* Don't set ->halted = 0 directly, let it be done via cpu_has_work
744        because there might be other conditions that state that c should
746     cpu_interrupt(c, CPU_INTERRUPT_WAKE);
/* Put a shut-off VPE fully to sleep, cancelling any stale wake request. */
749 static inline void mips_vpe_sleep(MIPSCPU *cpu)
751     CPUMIPSState *c = &cpu->env;
753     /* The VPE was shut off, really go to bed.
754        Reset any old _WAKE requests. */
756     cpu_reset_interrupt(c, CPU_INTERRUPT_WAKE);
/* Wake a thread context: only acts when its VPE is active and not in
   wait-for-interrupt (proper per-TC rescheduling is a known FIXME). */
759 static inline void mips_tc_wake(MIPSCPU *cpu, int tc)
761     CPUMIPSState *c = &cpu->env;
763     /* FIXME: TC reschedule. */
764     if (mips_vpe_active(c) && !mips_vpe_is_wfi(cpu)) {
/* Halt a thread context: only acts once its VPE is no longer active. */
769 static inline void mips_tc_sleep(MIPSCPU *cpu, int tc)
771     CPUMIPSState *c = &cpu->env;
773     /* FIXME: TC reschedule. */
774     if (!mips_vpe_active(c)) {
779 /* tc should point to an int with the value of the global TC index.
780    This function will transform it into a local index within the
781    returned CPUMIPSState.
783    FIXME: This code assumes that all VPEs have the same number of TCs,
784    which depends on runtime setup. Can probably be fixed by
785    walking the list of CPUMIPSStates. */
786 static CPUMIPSState *mips_cpu_map_tc(CPUMIPSState *env, int *tc)
789     int vpe_idx, nr_threads = env->nr_threads;
/* Without the MVP bit this VPE may only address its own TCs. */
792     if (!(env->CP0_VPEConf0 & (1 << CP0VPEC0_MVP))) {
793         /* Not allowed to address other CPUs. */
794         *tc = env->current_tc;
/* Map global TC index -> (VPE index, local TC index); fall back to the
   calling env when the target VPE does not exist. */
798     vpe_idx = tc_idx / nr_threads;
799     *tc = tc_idx % nr_threads;
800     other = qemu_get_cpu(vpe_idx);
801     return other ? other : env;
804 /* The per VPE CP0_Status register shares some fields with the per TC
805 CP0_TCStatus registers. These fields are wired to the same registers,
806 so changes to either of them should be reflected on both registers.
808    Also, EntryHi shares the bottom 8 bit ASID with TCStatus.
810    These helpers synchronize the registers for a given CPU. */
812 /* Called for updates to CP0_Status. */
/* Propagate the CU0-3/MX/KSU fields of CP0_Status (plus the EntryHi
   ASID) into the TCStatus register of TC "tc" on VPE "cpu". */
813 static void sync_c0_status(CPUMIPSState *env, CPUMIPSState *cpu, int tc)
815     int32_t tcstatus, *tcst;
816     uint32_t v = cpu->CP0_Status;
817     uint32_t cu, mx, asid, ksu;
818     uint32_t mask = ((1 << CP0TCSt_TCU3)
819                      | (1 << CP0TCSt_TCU2)
820                      | (1 << CP0TCSt_TCU1)
821                      | (1 << CP0TCSt_TCU0)
823                      | (3 << CP0TCSt_TKSU)
824                      | (0xff << CP0TCSt_TASID));
/* Extract the shared fields from Status / EntryHi... */
826     cu = (v >> CP0St_CU0) & 0xf;
827     mx = (v >> CP0St_MX) & 0x1;
828     ksu = (v >> CP0St_KSU) & 0x3;
829     asid = env->CP0_EntryHi & 0xff;
/* ...and repack them at the TCStatus bit positions. */
831     tcstatus = cu << CP0TCSt_TCU0;
832     tcstatus |= mx << CP0TCSt_TMX;
833     tcstatus |= ksu << CP0TCSt_TKSU;
/* The running TC lives in active_tc; dormant TCs in the tcs[] array. */
836     if (tc == cpu->current_tc) {
837         tcst = &cpu->active_tc.CP0_TCStatus;
839         tcst = &cpu->tcs[tc].CP0_TCStatus;
847 /* Called for updates to CP0_TCStatus. */
/* Reverse direction of sync_c0_status: push the TCU/TMX/TKSU fields of
   a TCStatus value back into CP0_Status, and the TASID into EntryHi. */
848 static void sync_c0_tcstatus(CPUMIPSState *cpu, int tc,
852     uint32_t tcu, tmx, tasid, tksu;
853     uint32_t mask = ((1 << CP0St_CU3)
860     tcu = (v >> CP0TCSt_TCU0) & 0xf;
861     tmx = (v >> CP0TCSt_TMX) & 0x1;
863     tksu = (v >> CP0TCSt_TKSU) & 0x3;
865     status = tcu << CP0St_CU0;
866     status |= tmx << CP0St_MX;
867     status |= tksu << CP0St_KSU;
869     cpu->CP0_Status &= ~mask;
870     cpu->CP0_Status |= status;
872     /* Sync the TASID with EntryHi. */
873     cpu->CP0_EntryHi &= ~0xff;
/* NOTE(review): plain '=' below overwrites the EntryHi bits the
   previous line just preserved by masking; '|=' looks intended --
   verify against upstream QEMU history before changing. */
874     cpu->CP0_EntryHi = tasid;
879 /* Called for updates to CP0_EntryHi. */
/* Copy the EntryHi ASID into the TASID field of the TC's TCStatus
   (active_tc for the running TC, tcs[tc] otherwise). */
880 static void sync_c0_entryhi(CPUMIPSState *cpu, int tc)
883     uint32_t asid, v = cpu->CP0_EntryHi;
887     if (tc == cpu->current_tc) {
888         tcst = &cpu->active_tc.CP0_TCStatus;
890         tcst = &cpu->tcs[tc].CP0_TCStatus;
/* MFC0 read helpers for MVP-shared and per-TC CP0 registers.  The
   mftc0_* variants read from the TC targeted by VPEControl.TargTC,
   resolved through mips_cpu_map_tc. */
898 target_ulong helper_mfc0_mvpcontrol(CPUMIPSState *env)
900     return env->mvp->CP0_MVPControl;
903 target_ulong helper_mfc0_mvpconf0(CPUMIPSState *env)
905     return env->mvp->CP0_MVPConf0;
908 target_ulong helper_mfc0_mvpconf1(CPUMIPSState *env)
910     return env->mvp->CP0_MVPConf1;
/* Random is synthesized from the TLB replacement state. */
913 target_ulong helper_mfc0_random(CPUMIPSState *env)
915     return (int32_t)cpu_mips_get_random(env);
918 target_ulong helper_mfc0_tcstatus(CPUMIPSState *env)
920     return env->active_tc.CP0_TCStatus;
923 target_ulong helper_mftc0_tcstatus(CPUMIPSState *env)
925     int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
926     CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
928     if (other_tc == other->current_tc)
929         return other->active_tc.CP0_TCStatus;
931     return other->tcs[other_tc].CP0_TCStatus;
934 target_ulong helper_mfc0_tcbind(CPUMIPSState *env)
936     return env->active_tc.CP0_TCBind;
939 target_ulong helper_mftc0_tcbind(CPUMIPSState *env)
941     int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
942     CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
944     if (other_tc == other->current_tc)
945         return other->active_tc.CP0_TCBind;
947     return other->tcs[other_tc].CP0_TCBind;
/* Per-TC read helpers: TCRestart (the TC's PC), TCHalt, TCContext,
   TCSchedule and TCScheFBack, each in a local (mfc0) and a cross-TC
   (mftc0, via VPEControl.TargTC) flavor. */
950 target_ulong helper_mfc0_tcrestart(CPUMIPSState *env)
952     return env->active_tc.PC;
955 target_ulong helper_mftc0_tcrestart(CPUMIPSState *env)
957     int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
958     CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
960     if (other_tc == other->current_tc)
961         return other->active_tc.PC;
963     return other->tcs[other_tc].PC;
966 target_ulong helper_mfc0_tchalt(CPUMIPSState *env)
968     return env->active_tc.CP0_TCHalt;
971 target_ulong helper_mftc0_tchalt(CPUMIPSState *env)
973     int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
974     CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
976     if (other_tc == other->current_tc)
977         return other->active_tc.CP0_TCHalt;
979     return other->tcs[other_tc].CP0_TCHalt;
982 target_ulong helper_mfc0_tccontext(CPUMIPSState *env)
984     return env->active_tc.CP0_TCContext;
987 target_ulong helper_mftc0_tccontext(CPUMIPSState *env)
989     int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
990     CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
992     if (other_tc == other->current_tc)
993         return other->active_tc.CP0_TCContext;
995     return other->tcs[other_tc].CP0_TCContext;
998 target_ulong helper_mfc0_tcschedule(CPUMIPSState *env)
1000     return env->active_tc.CP0_TCSchedule;
1003 target_ulong helper_mftc0_tcschedule(CPUMIPSState *env)
1005     int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1006     CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
1008     if (other_tc == other->current_tc)
1009         return other->active_tc.CP0_TCSchedule;
1011     return other->tcs[other_tc].CP0_TCSchedule;
1014 target_ulong helper_mfc0_tcschefback(CPUMIPSState *env)
1016     return env->active_tc.CP0_TCScheFBack;
1019 target_ulong helper_mftc0_tcschefback(CPUMIPSState *env)
1021     int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1022     CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
1024     if (other_tc == other->current_tc)
1025         return other->active_tc.CP0_TCScheFBack;
1027     return other->tcs[other_tc].CP0_TCScheFBack;
/* Count is synthesized from the virtual clock. */
1030 target_ulong helper_mfc0_count(CPUMIPSState *env)
1032     return (int32_t)cpu_mips_get_count(env);
1035 target_ulong helper_mftc0_entryhi(CPUMIPSState *env)
1037     int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1038     CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
1040     return other->CP0_EntryHi;
/* NOTE(review): both branches below read the same field; upstream
   carries the same shape -- possibly a placeholder for per-TC cause
   bits.  Verify before simplifying. */
1043 target_ulong helper_mftc0_cause(CPUMIPSState *env)
1045     int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1047     CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
1049     if (other_tc == other->current_tc) {
1050         tccause = other->CP0_Cause;
1052         tccause = other->CP0_Cause;
1058 target_ulong helper_mftc0_status(CPUMIPSState *env)
1060     int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1061     CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
1063     return other->CP0_Status;
/* LLAddr is reported shifted right by the implementation-defined amount. */
1066 target_ulong helper_mfc0_lladdr(CPUMIPSState *env)
1068     return (int32_t)(env->lladdr >> env->CP0_LLAddr_shift);
1071 target_ulong helper_mfc0_watchlo(CPUMIPSState *env, uint32_t sel)
1073     return (int32_t)env->CP0_WatchLo[sel];
1076 target_ulong helper_mfc0_watchhi(CPUMIPSState *env, uint32_t sel)
1078     return env->CP0_WatchHi[sel];
/* Debug: reflect the live debug-mode flag into the DM bit on read. */
1081 target_ulong helper_mfc0_debug(CPUMIPSState *env)
1083     target_ulong t0 = env->CP0_Debug;
1084     if (env->hflags & MIPS_HFLAG_DM)
1085         t0 |= 1 << CP0DB_DM;
/* Cross-TC Debug read: merge the target TC's SSt/Halt bits into the
   shared Debug value. */
1090 target_ulong helper_mftc0_debug(CPUMIPSState *env)
1092     int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1094     CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
1096     if (other_tc == other->current_tc)
1097         tcstatus = other->active_tc.CP0_Debug_tcstatus;
1099         tcstatus = other->tcs[other_tc].CP0_Debug_tcstatus;
1101     /* XXX: Might be wrong, check with EJTAG spec. */
1102     return (other->CP0_Debug & ~((1 << CP0DB_SSt) | (1 << CP0DB_Halt))) |
1103            (tcstatus & ((1 << CP0DB_SSt) | (1 << CP0DB_Halt)));
1106 #if defined(TARGET_MIPS64)
/* DMFC0 variants: identical to the mfc0_* reads but returning the full
   64-bit value (no sign-extending (int32_t) truncation). */
1107 target_ulong helper_dmfc0_tcrestart(CPUMIPSState *env)
1109     return env->active_tc.PC;
1112 target_ulong helper_dmfc0_tchalt(CPUMIPSState *env)
1114     return env->active_tc.CP0_TCHalt;
1117 target_ulong helper_dmfc0_tccontext(CPUMIPSState *env)
1119     return env->active_tc.CP0_TCContext;
1122 target_ulong helper_dmfc0_tcschedule(CPUMIPSState *env)
1124     return env->active_tc.CP0_TCSchedule;
1127 target_ulong helper_dmfc0_tcschefback(CPUMIPSState *env)
1129     return env->active_tc.CP0_TCScheFBack;
1132 target_ulong helper_dmfc0_lladdr(CPUMIPSState *env)
1134     return env->lladdr >> env->CP0_LLAddr_shift;
1137 target_ulong helper_dmfc0_watchlo(CPUMIPSState *env, uint32_t sel)
1139     return env->CP0_WatchLo[sel];
1141 #endif /* TARGET_MIPS64 */
/* MTC0 Index: keep the probe-failure bit (0x80000000) and clamp the
   index to the TLB size (num derived from nb_tlb; lines lost). */
1143 void helper_mtc0_index(CPUMIPSState *env, target_ulong arg1)
1146     unsigned int tmp = env->tlb->nb_tlb;
1152     env->CP0_Index = (env->CP0_Index & 0x80000000) | (arg1 & (num - 1));
/* MTC0 MVPControl: writable bits depend on MVP privilege and the VPC
   (configuration) state; EVP/VPC/CPA only with MVP, STLB only in
   config mode. */
1155 void helper_mtc0_mvpcontrol(CPUMIPSState *env, target_ulong arg1)
1160     if (env->CP0_VPEConf0 & (1 << CP0VPEC0_MVP))
1161         mask |= (1 << CP0MVPCo_CPA) | (1 << CP0MVPCo_VPC) |
1162                 (1 << CP0MVPCo_EVP);
1163     if (env->mvp->CP0_MVPControl & (1 << CP0MVPCo_VPC))
1164         mask |= (1 << CP0MVPCo_STLB);
1165     newval = (env->mvp->CP0_MVPControl & ~mask) | (arg1 & mask);
1167     // TODO: Enable/disable shared TLB, enable/disable VPEs.
1169     env->mvp->CP0_MVPControl = newval;
/* MTC0 VPEControl: YSI/GSI/TE/TargTC are writable; the scheduler
   intercepts are not modelled. */
1172 void helper_mtc0_vpecontrol(CPUMIPSState *env, target_ulong arg1)
1177     mask = (1 << CP0VPECo_YSI) | (1 << CP0VPECo_GSI) |
1178            (1 << CP0VPECo_TE) | (0xff << CP0VPECo_TargTC);
1179     newval = (env->CP0_VPEControl & ~mask) | (arg1 & mask);
1181     /* Yield scheduler intercept not implemented. */
1182     /* Gating storage scheduler intercept not implemented. */
1184     // TODO: Enable/disable TCs.
1186     env->CP0_VPEControl = newval;
/* MTTC0 VPEControl: same masking as the mtc0 variant but applied to
   the VPE addressed by VPEControl.TargTC. */
1189 void helper_mttc0_vpecontrol(CPUMIPSState *env, target_ulong arg1)
1191     int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1192     CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
1196     mask = (1 << CP0VPECo_YSI) | (1 << CP0VPECo_GSI) |
1197            (1 << CP0VPECo_TE) | (0xff << CP0VPECo_TargTC);
1198     newval = (other->CP0_VPEControl & ~mask) | (arg1 & mask);
1200     /* TODO: Enable/disable TCs. */
1202     other->CP0_VPEControl = newval;
/* Cross-TC reads of VPEControl / VPEConf0. */
1205 target_ulong helper_mftc0_vpecontrol(CPUMIPSState *env)
1207     int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1208     CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
1209     /* FIXME: Mask away return zero on read bits. */
1210     return other->CP0_VPEControl;
1213 target_ulong helper_mftc0_vpeconf0(CPUMIPSState *env)
1215     int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1216     CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
1218     return other->CP0_VPEConf0;
/* MTC0 VPEConf0: MVP/VPA (and XTC while the VPE is activated) are
   writable only when this VPE holds MVP privilege. */
1221 void helper_mtc0_vpeconf0(CPUMIPSState *env, target_ulong arg1)
1226     if (env->CP0_VPEConf0 & (1 << CP0VPEC0_MVP)) {
1227         if (env->CP0_VPEConf0 & (1 << CP0VPEC0_VPA))
1228             mask |= (0xff << CP0VPEC0_XTC);
1229         mask |= (1 << CP0VPEC0_MVP) | (1 << CP0VPEC0_VPA);
1231     newval = (env->CP0_VPEConf0 & ~mask) | (arg1 & mask);
1233     // TODO: TC exclusive handling due to ERL/EXL.
1235     env->CP0_VPEConf0 = newval;
/* MTTC0 VPEConf0: cross-TC variant targeting VPEControl.TargTC. */
1238 void helper_mttc0_vpeconf0(CPUMIPSState *env, target_ulong arg1)
1240     int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1241     CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
1245     mask |= (1 << CP0VPEC0_MVP) | (1 << CP0VPEC0_VPA);
1246     newval = (other->CP0_VPEConf0 & ~mask) | (arg1 & mask);
1248     /* TODO: TC exclusive handling due to ERL/EXL. */
1249     other->CP0_VPEConf0 = newval;
/* MTC0 VPEConf1: NCX/NCP2/NCP1 only writable in MVP config mode. */
1252 void helper_mtc0_vpeconf1(CPUMIPSState *env, target_ulong arg1)
1257     if (env->mvp->CP0_MVPControl & (1 << CP0MVPCo_VPC))
1258         mask |= (0xff << CP0VPEC1_NCX) | (0xff << CP0VPEC1_NCP2) |
1259                 (0xff << CP0VPEC1_NCP1);
1260     newval = (env->CP0_VPEConf1 & ~mask) | (arg1 & mask);
1262     /* UDI not implemented. */
1263     /* CP2 not implemented. */
1265     // TODO: Handle FPU (CP1) binding.
1267     env->CP0_VPEConf1 = newval;
/* YQMask: yield-qualifier inputs unmodelled, so the mask stays zero. */
1270 void helper_mtc0_yqmask(CPUMIPSState *env, target_ulong arg1)
1272     /* Yield qualifier inputs not implemented. */
1273     env->CP0_YQMask = 0x00000000;
1276 void helper_mtc0_vpeopt(CPUMIPSState *env, target_ulong arg1)
1278     env->CP0_VPEOpt = arg1 & 0x0000ffff;
/* EntryLo0: mask to the implemented PFN/flag bits (no large physaddr,
   no 1k pages). */
1281 void helper_mtc0_entrylo0(CPUMIPSState *env, target_ulong arg1)
1283     /* Large physaddr (PABITS) not implemented */
1284     /* 1k pages not implemented */
1285     env->CP0_EntryLo0 = arg1 & 0x3FFFFFFF;
/* MTC0 TCStatus: apply the per-CPU writable-bit mask, then mirror the
   shared fields back into CP0_Status/EntryHi via sync_c0_tcstatus. */
1288 void helper_mtc0_tcstatus(CPUMIPSState *env, target_ulong arg1)
1290     uint32_t mask = env->CP0_TCStatus_rw_bitmask;
1293     newval = (env->active_tc.CP0_TCStatus & ~mask) | (arg1 & mask);
1295     env->active_tc.CP0_TCStatus = newval;
1296     sync_c0_tcstatus(env, env->current_tc, newval);
/* MTTC0 TCStatus: cross-TC variant.  Note: writes arg1 unmasked --
   matches upstream, where masking is applied only in the mtc0 path. */
1299 void helper_mttc0_tcstatus(CPUMIPSState *env, target_ulong arg1)
1301     int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1302     CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
1304     if (other_tc == other->current_tc)
1305         other->active_tc.CP0_TCStatus = arg1;
1307         other->tcs[other_tc].CP0_TCStatus = arg1;
1308     sync_c0_tcstatus(other, other_tc, arg1);
/* MTC0 TCBind: TBE always writable; CurVPE only in MVP config mode. */
1311 void helper_mtc0_tcbind(CPUMIPSState *env, target_ulong arg1)
1313     uint32_t mask = (1 << CP0TCBd_TBE);
1316     if (env->mvp->CP0_MVPControl & (1 << CP0MVPCo_VPC))
1317         mask |= (1 << CP0TCBd_CurVPE);
1318     newval = (env->active_tc.CP0_TCBind & ~mask) | (arg1 & mask);
1319     env->active_tc.CP0_TCBind = newval;
/* MTTC0 TCBind: same masking, applied to the targeted TC. */
1322 void helper_mttc0_tcbind(CPUMIPSState *env, target_ulong arg1)
1324     int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1325     uint32_t mask = (1 << CP0TCBd_TBE);
1327     CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
1329     if (other->mvp->CP0_MVPControl & (1 << CP0MVPCo_VPC))
1330         mask |= (1 << CP0TCBd_CurVPE);
1331     if (other_tc == other->current_tc) {
1332         newval = (other->active_tc.CP0_TCBind & ~mask) | (arg1 & mask);
1333         other->active_tc.CP0_TCBind = newval;
1335         newval = (other->tcs[other_tc].CP0_TCBind & ~mask) | (arg1 & mask);
1336         other->tcs[other_tc].CP0_TCBind = newval;
/* Write the current TC's restart PC and clear the "thread delayed" bit.
   Changing the restart address also invalidates any pending LL/SC pair. */
void helper_mtc0_tcrestart(CPUMIPSState *env, target_ulong arg1)
{
    env->active_tc.PC = arg1;
    env->active_tc.CP0_TCStatus &= ~(1 << CP0TCSt_TDS);
    /* assumes a cleared lladdr here as in the mttc0 variant below —
       line lost in extraction, confirm against upstream */
    env->lladdr = 0ULL;
    /* MIPS16 not implemented. */
}

/* Same as above, but for the TC selected by VPEControl.TargTC. */
void helper_mttc0_tcrestart(CPUMIPSState *env, target_ulong arg1)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
    CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);

    if (other_tc == other->current_tc) {
        other->active_tc.PC = arg1;
        other->active_tc.CP0_TCStatus &= ~(1 << CP0TCSt_TDS);
        other->lladdr = 0ULL;
        /* MIPS16 not implemented. */
    } else {
        other->tcs[other_tc].PC = arg1;
        other->tcs[other_tc].CP0_TCStatus &= ~(1 << CP0TCSt_TDS);
        other->lladdr = 0ULL;
        /* MIPS16 not implemented. */
    }
}
/* Write the current TC's TCHalt (bit 0 only) and put the TC to sleep or
   wake it accordingly. */
void helper_mtc0_tchalt(CPUMIPSState *env, target_ulong arg1)
{
    MIPSCPU *cpu = mips_env_get_cpu(env);

    env->active_tc.CP0_TCHalt = arg1 & 0x1;

    // TODO: Halt TC / Restart (if allocated+active) TC.
    if (env->active_tc.CP0_TCHalt & 1) {
        mips_tc_sleep(cpu, env->current_tc);
    } else {
        mips_tc_wake(cpu, env->current_tc);
    }
}

/* Halt/restart the TC selected by VPEControl.TargTC (MTTR path). */
void helper_mttc0_tchalt(CPUMIPSState *env, target_ulong arg1)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
    CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
    MIPSCPU *other_cpu = mips_env_get_cpu(other);

    // TODO: Halt TC / Restart (if allocated+active) TC.

    if (other_tc == other->current_tc)
        other->active_tc.CP0_TCHalt = arg1;
    else
        other->tcs[other_tc].CP0_TCHalt = arg1;

    if (arg1 & 1) {
        mips_tc_sleep(other_cpu, other_tc);
    } else {
        mips_tc_wake(other_cpu, other_tc);
    }
}
/* Write the current TC's TCContext (software-managed scratch register). */
void helper_mtc0_tccontext(CPUMIPSState *env, target_ulong arg1)
{
    env->active_tc.CP0_TCContext = arg1;
}

/* Write TCContext of the TC selected by VPEControl.TargTC. */
void helper_mttc0_tccontext(CPUMIPSState *env, target_ulong arg1)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
    CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);

    if (other_tc == other->current_tc)
        other->active_tc.CP0_TCContext = arg1;
    else
        other->tcs[other_tc].CP0_TCContext = arg1;
}

/* Write the current TC's TCSchedule (scheduler hint register). */
void helper_mtc0_tcschedule(CPUMIPSState *env, target_ulong arg1)
{
    env->active_tc.CP0_TCSchedule = arg1;
}

/* Write TCSchedule of the TC selected by VPEControl.TargTC. */
void helper_mttc0_tcschedule(CPUMIPSState *env, target_ulong arg1)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
    CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);

    if (other_tc == other->current_tc)
        other->active_tc.CP0_TCSchedule = arg1;
    else
        other->tcs[other_tc].CP0_TCSchedule = arg1;
}

/* Write the current TC's TCScheFBack (scheduler feedback register). */
void helper_mtc0_tcschefback(CPUMIPSState *env, target_ulong arg1)
{
    env->active_tc.CP0_TCScheFBack = arg1;
}

/* Write TCScheFBack of the TC selected by VPEControl.TargTC. */
void helper_mttc0_tcschefback(CPUMIPSState *env, target_ulong arg1)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
    CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);

    if (other_tc == other->current_tc)
        other->active_tc.CP0_TCScheFBack = arg1;
    else
        other->tcs[other_tc].CP0_TCScheFBack = arg1;
}
/* Write CP0.EntryLo1 (odd page of a TLB pair). */
void helper_mtc0_entrylo1(CPUMIPSState *env, target_ulong arg1)
{
    /* Large physaddr (PABITS) not implemented */
    /* 1k pages not implemented */
    env->CP0_EntryLo1 = arg1 & 0x3FFFFFFF;
}

/* Write CP0.Context: the low 23 bits (BadVPN2 field) are read-only. */
void helper_mtc0_context(CPUMIPSState *env, target_ulong arg1)
{
    env->CP0_Context = (env->CP0_Context & 0x007FFFFF) | (arg1 & ~0x007FFFFF);
}

/* Write CP0.PageMask, restricted to page sizes qemu can represent. */
void helper_mtc0_pagemask(CPUMIPSState *env, target_ulong arg1)
{
    /* 1k pages not implemented */
    env->CP0_PageMask = arg1 & (0x1FFFFFFF & (TARGET_PAGE_MASK << 1));
}

/* CP0.PageGrain is forced to zero: none of its features are emulated. */
void helper_mtc0_pagegrain(CPUMIPSState *env, target_ulong arg1)
{
    /* SmartMIPS not implemented */
    /* Large physaddr (PABITS) not implemented */
    /* 1k pages not implemented */
    env->CP0_PageGrain = 0;
}

/* Write CP0.Wired; wrapped modulo the TLB size to keep the index valid. */
void helper_mtc0_wired(CPUMIPSState *env, target_ulong arg1)
{
    env->CP0_Wired = arg1 % env->tlb->nb_tlb;
}
/* SRSConf0..4 writes are sticky: bits allowed by the rw bitmask can only be
   set, never cleared (note the |=). */
void helper_mtc0_srsconf0(CPUMIPSState *env, target_ulong arg1)
{
    env->CP0_SRSConf0 |= arg1 & env->CP0_SRSConf0_rw_bitmask;
}

void helper_mtc0_srsconf1(CPUMIPSState *env, target_ulong arg1)
{
    env->CP0_SRSConf1 |= arg1 & env->CP0_SRSConf1_rw_bitmask;
}

void helper_mtc0_srsconf2(CPUMIPSState *env, target_ulong arg1)
{
    env->CP0_SRSConf2 |= arg1 & env->CP0_SRSConf2_rw_bitmask;
}

void helper_mtc0_srsconf3(CPUMIPSState *env, target_ulong arg1)
{
    env->CP0_SRSConf3 |= arg1 & env->CP0_SRSConf3_rw_bitmask;
}

void helper_mtc0_srsconf4(CPUMIPSState *env, target_ulong arg1)
{
    env->CP0_SRSConf4 |= arg1 & env->CP0_SRSConf4_rw_bitmask;
}

/* Write CP0.HWREna: only the four standard RDHWR-enable bits are kept. */
void helper_mtc0_hwrena(CPUMIPSState *env, target_ulong arg1)
{
    env->CP0_HWREna = arg1 & 0x0000000F;
}

/* Write CP0.Count via the timer subsystem (keeps the count/compare timer
   consistent). */
void helper_mtc0_count(CPUMIPSState *env, target_ulong arg1)
{
    cpu_mips_store_count(env, arg1);
}
/* Write CP0.EntryHi (VPN2 + ASID).  A change of ASID invalidates qemu's
   cached softmmu translations, so the qemu TLB must be flushed. */
void helper_mtc0_entryhi(CPUMIPSState *env, target_ulong arg1)
{
    target_ulong old, val;

    /* 1k pages not implemented */
    val = arg1 & ((TARGET_PAGE_MASK << 1) | 0xFF);
#if defined(TARGET_MIPS64)
    val &= env->SEGMask;
#endif
    old = env->CP0_EntryHi;
    env->CP0_EntryHi = val;
    if (env->CP0_Config3 & (1 << CP0C3_MT)) {
        sync_c0_entryhi(env, env->current_tc);
    }
    /* If the ASID changes, flush qemu's TLB. */
    if ((old & 0xFF) != (val & 0xFF))
        cpu_mips_tlb_flush(env, 1);
}

/* Write EntryHi of the VPE owning the TC selected by VPEControl.TargTC. */
void helper_mttc0_entryhi(CPUMIPSState *env, target_ulong arg1)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
    CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);

    other->CP0_EntryHi = arg1;
    sync_c0_entryhi(other, other_tc);
}

/* Write CP0.Compare via the timer subsystem (also clears the timer IRQ). */
void helper_mtc0_compare(CPUMIPSState *env, target_ulong arg1)
{
    cpu_mips_store_compare(env, arg1);
}
/* Write CP0.Status through the per-CPU writable-bit mask, resynchronise the
   MT shadow copy (or recompute hflags directly), and optionally trace the
   interrupt-pending transition. */
void helper_mtc0_status(CPUMIPSState *env, target_ulong arg1)
{
    uint32_t val, old;
    uint32_t mask = env->CP0_Status_rw_bitmask;

    val = arg1 & mask;
    old = env->CP0_Status;
    env->CP0_Status = (env->CP0_Status & ~mask) | val;
    if (env->CP0_Config3 & (1 << CP0C3_MT)) {
        sync_c0_status(env, env, env->current_tc);
    } else {
        compute_hflags(env);
    }

    if (qemu_loglevel_mask(CPU_LOG_EXEC)) {
        qemu_log("Status %08x (%08x) => %08x (%08x) Cause %08x",
                old, old & env->CP0_Cause & CP0Ca_IP_mask,
                val, val & env->CP0_Cause & CP0Ca_IP_mask,
                env->CP0_Cause);
        switch (env->hflags & MIPS_HFLAG_KSU) {
        case MIPS_HFLAG_UM: qemu_log(", UM\n"); break;
        case MIPS_HFLAG_SM: qemu_log(", SM\n"); break;
        case MIPS_HFLAG_KM: qemu_log("\n"); break;
        default: cpu_abort(env, "Invalid MMU mode!\n"); break;
        }
    }
}

/* Write Status of the VPE owning the TC selected by VPEControl.TargTC.
   CU3..CU0/RP/FR-style bits in 0xf1000018 are masked off here. */
void helper_mttc0_status(CPUMIPSState *env, target_ulong arg1)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
    CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);

    other->CP0_Status = arg1 & ~0xf1000018;
    sync_c0_status(env, other, other_tc);
}
/* Write CP0.IntCtl: only the VS field (bits 9:5) is writable. */
void helper_mtc0_intctl(CPUMIPSState *env, target_ulong arg1)
{
    /* vectored interrupts not implemented, no performance counters. */
    env->CP0_IntCtl = (env->CP0_IntCtl & ~0x000003e0) | (arg1 & 0x000003e0);
}

/* Write CP0.SRSCtl: only the ESS and PSS shadow-set fields are writable. */
void helper_mtc0_srsctl(CPUMIPSState *env, target_ulong arg1)
{
    uint32_t mask = (0xf << CP0SRSCtl_ESS) | (0xf << CP0SRSCtl_PSS);
    env->CP0_SRSCtl = (env->CP0_SRSCtl & ~mask) | (arg1 & mask);
}
/* Common Cause-register write logic shared by the mtc0 and mttc0 helpers.
   Writable bits: IV/WP (0x00C00000), IP1:IP0 (0x300), plus DC on R2 cores.
   Toggling DC starts/stops the Count timer; toggling IP0/IP1 raises or
   clears the corresponding software interrupt. */
static void mtc0_cause(CPUMIPSState *cpu, target_ulong arg1)
{
    uint32_t mask = 0x00C00300;
    uint32_t old = cpu->CP0_Cause;
    int i;

    if (cpu->insn_flags & ISA_MIPS32R2) {
        mask |= 1 << CP0Ca_DC;
    }

    cpu->CP0_Cause = (cpu->CP0_Cause & ~mask) | (arg1 & mask);

    if ((old ^ cpu->CP0_Cause) & (1 << CP0Ca_DC)) {
        if (cpu->CP0_Cause & (1 << CP0Ca_DC)) {
            cpu_mips_stop_count(cpu);
        } else {
            cpu_mips_start_count(cpu);
        }
    }

    /* Set/reset software interrupts */
    for (i = 0 ; i < 2 ; i++) {
        if ((old ^ cpu->CP0_Cause) & (1 << (CP0Ca_IP + i))) {
            cpu_mips_soft_irq(cpu, i, cpu->CP0_Cause & (1 << (CP0Ca_IP + i)));
        }
    }
}

/* MTC0 to Cause of the current CPU. */
void helper_mtc0_cause(CPUMIPSState *env, target_ulong arg1)
{
    mtc0_cause(env, arg1);
}

/* MTTR write to Cause of the VPE selected by VPEControl.TargTC. */
void helper_mttc0_cause(CPUMIPSState *env, target_ulong arg1)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
    CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);

    mtc0_cause(other, arg1);
}
/* Read EPC of the VPE selected by VPEControl.TargTC. */
target_ulong helper_mftc0_epc(CPUMIPSState *env)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
    CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);

    return other->CP0_EPC;
}

/* Read EBase of the VPE selected by VPEControl.TargTC. */
target_ulong helper_mftc0_ebase(CPUMIPSState *env)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
    CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);

    return other->CP0_EBase;
}

/* Write CP0.EBase: only the exception-base field (bits 29:12) is writable. */
void helper_mtc0_ebase(CPUMIPSState *env, target_ulong arg1)
{
    /* vectored interrupts not implemented */
    env->CP0_EBase = (env->CP0_EBase & ~0x3FFFF000) | (arg1 & 0x3FFFF000);
}

/* Same write, targeting the VPE selected by VPEControl.TargTC. */
void helper_mttc0_ebase(CPUMIPSState *env, target_ulong arg1)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
    CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
    other->CP0_EBase = (other->CP0_EBase & ~0x3FFFF000) | (arg1 & 0x3FFFF000);
}
/* Read Config<idx> of the VPE selected by VPEControl.TargTC.
   Unsupported selectors (4, 5, and anything above 7) read as 0. */
target_ulong helper_mftc0_configx(CPUMIPSState *env, target_ulong idx)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
    CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);

    switch (idx) {
    case 0: return other->CP0_Config0;
    case 1: return other->CP0_Config1;
    case 2: return other->CP0_Config2;
    case 3: return other->CP0_Config3;
    /* 4 and 5 are reserved. */
    case 6: return other->CP0_Config6;
    case 7: return other->CP0_Config7;
    default:
        break;
    }
    return 0;
}

/* Write CP0.Config0: only the K0 cacheability field (bits 2:0) is writable. */
void helper_mtc0_config0(CPUMIPSState *env, target_ulong arg1)
{
    env->CP0_Config0 = (env->CP0_Config0 & 0x81FFFFF8) | (arg1 & 0x00000007);
}

/* Write CP0.Config2.
   NOTE(review): arg1 is intentionally unused — with no secondary/tertiary
   caches emulated there are no writable bits, so the register is simply
   re-masked.  Confirm this still matches the emulated cache model. */
void helper_mtc0_config2(CPUMIPSState *env, target_ulong arg1)
{
    /* tertiary/secondary caches not implemented */
    env->CP0_Config2 = (env->CP0_Config2 & 0x8FFF0FFF);
}
/* Write CP0.LLAddr: the guest value is shifted back into a physical address
   and merged under the per-CPU writable-bit mask. */
void helper_mtc0_lladdr(CPUMIPSState *env, target_ulong arg1)
{
    target_long mask = env->CP0_LLAddr_rw_bitmask;
    arg1 = arg1 << env->CP0_LLAddr_shift;
    env->lladdr = (env->lladdr & ~mask) | (arg1 & mask);
}

/* Write WatchLo[sel]; the low 3 bits (I/R/W enables) are kept clear here. */
void helper_mtc0_watchlo(CPUMIPSState *env, target_ulong arg1, uint32_t sel)
{
    /* Watch exceptions for instructions, data loads, data stores
       not implemented. */
    env->CP0_WatchLo[sel] = (arg1 & ~0x7);
}

/* Write WatchHi[sel]; the low 3 bits are write-one-to-clear. */
void helper_mtc0_watchhi(CPUMIPSState *env, target_ulong arg1, uint32_t sel)
{
    env->CP0_WatchHi[sel] = (arg1 & 0x40FF0FF8);
    env->CP0_WatchHi[sel] &= ~(env->CP0_WatchHi[sel] & arg1 & 0x7);
}

/* Write CP0.XContext (MIPS64): the BadVPN2 field (low bits, width depends
   on SEGBITS) is read-only. */
void helper_mtc0_xcontext(CPUMIPSState *env, target_ulong arg1)
{
    target_ulong mask = (1ULL << (env->SEGBITS - 7)) - 1;
    env->CP0_XContext = (env->CP0_XContext & mask) | (arg1 & ~mask);
}

/* Write CP0.Framemask. */
void helper_mtc0_framemask(CPUMIPSState *env, target_ulong arg1)
{
    env->CP0_Framemask = arg1; /* XXX */
}
/* Write CP0.Debug under a fixed writable mask and mirror the DM bit into
   the hflags debug-mode flag. */
void helper_mtc0_debug(CPUMIPSState *env, target_ulong arg1)
{
    env->CP0_Debug = (env->CP0_Debug & 0x8C03FC1F) | (arg1 & 0x13300120);
    if (arg1 & (1 << CP0DB_DM))
        env->hflags |= MIPS_HFLAG_DM;
    else
        env->hflags &= ~MIPS_HFLAG_DM;
}

/* MTTR write to Debug: SSt/Halt go to the target TC's per-TC debug status,
   everything else to the shared Debug register. */
void helper_mttc0_debug(CPUMIPSState *env, target_ulong arg1)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
    uint32_t val = arg1 & ((1 << CP0DB_SSt) | (1 << CP0DB_Halt));
    CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);

    /* XXX: Might be wrong, check with EJTAG spec. */
    if (other_tc == other->current_tc)
        other->active_tc.CP0_Debug_tcstatus = val;
    else
        other->tcs[other_tc].CP0_Debug_tcstatus = val;
    other->CP0_Debug = (other->CP0_Debug &
                     ((1 << CP0DB_SSt) | (1 << CP0DB_Halt))) |
                     (arg1 & ~((1 << CP0DB_SSt) | (1 << CP0DB_Halt)));
}
/* Write Performance0 control bits (counters themselves not emulated). */
void helper_mtc0_performance0(CPUMIPSState *env, target_ulong arg1)
{
    env->CP0_Performance0 = arg1 & 0x000007ff;
}

/* Cache tag/data registers: stored but not interpreted (no cache model). */
void helper_mtc0_taglo(CPUMIPSState *env, target_ulong arg1)
{
    env->CP0_TagLo = arg1 & 0xFFFFFCF6;
}

void helper_mtc0_datalo(CPUMIPSState *env, target_ulong arg1)
{
    env->CP0_DataLo = arg1; /* XXX */
}

void helper_mtc0_taghi(CPUMIPSState *env, target_ulong arg1)
{
    env->CP0_TagHi = arg1; /* XXX */
}

void helper_mtc0_datahi(CPUMIPSState *env, target_ulong arg1)
{
    env->CP0_DataHi = arg1; /* XXX */
}
/* MIPS MT functions */
/* MFTR reads: fetch a register from the TC selected by VPEControl.TargTC.
   If the target TC is the one currently running on that VPE, read the live
   active_tc copy; otherwise read the saved per-TC state. */
target_ulong helper_mftgpr(CPUMIPSState *env, uint32_t sel)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
    CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);

    if (other_tc == other->current_tc)
        return other->active_tc.gpr[sel];
    else
        return other->tcs[other_tc].gpr[sel];
}

target_ulong helper_mftlo(CPUMIPSState *env, uint32_t sel)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
    CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);

    if (other_tc == other->current_tc)
        return other->active_tc.LO[sel];
    else
        return other->tcs[other_tc].LO[sel];
}

target_ulong helper_mfthi(CPUMIPSState *env, uint32_t sel)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
    CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);

    if (other_tc == other->current_tc)
        return other->active_tc.HI[sel];
    else
        return other->tcs[other_tc].HI[sel];
}

target_ulong helper_mftacx(CPUMIPSState *env, uint32_t sel)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
    CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);

    if (other_tc == other->current_tc)
        return other->active_tc.ACX[sel];
    else
        return other->tcs[other_tc].ACX[sel];
}

target_ulong helper_mftdsp(CPUMIPSState *env)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
    CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);

    if (other_tc == other->current_tc)
        return other->active_tc.DSPControl;
    else
        return other->tcs[other_tc].DSPControl;
}
/* MTTR writes: store a register into the TC selected by VPEControl.TargTC,
   mirroring the MFTR read helpers above. */
void helper_mttgpr(CPUMIPSState *env, target_ulong arg1, uint32_t sel)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
    CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);

    if (other_tc == other->current_tc)
        other->active_tc.gpr[sel] = arg1;
    else
        other->tcs[other_tc].gpr[sel] = arg1;
}

void helper_mttlo(CPUMIPSState *env, target_ulong arg1, uint32_t sel)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
    CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);

    if (other_tc == other->current_tc)
        other->active_tc.LO[sel] = arg1;
    else
        other->tcs[other_tc].LO[sel] = arg1;
}

void helper_mtthi(CPUMIPSState *env, target_ulong arg1, uint32_t sel)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
    CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);

    if (other_tc == other->current_tc)
        other->active_tc.HI[sel] = arg1;
    else
        other->tcs[other_tc].HI[sel] = arg1;
}

void helper_mttacx(CPUMIPSState *env, target_ulong arg1, uint32_t sel)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
    CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);

    if (other_tc == other->current_tc)
        other->active_tc.ACX[sel] = arg1;
    else
        other->tcs[other_tc].ACX[sel] = arg1;
}

void helper_mttdsp(CPUMIPSState *env, target_ulong arg1)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
    CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);

    if (other_tc == other->current_tc)
        other->active_tc.DSPControl = arg1;
    else
        other->tcs[other_tc].DSPControl = arg1;
}
/* MIPS MT functions */
/* DMT/EMT (disable/enable multithreading) — bodies were lost in extraction;
   presumably unimplemented stubs returning 0.  TODO: confirm. */
target_ulong helper_dmt(void)
{
    // TODO
    return 0;
}

target_ulong helper_emt(void)
{
    // TODO
    return 0;
}

/* DVPE: disable all VPEs except the caller's, returning the previous
   MVPControl value so EVPE can restore it. */
target_ulong helper_dvpe(CPUMIPSState *env)
{
    CPUMIPSState *other_cpu_env = first_cpu;
    target_ulong prev = env->mvp->CP0_MVPControl;

    do {
        /* Turn off all VPEs except the one executing the dvpe. */
        if (other_cpu_env != env) {
            MIPSCPU *other_cpu = mips_env_get_cpu(other_cpu_env);

            other_cpu_env->mvp->CP0_MVPControl &= ~(1 << CP0MVPCo_EVP);
            mips_vpe_sleep(other_cpu);
        }
        other_cpu_env = other_cpu_env->next_cpu;
    } while (other_cpu_env);
    return prev;
}

/* EVPE: re-enable every other VPE (unless it is sleeping in WFI) and wake
   it; returns the previous MVPControl value. */
target_ulong helper_evpe(CPUMIPSState *env)
{
    CPUMIPSState *other_cpu_env = first_cpu;
    target_ulong prev = env->mvp->CP0_MVPControl;

    do {
        MIPSCPU *other_cpu = mips_env_get_cpu(other_cpu_env);

        if (other_cpu_env != env
            /* If the VPE is WFI, don't disturb its sleep. */
            && !mips_vpe_is_wfi(other_cpu)) {
            /* Enable the VPE. */
            other_cpu_env->mvp->CP0_MVPControl |= (1 << CP0MVPCo_EVP);
            mips_vpe_wake(other_cpu_env); /* And wake it up. */
        }
        other_cpu_env = other_cpu_env->next_cpu;
    } while (other_cpu_env);
    return prev;
}
1938 #endif /* !CONFIG_USER_ONLY */
/* FORK: spawn a new TC — not implemented, the register store is a TODO. */
void helper_fork(target_ulong arg1, target_ulong arg2)
{
    // arg1 = rt, arg2 = rs
    arg1 = 0;
    // TODO: store to TC register
}

/* YIELD: negative argument = yield on qualifier (raises a Thread exception
   when yields are gated by VPEControl.YSI and the TC allows it), zero =
   deallocate (TC underflow unimplemented), positive = qualifier wait
   (unimplemented, raises Thread exception).  Returns YQMask.
   NOTE(review): the outer guards around the first branch were lost in
   extraction and reconstructed — confirm against upstream. */
target_ulong helper_yield(CPUMIPSState *env, target_ulong arg)
{
    target_long arg1 = arg;

    if (arg1 < 0) {
        /* No scheduling policy implemented. */
        if (arg1 != -2) {
            if (env->CP0_VPEControl & (1 << CP0VPECo_YSI) &&
                env->active_tc.CP0_TCStatus & (1 << CP0TCSt_DT)) {
                env->CP0_VPEControl &= ~(0x7 << CP0VPECo_EXCPT);
                env->CP0_VPEControl |= 4 << CP0VPECo_EXCPT;
                helper_raise_exception(env, EXCP_THREAD);
            }
        }
    } else if (arg1 == 0) {
        if (0 /* TODO: TC underflow */) {
            env->CP0_VPEControl &= ~(0x7 << CP0VPECo_EXCPT);
            helper_raise_exception(env, EXCP_THREAD);
        } else {
            // TODO: Deallocate TC
        }
    } else if (arg1 > 0) {
        /* Yield qualifier inputs not implemented. */
        env->CP0_VPEControl &= ~(0x7 << CP0VPECo_EXCPT);
        env->CP0_VPEControl |= 2 << CP0VPECo_EXCPT;
        helper_raise_exception(env, EXCP_THREAD);
    }
    return env->CP0_YQMask;
}
1977 #ifndef CONFIG_USER_ONLY
/* TLB management */
/* Flush qemu's softmmu TLB and forget all shadow entries (entries beyond
   nb_tlb that mirror replaced guest entries). */
static void cpu_mips_tlb_flush (CPUMIPSState *env, int flush_global)
{
    /* Flush qemu's TLB and discard all shadowed entries.  */
    tlb_flush (env, flush_global);
    env->tlb->tlb_in_use = env->tlb->nb_tlb;
}

/* Invalidate shadow entries from index 'first' up to tlb_in_use. */
static void r4k_mips_tlb_flush_extra (CPUMIPSState *env, int first)
{
    /* Discard entries from env->tlb[first] onwards.  */
    while (env->tlb->tlb_in_use > first) {
        r4k_invalidate_tlb(env, --env->tlb->tlb_in_use, 0);
    }
}
/* Populate guest TLB entry 'idx' from the CP0 EntryHi/PageMask/EntryLo0/1
   registers, decoding the V/D/C/PFN fields for both pages of the pair. */
static void r4k_fill_tlb(CPUMIPSState *env, int idx)
{
    r4k_tlb_t *tlb;

    /* XXX: detect conflicting TLBs and raise a MCHECK exception when needed */
    tlb = &env->tlb->mmu.r4k.tlb[idx];
    tlb->VPN = env->CP0_EntryHi & (TARGET_PAGE_MASK << 1);
#if defined(TARGET_MIPS64)
    tlb->VPN &= env->SEGMask;
#endif
    tlb->ASID = env->CP0_EntryHi & 0xFF;
    tlb->PageMask = env->CP0_PageMask;
    /* Entry is global only if both halves have the G bit set. */
    tlb->G = env->CP0_EntryLo0 & env->CP0_EntryLo1 & 1;
    tlb->V0 = (env->CP0_EntryLo0 & 2) != 0;
    tlb->D0 = (env->CP0_EntryLo0 & 4) != 0;
    tlb->C0 = (env->CP0_EntryLo0 >> 3) & 0x7;
    tlb->PFN[0] = (env->CP0_EntryLo0 >> 6) << 12;
    tlb->V1 = (env->CP0_EntryLo1 & 2) != 0;
    tlb->D1 = (env->CP0_EntryLo1 & 4) != 0;
    tlb->C1 = (env->CP0_EntryLo1 >> 3) & 0x7;
    tlb->PFN[1] = (env->CP0_EntryLo1 >> 6) << 12;
}
/* TLBWI: write the guest TLB entry addressed by CP0.Index (the probe-failure
   bit 31 is masked off and the index wrapped to the TLB size). */
void r4k_helper_tlbwi(CPUMIPSState *env)
{
    int idx;

    idx = (env->CP0_Index & ~0x80000000) % env->tlb->nb_tlb;

    /* Discard cached TLB entries.  We could avoid doing this if the
       tlbwi is just upgrading access permissions on the current entry;
       that might be a further win.  */
    r4k_mips_tlb_flush_extra (env, env->tlb->nb_tlb);

    r4k_invalidate_tlb(env, idx, 0);
    r4k_fill_tlb(env, idx);
}

/* TLBWR: write a random (non-wired) guest TLB entry. */
void r4k_helper_tlbwr(CPUMIPSState *env)
{
    int r = cpu_mips_get_random(env);

    r4k_invalidate_tlb(env, r, 1);
    r4k_fill_tlb(env, r);
}
/* TLBP: probe the guest TLB for an entry matching CP0.EntryHi.  On a hit,
   CP0.Index gets the matching index; on a miss, the P bit (bit 31) is set
   and any matching shadow entries are discarded.
   NOTE(review): the hit branch (store to CP0_Index + break) was lost in
   extraction and reconstructed — confirm against upstream. */
void r4k_helper_tlbp(CPUMIPSState *env)
{
    r4k_tlb_t *tlb;
    target_ulong mask;
    target_ulong tag;
    target_ulong VPN;
    uint8_t ASID;
    int i;

    ASID = env->CP0_EntryHi & 0xFF;
    for (i = 0; i < env->tlb->nb_tlb; i++) {
        tlb = &env->tlb->mmu.r4k.tlb[i];
        /* 1k pages are not supported. */
        mask = tlb->PageMask | ~(TARGET_PAGE_MASK << 1);
        tag = env->CP0_EntryHi & ~mask;
        VPN = tlb->VPN & ~mask;
        /* Check ASID, virtual page number & size */
        if ((tlb->G == 1 || tlb->ASID == ASID) && VPN == tag) {
            /* TLB match */
            env->CP0_Index = i;
            break;
        }
    }
    if (i == env->tlb->nb_tlb) {
        /* No match.  Discard any shadow entries, if any of them match.  */
        for (i = env->tlb->nb_tlb; i < env->tlb->tlb_in_use; i++) {
            tlb = &env->tlb->mmu.r4k.tlb[i];
            /* 1k pages are not supported. */
            mask = tlb->PageMask | ~(TARGET_PAGE_MASK << 1);
            tag = env->CP0_EntryHi & ~mask;
            VPN = tlb->VPN & ~mask;
            /* Check ASID, virtual page number & size */
            if ((tlb->G == 1 || tlb->ASID == ASID) && VPN == tag) {
                r4k_mips_tlb_flush_extra (env, i);
                break;
            }
        }

        env->CP0_Index |= 0x80000000;
    }
}
/* TLBR: read the guest TLB entry addressed by CP0.Index back into
   EntryHi/PageMask/EntryLo0/1, re-encoding the G/V/D/C/PFN fields.
   Reading an entry with a different ASID effectively switches ASID, so
   qemu's softmmu TLB is flushed in that case. */
void r4k_helper_tlbr(CPUMIPSState *env)
{
    r4k_tlb_t *tlb;
    uint8_t ASID;
    int idx;

    ASID = env->CP0_EntryHi & 0xFF;
    idx = (env->CP0_Index & ~0x80000000) % env->tlb->nb_tlb;
    tlb = &env->tlb->mmu.r4k.tlb[idx];

    /* If this will change the current ASID, flush qemu's TLB.  */
    if (ASID != tlb->ASID)
        cpu_mips_tlb_flush (env, 1);

    r4k_mips_tlb_flush_extra(env, env->tlb->nb_tlb);

    env->CP0_EntryHi = tlb->VPN | tlb->ASID;
    env->CP0_PageMask = tlb->PageMask;
    env->CP0_EntryLo0 = tlb->G | (tlb->V0 << 1) | (tlb->D0 << 2) |
                (tlb->C0 << 3) | (tlb->PFN[0] >> 6);
    env->CP0_EntryLo1 = tlb->G | (tlb->V1 << 1) | (tlb->D1 << 2) |
                (tlb->C1 << 3) | (tlb->PFN[1] >> 6);
}
/* Generic TLB-instruction entry points: dispatch through the per-MMU-model
   function table (r4k, fixed-mapping, ...). */
void helper_tlbwi(CPUMIPSState *env)
{
    env->tlb->helper_tlbwi(env);
}

void helper_tlbwr(CPUMIPSState *env)
{
    env->tlb->helper_tlbwr(env);
}

void helper_tlbp(CPUMIPSState *env)
{
    env->tlb->helper_tlbp(env);
}

void helper_tlbr(CPUMIPSState *env)
{
    env->tlb->helper_tlbr(env);
}
/* DI: clear Status.IE, returning the previous Status value (for the rt
   destination of the instruction). */
target_ulong helper_di(CPUMIPSState *env)
{
    target_ulong t0 = env->CP0_Status;

    env->CP0_Status = t0 & ~(1 << CP0St_IE);
    return t0;
}

/* EI: set Status.IE, returning the previous Status value. */
target_ulong helper_ei(CPUMIPSState *env)
{
    target_ulong t0 = env->CP0_Status;

    env->CP0_Status = t0 | (1 << CP0St_IE);
    return t0;
}
/* Trace helper: log PC/EPC (and ErrorEPC/DEPC when relevant) before an
   ERET/DERET executes.  Only active with -d exec logging. */
static void debug_pre_eret(CPUMIPSState *env)
{
    if (qemu_loglevel_mask(CPU_LOG_EXEC)) {
        qemu_log("ERET: PC " TARGET_FMT_lx " EPC " TARGET_FMT_lx,
                env->active_tc.PC, env->CP0_EPC);
        if (env->CP0_Status & (1 << CP0St_ERL))
            qemu_log(" ErrorEPC " TARGET_FMT_lx, env->CP0_ErrorEPC);
        if (env->hflags & MIPS_HFLAG_DM)
            qemu_log(" DEPC " TARGET_FMT_lx, env->CP0_DEPC);
        qemu_log("\n");
    }
}

/* Trace helper: log the resulting PC/EPC and MMU mode after an ERET/DERET. */
static void debug_post_eret(CPUMIPSState *env)
{
    if (qemu_loglevel_mask(CPU_LOG_EXEC)) {
        qemu_log("  =>  PC " TARGET_FMT_lx " EPC " TARGET_FMT_lx,
                env->active_tc.PC, env->CP0_EPC);
        if (env->CP0_Status & (1 << CP0St_ERL))
            qemu_log(" ErrorEPC " TARGET_FMT_lx, env->CP0_ErrorEPC);
        if (env->hflags & MIPS_HFLAG_DM)
            qemu_log(" DEPC " TARGET_FMT_lx, env->CP0_DEPC);
        switch (env->hflags & MIPS_HFLAG_KSU) {
        case MIPS_HFLAG_UM: qemu_log(", UM\n"); break;
        case MIPS_HFLAG_SM: qemu_log(", SM\n"); break;
        case MIPS_HFLAG_KM: qemu_log("\n"); break;
        default: cpu_abort(env, "Invalid MMU mode!\n"); break;
        }
    }
}
/* Set the PC from an exception return address.  Bit 0 of the address is the
   ISA-mode bit: it selects MIPS16/microMIPS mode and is stripped from PC. */
static void set_pc(CPUMIPSState *env, target_ulong error_pc)
{
    env->active_tc.PC = error_pc & ~(target_ulong)1;
    if (error_pc & 1) {
        env->hflags |= MIPS_HFLAG_M16;
    } else {
        env->hflags &= ~(MIPS_HFLAG_M16);
    }
}
/* ERET: return from exception.  Jumps to ErrorEPC and clears ERL when in
   error level, otherwise to EPC clearing EXL; then recompute hflags and
   break any LL/SC link (lladdr = 1 can never match an aligned address). */
void helper_eret(CPUMIPSState *env)
{
    debug_pre_eret(env);
    if (env->CP0_Status & (1 << CP0St_ERL)) {
        set_pc(env, env->CP0_ErrorEPC);
        env->CP0_Status &= ~(1 << CP0St_ERL);
    } else {
        set_pc(env, env->CP0_EPC);
        env->CP0_Status &= ~(1 << CP0St_EXL);
    }
    compute_hflags(env);
    debug_post_eret(env);
    env->lladdr = 1;
}
2199 void helper_deret(CPUMIPSState *env)
2201 debug_pre_eret(env);
2202 set_pc(env, env->CP0_DEPC);
2204 env->hflags &= MIPS_HFLAG_DM;
2205 compute_hflags(env);
2206 debug_post_eret(env);
2209 #endif /* !CONFIG_USER_ONLY */
/* RDHWR helpers: each hardware register is readable when running with CP0
   access or when the corresponding HWREna bit is set; otherwise a Reserved
   Instruction exception is raised. */
target_ulong helper_rdhwr_cpunum(CPUMIPSState *env)
{
    if ((env->hflags & MIPS_HFLAG_CP0) ||
        (env->CP0_HWREna & (1 << 0)))
        return env->CP0_EBase & 0x3ff;  /* CPUNum field of EBase */
    else
        helper_raise_exception(env, EXCP_RI);

    return 0;
}

target_ulong helper_rdhwr_synci_step(CPUMIPSState *env)
{
    if ((env->hflags & MIPS_HFLAG_CP0) ||
        (env->CP0_HWREna & (1 << 1)))
        return env->SYNCI_Step;
    else
        helper_raise_exception(env, EXCP_RI);

    return 0;
}

target_ulong helper_rdhwr_cc(CPUMIPSState *env)
{
    if ((env->hflags & MIPS_HFLAG_CP0) ||
        (env->CP0_HWREna & (1 << 2)))
        return env->CP0_Count;
    else
        helper_raise_exception(env, EXCP_RI);

    return 0;
}

target_ulong helper_rdhwr_ccres(CPUMIPSState *env)
{
    if ((env->hflags & MIPS_HFLAG_CP0) ||
        (env->CP0_HWREna & (1 << 3)))
        /* presumably returns env->CCRes — value line lost in extraction,
           confirm against upstream */
        return env->CCRes;
    else
        helper_raise_exception(env, EXCP_RI);

    return 0;
}
/* Minimal PMON monitor-call emulation: a handful of console I/O services
   used by PMON-hosted kernels (input stubs return -1 = no character;
   output prints to qemu's stdout).
   NOTE(review): the switch scaffolding (case labels/breaks) was lost in
   extraction and reconstructed — confirm case numbers against upstream. */
void helper_pmon(CPUMIPSState *env, int function)
{
    function /= 2;
    switch (function) {
    case 2: /* TODO: char inbyte(int waitflag); */
        if (env->active_tc.gpr[4] == 0)
            env->active_tc.gpr[2] = -1;
        /* Fall through */
    case 11: /* TODO: char inbyte (void); */
        env->active_tc.gpr[2] = -1;
        break;
    case 3:
    case 12:
        printf("%c", (char)(env->active_tc.gpr[4] & 0xFF));
        break;
    case 17:
        break;
    case 158:
        {
            unsigned char *fmt = (void *)(uintptr_t)env->active_tc.gpr[4];
            printf("%s", fmt);
        }
        break;
    }
}
/* WAIT: halt the CPU until the next interrupt, leaving the main loop via
   the EXCP_HLT pseudo-exception. */
void helper_wait(CPUMIPSState *env)
{
    env->halted = 1;
    cpu_reset_interrupt(env, CPU_INTERRUPT_WAKE);
    helper_raise_exception(env, EXCP_HLT);
}
2288 #if !defined(CONFIG_USER_ONLY)
2290 static void QEMU_NORETURN do_unaligned_access(CPUMIPSState *env,
2291 target_ulong addr, int is_write,
2292 int is_user, uintptr_t retaddr);
2294 #define MMUSUFFIX _mmu
2295 #define ALIGNED_ONLY
2298 #include "softmmu_template.h"
2301 #include "softmmu_template.h"
2304 #include "softmmu_template.h"
2307 #include "softmmu_template.h"
/* Softmmu callback for an unaligned access: record the faulting address in
   BadVAddr and raise AdES (store) or AdEL (load/fetch) at the guest PC
   recovered from retaddr. */
static void do_unaligned_access(CPUMIPSState *env, target_ulong addr,
                                int is_write, int is_user, uintptr_t retaddr)
{
    env->CP0_BadVAddr = addr;
    do_raise_exception(env, (is_write == 1) ? EXCP_AdES : EXCP_AdEL, retaddr);
}

/* Softmmu TLB-miss callback: try to refill from the guest TLB; on failure
   deliver the MMU exception prepared by cpu_mips_handle_mmu_fault. */
void tlb_fill(CPUMIPSState *env, target_ulong addr, int is_write, int mmu_idx,
              uintptr_t retaddr)
{
    int ret;

    ret = cpu_mips_handle_mmu_fault(env, addr, is_write, mmu_idx);
    if (ret) {
        do_raise_exception_err(env, env->exception_index,
                               env->error_code, retaddr);
    }
}
/* Access to an unassigned physical address: raise an instruction bus error
   for fetches, a data bus error otherwise. */
void cpu_unassigned_access(CPUMIPSState *env, hwaddr addr,
                           int is_write, int is_exec, int unused, int size)
{
    if (is_exec)
        helper_raise_exception(env, EXCP_IBE);
    else
        helper_raise_exception(env, EXCP_DBE);
}
2336 #endif /* !CONFIG_USER_ONLY */
2338 /* Complex FPU operations which may need stack space. */
2340 #define FLOAT_TWO32 make_float32(1 << 30)
2341 #define FLOAT_TWO64 make_float64(1ULL << 62)
2342 #define FP_TO_INT32_OVERFLOW 0x7fffffff
2343 #define FP_TO_INT64_OVERFLOW 0x7fffffffffffffffULL
2345 /* convert MIPS rounding mode in FCR31 to IEEE library */
2346 static unsigned int ieee_rm[] = {
2347 float_round_nearest_even,
2348 float_round_to_zero,
2353 #define RESTORE_ROUNDING_MODE \
2354 set_float_rounding_mode(ieee_rm[env->active_fpu.fcr31 & 3], &env->active_fpu.fp_status)
2356 #define RESTORE_FLUSH_MODE \
2357 set_flush_to_zero((env->active_fpu.fcr31 & (1 << 24)) != 0, &env->active_fpu.fp_status);
/* CFC1: read an FPU control register.  Registers 25/26/28 are views onto
   fields of FCR31; everything else reads FCR0 (reg 0) or FCR31.
   NOTE(review): case labels/breaks were lost in extraction and
   reconstructed — confirm register numbers against upstream. */
target_ulong helper_cfc1(CPUMIPSState *env, uint32_t reg)
{
    target_ulong arg1;

    switch (reg) {
    case 0:
        arg1 = (int32_t)env->active_fpu.fcr0;
        break;
    case 25:
        arg1 = ((env->active_fpu.fcr31 >> 24) & 0xfe) | ((env->active_fpu.fcr31 >> 23) & 0x1);
        break;
    case 26:
        arg1 = env->active_fpu.fcr31 & 0x0003f07c;
        break;
    case 28:
        arg1 = (env->active_fpu.fcr31 & 0x00000f83) | ((env->active_fpu.fcr31 >> 22) & 0x4);
        break;
    default:
        arg1 = (int32_t)env->active_fpu.fcr31;
        break;
    }

    return arg1;
}

/* CTC1: write an FPU control register.  Writes with reserved bits set are
   silently ignored; a successful write re-applies rounding and
   flush-to-zero modes and raises FPE if an enabled cause bit is pending. */
void helper_ctc1(CPUMIPSState *env, target_ulong arg1, uint32_t reg)
{
    switch (reg) {
    case 25:
        if (arg1 & 0xffffff00)
            return;
        env->active_fpu.fcr31 = (env->active_fpu.fcr31 & 0x017fffff) | ((arg1 & 0xfe) << 24) |
                     ((arg1 & 0x1) << 23);
        break;
    case 26:
        if (arg1 & 0x007c0000)
            return;
        env->active_fpu.fcr31 = (env->active_fpu.fcr31 & 0xfffc0f83) | (arg1 & 0x0003f07c);
        break;
    case 28:
        if (arg1 & 0x007c0000)
            return;
        env->active_fpu.fcr31 = (env->active_fpu.fcr31 & 0xfefff07c) | (arg1 & 0x00000f83) |
                     ((arg1 & 0x4) << 22);
        break;
    case 31:
        if (arg1 & 0x007c0000)
            return;
        env->active_fpu.fcr31 = arg1;
        break;
    default:
        return;
    }
    /* set rounding mode */
    RESTORE_ROUNDING_MODE;
    /* set flush-to-zero mode */
    RESTORE_FLUSH_MODE;
    set_float_exception_flags(0, &env->active_fpu.fp_status);
    if ((GET_FP_ENABLE(env->active_fpu.fcr31) | 0x20) & GET_FP_CAUSE(env->active_fpu.fcr31))
        do_raise_exception(env, EXCP_FPE, GETPC());
}
/* Translate softfloat exception flags into the MIPS FCSR cause/flag bit
   encoding. */
static inline int ieee_ex_to_mips(int xcpt)
{
    int ret = 0;
    if (xcpt) {
        if (xcpt & float_flag_invalid) {
            ret |= FP_INVALID;
        }
        if (xcpt & float_flag_overflow) {
            ret |= FP_OVERFLOW;
        }
        if (xcpt & float_flag_underflow) {
            ret |= FP_UNDERFLOW;
        }
        if (xcpt & float_flag_divbyzero) {
            ret |= FP_DIV0;
        }
        if (xcpt & float_flag_inexact) {
            ret |= FP_INEXACT;
        }
    }
    return ret;
}

/* After an FPU operation: copy accumulated softfloat exceptions into the
   FCR31 cause field, raise FPE if any of them is enabled, otherwise fold
   them into the sticky flag bits. */
static inline void update_fcr31(CPUMIPSState *env, uintptr_t pc)
{
    int tmp = ieee_ex_to_mips(get_float_exception_flags(&env->active_fpu.fp_status));

    SET_FP_CAUSE(env->active_fpu.fcr31, tmp);

    if (tmp) {
        set_float_exception_flags(0, &env->active_fpu.fp_status);

        if (GET_FP_ENABLE(env->active_fpu.fcr31) & tmp) {
            do_raise_exception(env, EXCP_FPE, pc);
        } else {
            UPDATE_FP_FLAGS(env->active_fpu.fcr31, tmp);
        }
    }
}
2462 Single precition routines have a "s" suffix, double precision a
2463 "d" suffix, 32bit integer "w", 64bit integer "l", paired single "ps",
2464 paired single lower "pl", paired single upper "pu". */
2466 /* unary operations, modifying fp status */
/* unary operations, modifying fp status */
uint64_t helper_float_sqrt_d(CPUMIPSState *env, uint64_t fdt0)
{
    fdt0 = float64_sqrt(fdt0, &env->active_fpu.fp_status);
    update_fcr31(env, GETPC());
    return fdt0;
}

uint32_t helper_float_sqrt_s(CPUMIPSState *env, uint32_t fst0)
{
    fst0 = float32_sqrt(fst0, &env->active_fpu.fp_status);
    update_fcr31(env, GETPC());
    return fst0;
}

/* CVT.D.S: single -> double. */
uint64_t helper_float_cvtd_s(CPUMIPSState *env, uint32_t fst0)
{
    uint64_t fdt2;

    fdt2 = float32_to_float64(fst0, &env->active_fpu.fp_status);
    update_fcr31(env, GETPC());
    return fdt2;
}

/* CVT.D.W: 32-bit integer -> double. */
uint64_t helper_float_cvtd_w(CPUMIPSState *env, uint32_t wt0)
{
    uint64_t fdt2;

    fdt2 = int32_to_float64(wt0, &env->active_fpu.fp_status);
    update_fcr31(env, GETPC());
    return fdt2;
}

/* CVT.D.L: 64-bit integer -> double. */
uint64_t helper_float_cvtd_l(CPUMIPSState *env, uint64_t dt0)
{
    uint64_t fdt2;

    fdt2 = int64_to_float64(dt0, &env->active_fpu.fp_status);
    update_fcr31(env, GETPC());
    return fdt2;
}

/* CVT.L.D: double -> 64-bit integer; invalid/overflow yields the MIPS
   saturation value 0x7fffffffffffffff. */
uint64_t helper_float_cvtl_d(CPUMIPSState *env, uint64_t fdt0)
{
    uint64_t dt2;

    dt2 = float64_to_int64(fdt0, &env->active_fpu.fp_status);
    if (get_float_exception_flags(&env->active_fpu.fp_status)
        & (float_flag_invalid | float_flag_overflow)) {
        dt2 = FP_TO_INT64_OVERFLOW;
    }
    update_fcr31(env, GETPC());
    return dt2;
}

/* CVT.L.S: single -> 64-bit integer with the same saturation rule. */
uint64_t helper_float_cvtl_s(CPUMIPSState *env, uint32_t fst0)
{
    uint64_t dt2;

    dt2 = float32_to_int64(fst0, &env->active_fpu.fp_status);
    if (get_float_exception_flags(&env->active_fpu.fp_status)
        & (float_flag_invalid | float_flag_overflow)) {
        dt2 = FP_TO_INT64_OVERFLOW;
    }
    update_fcr31(env, GETPC());
    return dt2;
}

/* CVT.PS.PW: convert a pair of 32-bit ints to a paired single. */
uint64_t helper_float_cvtps_pw(CPUMIPSState *env, uint64_t dt0)
{
    uint32_t fst2;
    uint32_t fsth2;

    fst2 = int32_to_float32(dt0 & 0XFFFFFFFF, &env->active_fpu.fp_status);
    fsth2 = int32_to_float32(dt0 >> 32, &env->active_fpu.fp_status);
    update_fcr31(env, GETPC());
    return ((uint64_t)fsth2 << 32) | fst2;
}

/* CVT.PW.PS: convert each half of a paired single to a 32-bit int,
   saturating each half independently on invalid/overflow. */
uint64_t helper_float_cvtpw_ps(CPUMIPSState *env, uint64_t fdt0)
{
    uint32_t wt2;
    uint32_t wth2;
    int excp, excph;

    wt2 = float32_to_int32(fdt0 & 0XFFFFFFFF, &env->active_fpu.fp_status);
    excp = get_float_exception_flags(&env->active_fpu.fp_status);
    if (excp & (float_flag_overflow | float_flag_invalid)) {
        wt2 = FP_TO_INT32_OVERFLOW;
    }

    /* Clear the flags so the upper half's status can be inspected alone,
       then recombine both halves' flags before updating FCR31. */
    set_float_exception_flags(0, &env->active_fpu.fp_status);
    wth2 = float32_to_int32(fdt0 >> 32, &env->active_fpu.fp_status);
    excph = get_float_exception_flags(&env->active_fpu.fp_status);
    if (excph & (float_flag_overflow | float_flag_invalid)) {
        wth2 = FP_TO_INT32_OVERFLOW;
    }

    set_float_exception_flags(excp | excph, &env->active_fpu.fp_status);
    update_fcr31(env, GETPC());

    return ((uint64_t)wth2 << 32) | wt2;
}
/* CVT.S.D: double -> single. */
uint32_t helper_float_cvts_d(CPUMIPSState *env, uint64_t fdt0)
{
    uint32_t fst2;

    fst2 = float64_to_float32(fdt0, &env->active_fpu.fp_status);
    update_fcr31(env, GETPC());
    return fst2;
}

/* CVT.S.W: 32-bit integer -> single. */
uint32_t helper_float_cvts_w(CPUMIPSState *env, uint32_t wt0)
{
    uint32_t fst2;

    fst2 = int32_to_float32(wt0, &env->active_fpu.fp_status);
    update_fcr31(env, GETPC());
    return fst2;
}

/* CVT.S.L: 64-bit integer -> single. */
uint32_t helper_float_cvts_l(CPUMIPSState *env, uint64_t dt0)
{
    uint32_t fst2;

    fst2 = int64_to_float32(dt0, &env->active_fpu.fp_status);
    update_fcr31(env, GETPC());
    return fst2;
}

/* CVT.S.PL / CVT.S.PU: extract one half of a paired single (a plain move,
   but flag state is still folded into FCR31). */
uint32_t helper_float_cvts_pl(CPUMIPSState *env, uint32_t wt0)
{
    uint32_t wt2;

    wt2 = wt0;
    update_fcr31(env, GETPC());
    return wt2;
}

uint32_t helper_float_cvts_pu(CPUMIPSState *env, uint32_t wth0)
{
    uint32_t wt2;

    wt2 = wth0;
    update_fcr31(env, GETPC());
    return wt2;
}

/* CVT.W.S: single -> 32-bit integer, saturating on invalid/overflow. */
uint32_t helper_float_cvtw_s(CPUMIPSState *env, uint32_t fst0)
{
    uint32_t wt2;

    wt2 = float32_to_int32(fst0, &env->active_fpu.fp_status);
    update_fcr31(env, GETPC());
    if (get_float_exception_flags(&env->active_fpu.fp_status)
        & (float_flag_invalid | float_flag_overflow)) {
        wt2 = FP_TO_INT32_OVERFLOW;
    }
    return wt2;
}

/* CVT.W.D: double -> 32-bit integer, saturating on invalid/overflow. */
uint32_t helper_float_cvtw_d(CPUMIPSState *env, uint64_t fdt0)
{
    uint32_t wt2;

    wt2 = float64_to_int32(fdt0, &env->active_fpu.fp_status);
    if (get_float_exception_flags(&env->active_fpu.fp_status)
        & (float_flag_invalid | float_flag_overflow)) {
        wt2 = FP_TO_INT32_OVERFLOW;
    }
    update_fcr31(env, GETPC());
    return wt2;
}

/* ROUND.L/ROUND.W: convert with round-to-nearest-even regardless of the
   current FCSR rounding mode, restoring the guest mode afterwards. */
uint64_t helper_float_roundl_d(CPUMIPSState *env, uint64_t fdt0)
{
    uint64_t dt2;

    set_float_rounding_mode(float_round_nearest_even, &env->active_fpu.fp_status);
    dt2 = float64_to_int64(fdt0, &env->active_fpu.fp_status);
    RESTORE_ROUNDING_MODE;
    if (get_float_exception_flags(&env->active_fpu.fp_status)
        & (float_flag_invalid | float_flag_overflow)) {
        dt2 = FP_TO_INT64_OVERFLOW;
    }
    update_fcr31(env, GETPC());
    return dt2;
}

uint64_t helper_float_roundl_s(CPUMIPSState *env, uint32_t fst0)
{
    uint64_t dt2;

    set_float_rounding_mode(float_round_nearest_even, &env->active_fpu.fp_status);
    dt2 = float32_to_int64(fst0, &env->active_fpu.fp_status);
    RESTORE_ROUNDING_MODE;
    if (get_float_exception_flags(&env->active_fpu.fp_status)
        & (float_flag_invalid | float_flag_overflow)) {
        dt2 = FP_TO_INT64_OVERFLOW;
    }
    update_fcr31(env, GETPC());
    return dt2;
}

uint32_t helper_float_roundw_d(CPUMIPSState *env, uint64_t fdt0)
{
    uint32_t wt2;

    set_float_rounding_mode(float_round_nearest_even, &env->active_fpu.fp_status);
    wt2 = float64_to_int32(fdt0, &env->active_fpu.fp_status);
    RESTORE_ROUNDING_MODE;
    if (get_float_exception_flags(&env->active_fpu.fp_status)
        & (float_flag_invalid | float_flag_overflow)) {
        wt2 = FP_TO_INT32_OVERFLOW;
    }
    update_fcr31(env, GETPC());
    return wt2;
}

uint32_t helper_float_roundw_s(CPUMIPSState *env, uint32_t fst0)
{
    uint32_t wt2;

    set_float_rounding_mode(float_round_nearest_even, &env->active_fpu.fp_status);
    wt2 = float32_to_int32(fst0, &env->active_fpu.fp_status);
    RESTORE_ROUNDING_MODE;
    if (get_float_exception_flags(&env->active_fpu.fp_status)
        & (float_flag_invalid | float_flag_overflow)) {
        wt2 = FP_TO_INT32_OVERFLOW;
    }
    update_fcr31(env, GETPC());
    return wt2;
}
2701 uint64_t helper_float_truncl_d(CPUMIPSState *env, uint64_t fdt0)
2705 dt2 = float64_to_int64_round_to_zero(fdt0, &env->active_fpu.fp_status);
2706 if (get_float_exception_flags(&env->active_fpu.fp_status)
2707 & (float_flag_invalid | float_flag_overflow)) {
2708 dt2 = FP_TO_INT64_OVERFLOW;
2710 update_fcr31(env, GETPC());
2714 uint64_t helper_float_truncl_s(CPUMIPSState *env, uint32_t fst0)
2718 dt2 = float32_to_int64_round_to_zero(fst0, &env->active_fpu.fp_status);
2719 if (get_float_exception_flags(&env->active_fpu.fp_status)
2720 & (float_flag_invalid | float_flag_overflow)) {
2721 dt2 = FP_TO_INT64_OVERFLOW;
2723 update_fcr31(env, GETPC());
2727 uint32_t helper_float_truncw_d(CPUMIPSState *env, uint64_t fdt0)
2731 wt2 = float64_to_int32_round_to_zero(fdt0, &env->active_fpu.fp_status);
2732 if (get_float_exception_flags(&env->active_fpu.fp_status)
2733 & (float_flag_invalid | float_flag_overflow)) {
2734 wt2 = FP_TO_INT32_OVERFLOW;
2736 update_fcr31(env, GETPC());
2740 uint32_t helper_float_truncw_s(CPUMIPSState *env, uint32_t fst0)
2744 wt2 = float32_to_int32_round_to_zero(fst0, &env->active_fpu.fp_status);
2745 if (get_float_exception_flags(&env->active_fpu.fp_status)
2746 & (float_flag_invalid | float_flag_overflow)) {
2747 wt2 = FP_TO_INT32_OVERFLOW;
2749 update_fcr31(env, GETPC());
2753 uint64_t helper_float_ceill_d(CPUMIPSState *env, uint64_t fdt0)
2757 set_float_rounding_mode(float_round_up, &env->active_fpu.fp_status);
2758 dt2 = float64_to_int64(fdt0, &env->active_fpu.fp_status);
2759 RESTORE_ROUNDING_MODE;
2760 if (get_float_exception_flags(&env->active_fpu.fp_status)
2761 & (float_flag_invalid | float_flag_overflow)) {
2762 dt2 = FP_TO_INT64_OVERFLOW;
2764 update_fcr31(env, GETPC());
2768 uint64_t helper_float_ceill_s(CPUMIPSState *env, uint32_t fst0)
2772 set_float_rounding_mode(float_round_up, &env->active_fpu.fp_status);
2773 dt2 = float32_to_int64(fst0, &env->active_fpu.fp_status);
2774 RESTORE_ROUNDING_MODE;
2775 if (get_float_exception_flags(&env->active_fpu.fp_status)
2776 & (float_flag_invalid | float_flag_overflow)) {
2777 dt2 = FP_TO_INT64_OVERFLOW;
2779 update_fcr31(env, GETPC());
2783 uint32_t helper_float_ceilw_d(CPUMIPSState *env, uint64_t fdt0)
2787 set_float_rounding_mode(float_round_up, &env->active_fpu.fp_status);
2788 wt2 = float64_to_int32(fdt0, &env->active_fpu.fp_status);
2789 RESTORE_ROUNDING_MODE;
2790 if (get_float_exception_flags(&env->active_fpu.fp_status)
2791 & (float_flag_invalid | float_flag_overflow)) {
2792 wt2 = FP_TO_INT32_OVERFLOW;
2794 update_fcr31(env, GETPC());
2798 uint32_t helper_float_ceilw_s(CPUMIPSState *env, uint32_t fst0)
2802 set_float_rounding_mode(float_round_up, &env->active_fpu.fp_status);
2803 wt2 = float32_to_int32(fst0, &env->active_fpu.fp_status);
2804 RESTORE_ROUNDING_MODE;
2805 if (get_float_exception_flags(&env->active_fpu.fp_status)
2806 & (float_flag_invalid | float_flag_overflow)) {
2807 wt2 = FP_TO_INT32_OVERFLOW;
2809 update_fcr31(env, GETPC());
2813 uint64_t helper_float_floorl_d(CPUMIPSState *env, uint64_t fdt0)
2817 set_float_rounding_mode(float_round_down, &env->active_fpu.fp_status);
2818 dt2 = float64_to_int64(fdt0, &env->active_fpu.fp_status);
2819 RESTORE_ROUNDING_MODE;
2820 if (get_float_exception_flags(&env->active_fpu.fp_status)
2821 & (float_flag_invalid | float_flag_overflow)) {
2822 dt2 = FP_TO_INT64_OVERFLOW;
2824 update_fcr31(env, GETPC());
2828 uint64_t helper_float_floorl_s(CPUMIPSState *env, uint32_t fst0)
2832 set_float_rounding_mode(float_round_down, &env->active_fpu.fp_status);
2833 dt2 = float32_to_int64(fst0, &env->active_fpu.fp_status);
2834 RESTORE_ROUNDING_MODE;
2835 if (get_float_exception_flags(&env->active_fpu.fp_status)
2836 & (float_flag_invalid | float_flag_overflow)) {
2837 dt2 = FP_TO_INT64_OVERFLOW;
2839 update_fcr31(env, GETPC());
2843 uint32_t helper_float_floorw_d(CPUMIPSState *env, uint64_t fdt0)
2847 set_float_rounding_mode(float_round_down, &env->active_fpu.fp_status);
2848 wt2 = float64_to_int32(fdt0, &env->active_fpu.fp_status);
2849 RESTORE_ROUNDING_MODE;
2850 if (get_float_exception_flags(&env->active_fpu.fp_status)
2851 & (float_flag_invalid | float_flag_overflow)) {
2852 wt2 = FP_TO_INT32_OVERFLOW;
2854 update_fcr31(env, GETPC());
2858 uint32_t helper_float_floorw_s(CPUMIPSState *env, uint32_t fst0)
2862 set_float_rounding_mode(float_round_down, &env->active_fpu.fp_status);
2863 wt2 = float32_to_int32(fst0, &env->active_fpu.fp_status);
2864 RESTORE_ROUNDING_MODE;
2865 if (get_float_exception_flags(&env->active_fpu.fp_status)
2866 & (float_flag_invalid | float_flag_overflow)) {
2867 wt2 = FP_TO_INT32_OVERFLOW;
2869 update_fcr31(env, GETPC());
2873 /* unary operations, not modifying fp status */
2874 #define FLOAT_UNOP(name) \
2875 uint64_t helper_float_ ## name ## _d(uint64_t fdt0) \
2877 return float64_ ## name(fdt0); \
2879 uint32_t helper_float_ ## name ## _s(uint32_t fst0) \
2881 return float32_ ## name(fst0); \
2883 uint64_t helper_float_ ## name ## _ps(uint64_t fdt0) \
2888 wt0 = float32_ ## name(fdt0 & 0XFFFFFFFF); \
2889 wth0 = float32_ ## name(fdt0 >> 32); \
2890 return ((uint64_t)wth0 << 32) | wt0; \
2896 /* MIPS specific unary operations */
2897 uint64_t helper_float_recip_d(CPUMIPSState *env, uint64_t fdt0)
2901 fdt2 = float64_div(float64_one, fdt0, &env->active_fpu.fp_status);
2902 update_fcr31(env, GETPC());
2906 uint32_t helper_float_recip_s(CPUMIPSState *env, uint32_t fst0)
2910 fst2 = float32_div(float32_one, fst0, &env->active_fpu.fp_status);
2911 update_fcr31(env, GETPC());
2915 uint64_t helper_float_rsqrt_d(CPUMIPSState *env, uint64_t fdt0)
2919 fdt2 = float64_sqrt(fdt0, &env->active_fpu.fp_status);
2920 fdt2 = float64_div(float64_one, fdt2, &env->active_fpu.fp_status);
2921 update_fcr31(env, GETPC());
2925 uint32_t helper_float_rsqrt_s(CPUMIPSState *env, uint32_t fst0)
2929 fst2 = float32_sqrt(fst0, &env->active_fpu.fp_status);
2930 fst2 = float32_div(float32_one, fst2, &env->active_fpu.fp_status);
2931 update_fcr31(env, GETPC());
2935 uint64_t helper_float_recip1_d(CPUMIPSState *env, uint64_t fdt0)
2939 fdt2 = float64_div(float64_one, fdt0, &env->active_fpu.fp_status);
2940 update_fcr31(env, GETPC());
2944 uint32_t helper_float_recip1_s(CPUMIPSState *env, uint32_t fst0)
2948 fst2 = float32_div(float32_one, fst0, &env->active_fpu.fp_status);
2949 update_fcr31(env, GETPC());
2953 uint64_t helper_float_recip1_ps(CPUMIPSState *env, uint64_t fdt0)
2958 fst2 = float32_div(float32_one, fdt0 & 0XFFFFFFFF, &env->active_fpu.fp_status);
2959 fsth2 = float32_div(float32_one, fdt0 >> 32, &env->active_fpu.fp_status);
2960 update_fcr31(env, GETPC());
2961 return ((uint64_t)fsth2 << 32) | fst2;
2964 uint64_t helper_float_rsqrt1_d(CPUMIPSState *env, uint64_t fdt0)
2968 fdt2 = float64_sqrt(fdt0, &env->active_fpu.fp_status);
2969 fdt2 = float64_div(float64_one, fdt2, &env->active_fpu.fp_status);
2970 update_fcr31(env, GETPC());
2974 uint32_t helper_float_rsqrt1_s(CPUMIPSState *env, uint32_t fst0)
2978 fst2 = float32_sqrt(fst0, &env->active_fpu.fp_status);
2979 fst2 = float32_div(float32_one, fst2, &env->active_fpu.fp_status);
2980 update_fcr31(env, GETPC());
2984 uint64_t helper_float_rsqrt1_ps(CPUMIPSState *env, uint64_t fdt0)
2989 fst2 = float32_sqrt(fdt0 & 0XFFFFFFFF, &env->active_fpu.fp_status);
2990 fsth2 = float32_sqrt(fdt0 >> 32, &env->active_fpu.fp_status);
2991 fst2 = float32_div(float32_one, fst2, &env->active_fpu.fp_status);
2992 fsth2 = float32_div(float32_one, fsth2, &env->active_fpu.fp_status);
2993 update_fcr31(env, GETPC());
2994 return ((uint64_t)fsth2 << 32) | fst2;
2997 #define FLOAT_OP(name, p) void helper_float_##name##_##p(CPUMIPSState *env)
2999 /* binary operations */
3000 #define FLOAT_BINOP(name) \
3001 uint64_t helper_float_ ## name ## _d(CPUMIPSState *env, \
3002 uint64_t fdt0, uint64_t fdt1) \
3006 dt2 = float64_ ## name (fdt0, fdt1, &env->active_fpu.fp_status); \
3007 update_fcr31(env, GETPC()); \
3011 uint32_t helper_float_ ## name ## _s(CPUMIPSState *env, \
3012 uint32_t fst0, uint32_t fst1) \
3016 wt2 = float32_ ## name (fst0, fst1, &env->active_fpu.fp_status); \
3017 update_fcr31(env, GETPC()); \
3021 uint64_t helper_float_ ## name ## _ps(CPUMIPSState *env, \
3025 uint32_t fst0 = fdt0 & 0XFFFFFFFF; \
3026 uint32_t fsth0 = fdt0 >> 32; \
3027 uint32_t fst1 = fdt1 & 0XFFFFFFFF; \
3028 uint32_t fsth1 = fdt1 >> 32; \
3032 wt2 = float32_ ## name (fst0, fst1, &env->active_fpu.fp_status); \
3033 wth2 = float32_ ## name (fsth0, fsth1, &env->active_fpu.fp_status); \
3034 update_fcr31(env, GETPC()); \
3035 return ((uint64_t)wth2 << 32) | wt2; \
3044 /* FMA based operations */
3045 #define FLOAT_FMA(name, type) \
3046 uint64_t helper_float_ ## name ## _d(CPUMIPSState *env, \
3047 uint64_t fdt0, uint64_t fdt1, \
3050 fdt0 = float64_muladd(fdt0, fdt1, fdt2, type, \
3051 &env->active_fpu.fp_status); \
3052 update_fcr31(env, GETPC()); \
3056 uint32_t helper_float_ ## name ## _s(CPUMIPSState *env, \
3057 uint32_t fst0, uint32_t fst1, \
3060 fst0 = float32_muladd(fst0, fst1, fst2, type, \
3061 &env->active_fpu.fp_status); \
3062 update_fcr31(env, GETPC()); \
3066 uint64_t helper_float_ ## name ## _ps(CPUMIPSState *env, \
3067 uint64_t fdt0, uint64_t fdt1, \
3070 uint32_t fst0 = fdt0 & 0XFFFFFFFF; \
3071 uint32_t fsth0 = fdt0 >> 32; \
3072 uint32_t fst1 = fdt1 & 0XFFFFFFFF; \
3073 uint32_t fsth1 = fdt1 >> 32; \
3074 uint32_t fst2 = fdt2 & 0XFFFFFFFF; \
3075 uint32_t fsth2 = fdt2 >> 32; \
3077 fst0 = float32_muladd(fst0, fst1, fst2, type, \
3078 &env->active_fpu.fp_status); \
3079 fsth0 = float32_muladd(fsth0, fsth1, fsth2, type, \
3080 &env->active_fpu.fp_status); \
3081 update_fcr31(env, GETPC()); \
3082 return ((uint64_t)fsth0 << 32) | fst0; \
3085 FLOAT_FMA(msub, float_muladd_negate_c)
3086 FLOAT_FMA(nmadd, float_muladd_negate_result)
3087 FLOAT_FMA(nmsub, float_muladd_negate_result | float_muladd_negate_c)
3090 /* MIPS specific binary operations */
3091 uint64_t helper_float_recip2_d(CPUMIPSState *env, uint64_t fdt0, uint64_t fdt2)
3093 fdt2 = float64_mul(fdt0, fdt2, &env->active_fpu.fp_status);
3094 fdt2 = float64_chs(float64_sub(fdt2, float64_one, &env->active_fpu.fp_status));
3095 update_fcr31(env, GETPC());
3099 uint32_t helper_float_recip2_s(CPUMIPSState *env, uint32_t fst0, uint32_t fst2)
3101 fst2 = float32_mul(fst0, fst2, &env->active_fpu.fp_status);
3102 fst2 = float32_chs(float32_sub(fst2, float32_one, &env->active_fpu.fp_status));
3103 update_fcr31(env, GETPC());
3107 uint64_t helper_float_recip2_ps(CPUMIPSState *env, uint64_t fdt0, uint64_t fdt2)
3109 uint32_t fst0 = fdt0 & 0XFFFFFFFF;
3110 uint32_t fsth0 = fdt0 >> 32;
3111 uint32_t fst2 = fdt2 & 0XFFFFFFFF;
3112 uint32_t fsth2 = fdt2 >> 32;
3114 fst2 = float32_mul(fst0, fst2, &env->active_fpu.fp_status);
3115 fsth2 = float32_mul(fsth0, fsth2, &env->active_fpu.fp_status);
3116 fst2 = float32_chs(float32_sub(fst2, float32_one, &env->active_fpu.fp_status));
3117 fsth2 = float32_chs(float32_sub(fsth2, float32_one, &env->active_fpu.fp_status));
3118 update_fcr31(env, GETPC());
3119 return ((uint64_t)fsth2 << 32) | fst2;
3122 uint64_t helper_float_rsqrt2_d(CPUMIPSState *env, uint64_t fdt0, uint64_t fdt2)
3124 fdt2 = float64_mul(fdt0, fdt2, &env->active_fpu.fp_status);
3125 fdt2 = float64_sub(fdt2, float64_one, &env->active_fpu.fp_status);
3126 fdt2 = float64_chs(float64_div(fdt2, FLOAT_TWO64, &env->active_fpu.fp_status));
3127 update_fcr31(env, GETPC());
3131 uint32_t helper_float_rsqrt2_s(CPUMIPSState *env, uint32_t fst0, uint32_t fst2)
3133 fst2 = float32_mul(fst0, fst2, &env->active_fpu.fp_status);
3134 fst2 = float32_sub(fst2, float32_one, &env->active_fpu.fp_status);
3135 fst2 = float32_chs(float32_div(fst2, FLOAT_TWO32, &env->active_fpu.fp_status));
3136 update_fcr31(env, GETPC());
3140 uint64_t helper_float_rsqrt2_ps(CPUMIPSState *env, uint64_t fdt0, uint64_t fdt2)
3142 uint32_t fst0 = fdt0 & 0XFFFFFFFF;
3143 uint32_t fsth0 = fdt0 >> 32;
3144 uint32_t fst2 = fdt2 & 0XFFFFFFFF;
3145 uint32_t fsth2 = fdt2 >> 32;
3147 fst2 = float32_mul(fst0, fst2, &env->active_fpu.fp_status);
3148 fsth2 = float32_mul(fsth0, fsth2, &env->active_fpu.fp_status);
3149 fst2 = float32_sub(fst2, float32_one, &env->active_fpu.fp_status);
3150 fsth2 = float32_sub(fsth2, float32_one, &env->active_fpu.fp_status);
3151 fst2 = float32_chs(float32_div(fst2, FLOAT_TWO32, &env->active_fpu.fp_status));
3152 fsth2 = float32_chs(float32_div(fsth2, FLOAT_TWO32, &env->active_fpu.fp_status));
3153 update_fcr31(env, GETPC());
3154 return ((uint64_t)fsth2 << 32) | fst2;
3157 uint64_t helper_float_addr_ps(CPUMIPSState *env, uint64_t fdt0, uint64_t fdt1)
3159 uint32_t fst0 = fdt0 & 0XFFFFFFFF;
3160 uint32_t fsth0 = fdt0 >> 32;
3161 uint32_t fst1 = fdt1 & 0XFFFFFFFF;
3162 uint32_t fsth1 = fdt1 >> 32;
3166 fst2 = float32_add (fst0, fsth0, &env->active_fpu.fp_status);
3167 fsth2 = float32_add (fst1, fsth1, &env->active_fpu.fp_status);
3168 update_fcr31(env, GETPC());
3169 return ((uint64_t)fsth2 << 32) | fst2;
3172 uint64_t helper_float_mulr_ps(CPUMIPSState *env, uint64_t fdt0, uint64_t fdt1)
3174 uint32_t fst0 = fdt0 & 0XFFFFFFFF;
3175 uint32_t fsth0 = fdt0 >> 32;
3176 uint32_t fst1 = fdt1 & 0XFFFFFFFF;
3177 uint32_t fsth1 = fdt1 >> 32;
3181 fst2 = float32_mul (fst0, fsth0, &env->active_fpu.fp_status);
3182 fsth2 = float32_mul (fst1, fsth1, &env->active_fpu.fp_status);
3183 update_fcr31(env, GETPC());
3184 return ((uint64_t)fsth2 << 32) | fst2;
/* compare operations */
/* Generates the C.cond.D compare helper pair for one condition: the
 * plain compare and the absolute-value compare.  The condition result
 * is latched into the FCC bit "cc" after FCR31 is updated. */
#define FOP_COND_D(op, cond)                                   \
void helper_cmp_d_ ## op(CPUMIPSState *env, uint64_t fdt0,     \
                         uint64_t fdt1, int cc)                \
{                                                              \
    int c;                                                     \
    c = cond;                                                  \
    update_fcr31(env, GETPC());                                \
    if (c)                                                     \
        SET_FP_COND(cc, env->active_fpu);                      \
    else                                                       \
        CLEAR_FP_COND(cc, env->active_fpu);                    \
}                                                              \
void helper_cmpabs_d_ ## op(CPUMIPSState *env, uint64_t fdt0,  \
                            uint64_t fdt1, int cc)             \
{                                                              \
    int c;                                                     \
    fdt0 = float64_abs(fdt0);                                  \
    fdt1 = float64_abs(fdt1);                                  \
    c = cond;                                                  \
    update_fcr31(env, GETPC());                                \
    if (c)                                                     \
        SET_FP_COND(cc, env->active_fpu);                      \
    else                                                       \
        CLEAR_FP_COND(cc, env->active_fpu);                    \
}
3214 /* NOTE: the comma operator will make "cond" to eval to false,
3215 * but float64_unordered_quiet() is still called. */
3216 FOP_COND_D(f, (float64_unordered_quiet(fdt1, fdt0, &env->active_fpu.fp_status), 0))
3217 FOP_COND_D(un, float64_unordered_quiet(fdt1, fdt0, &env->active_fpu.fp_status))
3218 FOP_COND_D(eq, float64_eq_quiet(fdt0, fdt1, &env->active_fpu.fp_status))
3219 FOP_COND_D(ueq, float64_unordered_quiet(fdt1, fdt0, &env->active_fpu.fp_status) || float64_eq_quiet(fdt0, fdt1, &env->active_fpu.fp_status))
3220 FOP_COND_D(olt, float64_lt_quiet(fdt0, fdt1, &env->active_fpu.fp_status))
3221 FOP_COND_D(ult, float64_unordered_quiet(fdt1, fdt0, &env->active_fpu.fp_status) || float64_lt_quiet(fdt0, fdt1, &env->active_fpu.fp_status))
3222 FOP_COND_D(ole, float64_le_quiet(fdt0, fdt1, &env->active_fpu.fp_status))
3223 FOP_COND_D(ule, float64_unordered_quiet(fdt1, fdt0, &env->active_fpu.fp_status) || float64_le_quiet(fdt0, fdt1, &env->active_fpu.fp_status))
3224 /* NOTE: the comma operator will make "cond" to eval to false,
3225 * but float64_unordered() is still called. */
3226 FOP_COND_D(sf, (float64_unordered(fdt1, fdt0, &env->active_fpu.fp_status), 0))
3227 FOP_COND_D(ngle,float64_unordered(fdt1, fdt0, &env->active_fpu.fp_status))
3228 FOP_COND_D(seq, float64_eq(fdt0, fdt1, &env->active_fpu.fp_status))
3229 FOP_COND_D(ngl, float64_unordered(fdt1, fdt0, &env->active_fpu.fp_status) || float64_eq(fdt0, fdt1, &env->active_fpu.fp_status))
3230 FOP_COND_D(lt, float64_lt(fdt0, fdt1, &env->active_fpu.fp_status))
3231 FOP_COND_D(nge, float64_unordered(fdt1, fdt0, &env->active_fpu.fp_status) || float64_lt(fdt0, fdt1, &env->active_fpu.fp_status))
3232 FOP_COND_D(le, float64_le(fdt0, fdt1, &env->active_fpu.fp_status))
3233 FOP_COND_D(ngt, float64_unordered(fdt1, fdt0, &env->active_fpu.fp_status) || float64_le(fdt0, fdt1, &env->active_fpu.fp_status))
/* Generates the C.cond.S compare helper pair for one condition: the
 * plain compare and the absolute-value compare.  The condition result
 * is latched into the FCC bit "cc" after FCR31 is updated. */
#define FOP_COND_S(op, cond)                                   \
void helper_cmp_s_ ## op(CPUMIPSState *env, uint32_t fst0,     \
                         uint32_t fst1, int cc)                \
{                                                              \
    int c;                                                     \
    c = cond;                                                  \
    update_fcr31(env, GETPC());                                \
    if (c)                                                     \
        SET_FP_COND(cc, env->active_fpu);                      \
    else                                                       \
        CLEAR_FP_COND(cc, env->active_fpu);                    \
}                                                              \
void helper_cmpabs_s_ ## op(CPUMIPSState *env, uint32_t fst0,  \
                            uint32_t fst1, int cc)             \
{                                                              \
    int c;                                                     \
    fst0 = float32_abs(fst0);                                  \
    fst1 = float32_abs(fst1);                                  \
    c = cond;                                                  \
    update_fcr31(env, GETPC());                                \
    if (c)                                                     \
        SET_FP_COND(cc, env->active_fpu);                      \
    else                                                       \
        CLEAR_FP_COND(cc, env->active_fpu);                    \
}
3261 /* NOTE: the comma operator will make "cond" to eval to false,
3262 * but float32_unordered_quiet() is still called. */
3263 FOP_COND_S(f, (float32_unordered_quiet(fst1, fst0, &env->active_fpu.fp_status), 0))
3264 FOP_COND_S(un, float32_unordered_quiet(fst1, fst0, &env->active_fpu.fp_status))
3265 FOP_COND_S(eq, float32_eq_quiet(fst0, fst1, &env->active_fpu.fp_status))
3266 FOP_COND_S(ueq, float32_unordered_quiet(fst1, fst0, &env->active_fpu.fp_status) || float32_eq_quiet(fst0, fst1, &env->active_fpu.fp_status))
3267 FOP_COND_S(olt, float32_lt_quiet(fst0, fst1, &env->active_fpu.fp_status))
3268 FOP_COND_S(ult, float32_unordered_quiet(fst1, fst0, &env->active_fpu.fp_status) || float32_lt_quiet(fst0, fst1, &env->active_fpu.fp_status))
3269 FOP_COND_S(ole, float32_le_quiet(fst0, fst1, &env->active_fpu.fp_status))
3270 FOP_COND_S(ule, float32_unordered_quiet(fst1, fst0, &env->active_fpu.fp_status) || float32_le_quiet(fst0, fst1, &env->active_fpu.fp_status))
3271 /* NOTE: the comma operator will make "cond" to eval to false,
3272 * but float32_unordered() is still called. */
3273 FOP_COND_S(sf, (float32_unordered(fst1, fst0, &env->active_fpu.fp_status), 0))
3274 FOP_COND_S(ngle,float32_unordered(fst1, fst0, &env->active_fpu.fp_status))
3275 FOP_COND_S(seq, float32_eq(fst0, fst1, &env->active_fpu.fp_status))
3276 FOP_COND_S(ngl, float32_unordered(fst1, fst0, &env->active_fpu.fp_status) || float32_eq(fst0, fst1, &env->active_fpu.fp_status))
3277 FOP_COND_S(lt, float32_lt(fst0, fst1, &env->active_fpu.fp_status))
3278 FOP_COND_S(nge, float32_unordered(fst1, fst0, &env->active_fpu.fp_status) || float32_lt(fst0, fst1, &env->active_fpu.fp_status))
3279 FOP_COND_S(le, float32_le(fst0, fst1, &env->active_fpu.fp_status))
3280 FOP_COND_S(ngt, float32_unordered(fst1, fst0, &env->active_fpu.fp_status) || float32_le(fst0, fst1, &env->active_fpu.fp_status))
/* Generates the C.cond.PS compare helper pair for one condition.
 * "condl"/"condh" evaluate the low/high singles; the results are
 * latched into FCC bits cc and cc+1 after FCR31 is updated. */
#define FOP_COND_PS(op, condl, condh)                          \
void helper_cmp_ps_ ## op(CPUMIPSState *env, uint64_t fdt0,    \
                          uint64_t fdt1, int cc)               \
{                                                              \
    uint32_t fst0, fsth0, fst1, fsth1;                         \
    int ch, cl;                                                \
    fst0 = fdt0 & 0XFFFFFFFF;                                  \
    fsth0 = fdt0 >> 32;                                        \
    fst1 = fdt1 & 0XFFFFFFFF;                                  \
    fsth1 = fdt1 >> 32;                                        \
    cl = condl;                                                \
    ch = condh;                                                \
    update_fcr31(env, GETPC());                                \
    if (cl)                                                    \
        SET_FP_COND(cc, env->active_fpu);                      \
    else                                                       \
        CLEAR_FP_COND(cc, env->active_fpu);                    \
    if (ch)                                                    \
        SET_FP_COND(cc + 1, env->active_fpu);                  \
    else                                                       \
        CLEAR_FP_COND(cc + 1, env->active_fpu);                \
}                                                              \
void helper_cmpabs_ps_ ## op(CPUMIPSState *env, uint64_t fdt0, \
                             uint64_t fdt1, int cc)            \
{                                                              \
    uint32_t fst0, fsth0, fst1, fsth1;                         \
    int ch, cl;                                                \
    fst0 = float32_abs(fdt0 & 0XFFFFFFFF);                     \
    fsth0 = float32_abs(fdt0 >> 32);                           \
    fst1 = float32_abs(fdt1 & 0XFFFFFFFF);                     \
    fsth1 = float32_abs(fdt1 >> 32);                           \
    cl = condl;                                                \
    ch = condh;                                                \
    update_fcr31(env, GETPC());                                \
    if (cl)                                                    \
        SET_FP_COND(cc, env->active_fpu);                      \
    else                                                       \
        CLEAR_FP_COND(cc, env->active_fpu);                    \
    if (ch)                                                    \
        SET_FP_COND(cc + 1, env->active_fpu);                  \
    else                                                       \
        CLEAR_FP_COND(cc + 1, env->active_fpu);                \
}
3326 /* NOTE: the comma operator will make "cond" to eval to false,
3327 * but float32_unordered_quiet() is still called. */
3328 FOP_COND_PS(f, (float32_unordered_quiet(fst1, fst0, &env->active_fpu.fp_status), 0),
3329 (float32_unordered_quiet(fsth1, fsth0, &env->active_fpu.fp_status), 0))
3330 FOP_COND_PS(un, float32_unordered_quiet(fst1, fst0, &env->active_fpu.fp_status),
3331 float32_unordered_quiet(fsth1, fsth0, &env->active_fpu.fp_status))
3332 FOP_COND_PS(eq, float32_eq_quiet(fst0, fst1, &env->active_fpu.fp_status),
3333 float32_eq_quiet(fsth0, fsth1, &env->active_fpu.fp_status))
3334 FOP_COND_PS(ueq, float32_unordered_quiet(fst1, fst0, &env->active_fpu.fp_status) || float32_eq_quiet(fst0, fst1, &env->active_fpu.fp_status),
3335 float32_unordered_quiet(fsth1, fsth0, &env->active_fpu.fp_status) || float32_eq_quiet(fsth0, fsth1, &env->active_fpu.fp_status))
3336 FOP_COND_PS(olt, float32_lt_quiet(fst0, fst1, &env->active_fpu.fp_status),
3337 float32_lt_quiet(fsth0, fsth1, &env->active_fpu.fp_status))
3338 FOP_COND_PS(ult, float32_unordered_quiet(fst1, fst0, &env->active_fpu.fp_status) || float32_lt_quiet(fst0, fst1, &env->active_fpu.fp_status),
3339 float32_unordered_quiet(fsth1, fsth0, &env->active_fpu.fp_status) || float32_lt_quiet(fsth0, fsth1, &env->active_fpu.fp_status))
3340 FOP_COND_PS(ole, float32_le_quiet(fst0, fst1, &env->active_fpu.fp_status),
3341 float32_le_quiet(fsth0, fsth1, &env->active_fpu.fp_status))
3342 FOP_COND_PS(ule, float32_unordered_quiet(fst1, fst0, &env->active_fpu.fp_status) || float32_le_quiet(fst0, fst1, &env->active_fpu.fp_status),
3343 float32_unordered_quiet(fsth1, fsth0, &env->active_fpu.fp_status) || float32_le_quiet(fsth0, fsth1, &env->active_fpu.fp_status))
3344 /* NOTE: the comma operator will make "cond" to eval to false,
3345 * but float32_unordered() is still called. */
3346 FOP_COND_PS(sf, (float32_unordered(fst1, fst0, &env->active_fpu.fp_status), 0),
3347 (float32_unordered(fsth1, fsth0, &env->active_fpu.fp_status), 0))
3348 FOP_COND_PS(ngle,float32_unordered(fst1, fst0, &env->active_fpu.fp_status),
3349 float32_unordered(fsth1, fsth0, &env->active_fpu.fp_status))
3350 FOP_COND_PS(seq, float32_eq(fst0, fst1, &env->active_fpu.fp_status),
3351 float32_eq(fsth0, fsth1, &env->active_fpu.fp_status))
3352 FOP_COND_PS(ngl, float32_unordered(fst1, fst0, &env->active_fpu.fp_status) || float32_eq(fst0, fst1, &env->active_fpu.fp_status),
3353 float32_unordered(fsth1, fsth0, &env->active_fpu.fp_status) || float32_eq(fsth0, fsth1, &env->active_fpu.fp_status))
3354 FOP_COND_PS(lt, float32_lt(fst0, fst1, &env->active_fpu.fp_status),
3355 float32_lt(fsth0, fsth1, &env->active_fpu.fp_status))
3356 FOP_COND_PS(nge, float32_unordered(fst1, fst0, &env->active_fpu.fp_status) || float32_lt(fst0, fst1, &env->active_fpu.fp_status),
3357 float32_unordered(fsth1, fsth0, &env->active_fpu.fp_status) || float32_lt(fsth0, fsth1, &env->active_fpu.fp_status))
3358 FOP_COND_PS(le, float32_le(fst0, fst1, &env->active_fpu.fp_status),
3359 float32_le(fsth0, fsth1, &env->active_fpu.fp_status))
3360 FOP_COND_PS(ngt, float32_unordered(fst1, fst0, &env->active_fpu.fp_status) || float32_le(fst0, fst1, &env->active_fpu.fp_status),
3361 float32_unordered(fsth1, fsth0, &env->active_fpu.fp_status) || float32_le(fsth0, fsth1, &env->active_fpu.fp_status))