2 * MIPS emulation helpers for qemu.
4 * Copyright (c) 2004-2005 Jocelyn Mayer
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
21 #include "dyngen-exec.h"
23 #include "host-utils.h"
27 #if !defined(CONFIG_USER_ONLY)
28 #include "softmmu_exec.h"
29 #endif /* !defined(CONFIG_USER_ONLY) */
31 #ifndef CONFIG_USER_ONLY
32 static inline void cpu_mips_tlb_flush (CPUMIPSState *env, int flush_global);
35 static inline void compute_hflags(CPUMIPSState *env)
37 env->hflags &= ~(MIPS_HFLAG_COP1X | MIPS_HFLAG_64 | MIPS_HFLAG_CP0 |
38 MIPS_HFLAG_F64 | MIPS_HFLAG_FPU | MIPS_HFLAG_KSU |
40 if (!(env->CP0_Status & (1 << CP0St_EXL)) &&
41 !(env->CP0_Status & (1 << CP0St_ERL)) &&
42 !(env->hflags & MIPS_HFLAG_DM)) {
43 env->hflags |= (env->CP0_Status >> CP0St_KSU) & MIPS_HFLAG_KSU;
45 #if defined(TARGET_MIPS64)
46 if (((env->hflags & MIPS_HFLAG_KSU) != MIPS_HFLAG_UM) ||
47 (env->CP0_Status & (1 << CP0St_PX)) ||
48 (env->CP0_Status & (1 << CP0St_UX))) {
49 env->hflags |= MIPS_HFLAG_64;
51 if (env->CP0_Status & (1 << CP0St_UX)) {
52 env->hflags |= MIPS_HFLAG_UX;
55 if ((env->CP0_Status & (1 << CP0St_CU0)) ||
56 !(env->hflags & MIPS_HFLAG_KSU)) {
57 env->hflags |= MIPS_HFLAG_CP0;
59 if (env->CP0_Status & (1 << CP0St_CU1)) {
60 env->hflags |= MIPS_HFLAG_FPU;
62 if (env->CP0_Status & (1 << CP0St_FR)) {
63 env->hflags |= MIPS_HFLAG_F64;
65 if (env->insn_flags & ISA_MIPS32R2) {
66 if (env->active_fpu.fcr0 & (1 << FCR0_F64)) {
67 env->hflags |= MIPS_HFLAG_COP1X;
69 } else if (env->insn_flags & ISA_MIPS32) {
70 if (env->hflags & MIPS_HFLAG_64) {
71 env->hflags |= MIPS_HFLAG_COP1X;
73 } else if (env->insn_flags & ISA_MIPS4) {
74 /* All supported MIPS IV CPUs use the XX (CU3) to enable
75 and disable the MIPS IV extensions to the MIPS III ISA.
76 Some other MIPS IV CPUs ignore the bit, so the check here
77 would be too restrictive for them. */
78 if (env->CP0_Status & (1 << CP0St_CU3)) {
79 env->hflags |= MIPS_HFLAG_COP1X;
84 /*****************************************************************************/
85 /* Exceptions processing helpers */
87 void helper_raise_exception_err (uint32_t exception, int error_code)
90 if (exception < 0x100)
91 qemu_log("%s: %d %d\n", __func__, exception, error_code);
93 env->exception_index = exception;
94 env->error_code = error_code;
98 void helper_raise_exception (uint32_t exception)
100 helper_raise_exception_err(exception, 0);
103 #if !defined(CONFIG_USER_ONLY)
104 static void do_restore_state(uintptr_t pc)
106 TranslationBlock *tb;
108 tb = tb_find_pc (pc);
110 cpu_restore_state(tb, env, pc);
115 #if defined(CONFIG_USER_ONLY)
116 #define HELPER_LD(name, insn, type) \
117 static inline type do_##name(target_ulong addr, int mem_idx) \
119 return (type) insn##_raw(addr); \
122 #define HELPER_LD(name, insn, type) \
123 static inline type do_##name(target_ulong addr, int mem_idx) \
127 case 0: return (type) insn##_kernel(addr); break; \
128 case 1: return (type) insn##_super(addr); break; \
130 case 2: return (type) insn##_user(addr); break; \
134 HELPER_LD(lbu, ldub, uint8_t)
135 HELPER_LD(lw, ldl, int32_t)
137 HELPER_LD(ld, ldq, int64_t)
141 #if defined(CONFIG_USER_ONLY)
142 #define HELPER_ST(name, insn, type) \
143 static inline void do_##name(target_ulong addr, type val, int mem_idx) \
145 insn##_raw(addr, val); \
148 #define HELPER_ST(name, insn, type) \
149 static inline void do_##name(target_ulong addr, type val, int mem_idx) \
153 case 0: insn##_kernel(addr, val); break; \
154 case 1: insn##_super(addr, val); break; \
156 case 2: insn##_user(addr, val); break; \
160 HELPER_ST(sb, stb, uint8_t)
161 HELPER_ST(sw, stl, uint32_t)
163 HELPER_ST(sd, stq, uint64_t)
167 target_ulong helper_clo (target_ulong arg1)
172 target_ulong helper_clz (target_ulong arg1)
177 #if defined(TARGET_MIPS64)
178 target_ulong helper_dclo (target_ulong arg1)
183 target_ulong helper_dclz (target_ulong arg1)
187 #endif /* TARGET_MIPS64 */
189 /* 64 bits arithmetic for 32 bits hosts */
190 static inline uint64_t get_HILO (void)
192 return ((uint64_t)(env->active_tc.HI[0]) << 32) | (uint32_t)env->active_tc.LO[0];
195 static inline target_ulong set_HIT0_LO(uint64_t HILO)
198 env->active_tc.LO[0] = (int32_t)(HILO & 0xFFFFFFFF);
199 tmp = env->active_tc.HI[0] = (int32_t)(HILO >> 32);
203 static inline target_ulong set_HI_LOT0(uint64_t HILO)
205 target_ulong tmp = env->active_tc.LO[0] = (int32_t)(HILO & 0xFFFFFFFF);
206 env->active_tc.HI[0] = (int32_t)(HILO >> 32);
210 /* Multiplication variants of the vr54xx. */
211 target_ulong helper_muls (target_ulong arg1, target_ulong arg2)
213 return set_HI_LOT0(0 - ((int64_t)(int32_t)arg1 * (int64_t)(int32_t)arg2));
216 target_ulong helper_mulsu (target_ulong arg1, target_ulong arg2)
218 return set_HI_LOT0(0 - (uint64_t)(uint32_t)arg1 * (uint64_t)(uint32_t)arg2);
221 target_ulong helper_macc (target_ulong arg1, target_ulong arg2)
223 return set_HI_LOT0((int64_t)get_HILO() + (int64_t)(int32_t)arg1 *
224 (int64_t)(int32_t)arg2);
227 target_ulong helper_macchi (target_ulong arg1, target_ulong arg2)
229 return set_HIT0_LO((int64_t)get_HILO() + (int64_t)(int32_t)arg1 *
230 (int64_t)(int32_t)arg2);
233 target_ulong helper_maccu (target_ulong arg1, target_ulong arg2)
235 return set_HI_LOT0((uint64_t)get_HILO() + (uint64_t)(uint32_t)arg1 *
236 (uint64_t)(uint32_t)arg2);
239 target_ulong helper_macchiu (target_ulong arg1, target_ulong arg2)
241 return set_HIT0_LO((uint64_t)get_HILO() + (uint64_t)(uint32_t)arg1 *
242 (uint64_t)(uint32_t)arg2);
245 target_ulong helper_msac (target_ulong arg1, target_ulong arg2)
247 return set_HI_LOT0((int64_t)get_HILO() - (int64_t)(int32_t)arg1 *
248 (int64_t)(int32_t)arg2);
251 target_ulong helper_msachi (target_ulong arg1, target_ulong arg2)
253 return set_HIT0_LO((int64_t)get_HILO() - (int64_t)(int32_t)arg1 *
254 (int64_t)(int32_t)arg2);
257 target_ulong helper_msacu (target_ulong arg1, target_ulong arg2)
259 return set_HI_LOT0((uint64_t)get_HILO() - (uint64_t)(uint32_t)arg1 *
260 (uint64_t)(uint32_t)arg2);
263 target_ulong helper_msachiu (target_ulong arg1, target_ulong arg2)
265 return set_HIT0_LO((uint64_t)get_HILO() - (uint64_t)(uint32_t)arg1 *
266 (uint64_t)(uint32_t)arg2);
269 target_ulong helper_mulhi (target_ulong arg1, target_ulong arg2)
271 return set_HIT0_LO((int64_t)(int32_t)arg1 * (int64_t)(int32_t)arg2);
274 target_ulong helper_mulhiu (target_ulong arg1, target_ulong arg2)
276 return set_HIT0_LO((uint64_t)(uint32_t)arg1 * (uint64_t)(uint32_t)arg2);
279 target_ulong helper_mulshi (target_ulong arg1, target_ulong arg2)
281 return set_HIT0_LO(0 - (int64_t)(int32_t)arg1 * (int64_t)(int32_t)arg2);
284 target_ulong helper_mulshiu (target_ulong arg1, target_ulong arg2)
286 return set_HIT0_LO(0 - (uint64_t)(uint32_t)arg1 * (uint64_t)(uint32_t)arg2);
#if defined(TARGET_MIPS64)
/* 64x64 -> 128 bit multiplies; the full product lands in LO (low half)
   and HI (high half) via the host-utils 128-bit multiply primitives.  */
void helper_dmult (target_ulong arg1, target_ulong arg2)
{
    muls64(&(env->active_tc.LO[0]), &(env->active_tc.HI[0]), arg1, arg2);
}

void helper_dmultu (target_ulong arg1, target_ulong arg2)
{
    mulu64(&(env->active_tc.LO[0]), &(env->active_tc.HI[0]), arg1, arg2);
}
#endif
301 #ifndef CONFIG_USER_ONLY
303 static inline target_phys_addr_t do_translate_address(target_ulong address, int rw)
305 target_phys_addr_t lladdr;
307 lladdr = cpu_mips_translate_address(env, address, rw);
309 if (lladdr == -1LL) {
316 #define HELPER_LD_ATOMIC(name, insn) \
317 target_ulong helper_##name(target_ulong arg, int mem_idx) \
319 env->lladdr = do_translate_address(arg, 0); \
320 env->llval = do_##insn(arg, mem_idx); \
323 HELPER_LD_ATOMIC(ll, lw)
325 HELPER_LD_ATOMIC(lld, ld)
327 #undef HELPER_LD_ATOMIC
329 #define HELPER_ST_ATOMIC(name, ld_insn, st_insn, almask) \
330 target_ulong helper_##name(target_ulong arg1, target_ulong arg2, int mem_idx) \
334 if (arg2 & almask) { \
335 env->CP0_BadVAddr = arg2; \
336 helper_raise_exception(EXCP_AdES); \
338 if (do_translate_address(arg2, 1) == env->lladdr) { \
339 tmp = do_##ld_insn(arg2, mem_idx); \
340 if (tmp == env->llval) { \
341 do_##st_insn(arg2, arg1, mem_idx); \
347 HELPER_ST_ATOMIC(sc, lw, sw, 0x3)
349 HELPER_ST_ATOMIC(scd, ld, sd, 0x7)
351 #undef HELPER_ST_ATOMIC
/* Byte lane selection for the unaligned word load/store helpers
   (LWL/LWR/SWL/SWR); the lane numbering and the direction of the byte
   walk flip with target endianness.  */
#ifdef TARGET_WORDS_BIGENDIAN
#define GET_LMASK(v) ((v) & 3)
#define GET_OFFSET(addr, offset) (addr + (offset))
#else
#define GET_LMASK(v) (((v) & 3) ^ 3)
#define GET_OFFSET(addr, offset) (addr - (offset))
#endif
362 target_ulong helper_lwl(target_ulong arg1, target_ulong arg2, int mem_idx)
366 tmp = do_lbu(arg2, mem_idx);
367 arg1 = (arg1 & 0x00FFFFFF) | (tmp << 24);
369 if (GET_LMASK(arg2) <= 2) {
370 tmp = do_lbu(GET_OFFSET(arg2, 1), mem_idx);
371 arg1 = (arg1 & 0xFF00FFFF) | (tmp << 16);
374 if (GET_LMASK(arg2) <= 1) {
375 tmp = do_lbu(GET_OFFSET(arg2, 2), mem_idx);
376 arg1 = (arg1 & 0xFFFF00FF) | (tmp << 8);
379 if (GET_LMASK(arg2) == 0) {
380 tmp = do_lbu(GET_OFFSET(arg2, 3), mem_idx);
381 arg1 = (arg1 & 0xFFFFFF00) | tmp;
383 return (int32_t)arg1;
386 target_ulong helper_lwr(target_ulong arg1, target_ulong arg2, int mem_idx)
390 tmp = do_lbu(arg2, mem_idx);
391 arg1 = (arg1 & 0xFFFFFF00) | tmp;
393 if (GET_LMASK(arg2) >= 1) {
394 tmp = do_lbu(GET_OFFSET(arg2, -1), mem_idx);
395 arg1 = (arg1 & 0xFFFF00FF) | (tmp << 8);
398 if (GET_LMASK(arg2) >= 2) {
399 tmp = do_lbu(GET_OFFSET(arg2, -2), mem_idx);
400 arg1 = (arg1 & 0xFF00FFFF) | (tmp << 16);
403 if (GET_LMASK(arg2) == 3) {
404 tmp = do_lbu(GET_OFFSET(arg2, -3), mem_idx);
405 arg1 = (arg1 & 0x00FFFFFF) | (tmp << 24);
407 return (int32_t)arg1;
410 void helper_swl(target_ulong arg1, target_ulong arg2, int mem_idx)
412 do_sb(arg2, (uint8_t)(arg1 >> 24), mem_idx);
414 if (GET_LMASK(arg2) <= 2)
415 do_sb(GET_OFFSET(arg2, 1), (uint8_t)(arg1 >> 16), mem_idx);
417 if (GET_LMASK(arg2) <= 1)
418 do_sb(GET_OFFSET(arg2, 2), (uint8_t)(arg1 >> 8), mem_idx);
420 if (GET_LMASK(arg2) == 0)
421 do_sb(GET_OFFSET(arg2, 3), (uint8_t)arg1, mem_idx);
424 void helper_swr(target_ulong arg1, target_ulong arg2, int mem_idx)
426 do_sb(arg2, (uint8_t)arg1, mem_idx);
428 if (GET_LMASK(arg2) >= 1)
429 do_sb(GET_OFFSET(arg2, -1), (uint8_t)(arg1 >> 8), mem_idx);
431 if (GET_LMASK(arg2) >= 2)
432 do_sb(GET_OFFSET(arg2, -2), (uint8_t)(arg1 >> 16), mem_idx);
434 if (GET_LMASK(arg2) == 3)
435 do_sb(GET_OFFSET(arg2, -3), (uint8_t)(arg1 >> 24), mem_idx);
438 #if defined(TARGET_MIPS64)
439 /* "half" load and stores. We must do the memory access inline,
440 or fault handling won't work. */
442 #ifdef TARGET_WORDS_BIGENDIAN
443 #define GET_LMASK64(v) ((v) & 7)
445 #define GET_LMASK64(v) (((v) & 7) ^ 7)
448 target_ulong helper_ldl(target_ulong arg1, target_ulong arg2, int mem_idx)
452 tmp = do_lbu(arg2, mem_idx);
453 arg1 = (arg1 & 0x00FFFFFFFFFFFFFFULL) | (tmp << 56);
455 if (GET_LMASK64(arg2) <= 6) {
456 tmp = do_lbu(GET_OFFSET(arg2, 1), mem_idx);
457 arg1 = (arg1 & 0xFF00FFFFFFFFFFFFULL) | (tmp << 48);
460 if (GET_LMASK64(arg2) <= 5) {
461 tmp = do_lbu(GET_OFFSET(arg2, 2), mem_idx);
462 arg1 = (arg1 & 0xFFFF00FFFFFFFFFFULL) | (tmp << 40);
465 if (GET_LMASK64(arg2) <= 4) {
466 tmp = do_lbu(GET_OFFSET(arg2, 3), mem_idx);
467 arg1 = (arg1 & 0xFFFFFF00FFFFFFFFULL) | (tmp << 32);
470 if (GET_LMASK64(arg2) <= 3) {
471 tmp = do_lbu(GET_OFFSET(arg2, 4), mem_idx);
472 arg1 = (arg1 & 0xFFFFFFFF00FFFFFFULL) | (tmp << 24);
475 if (GET_LMASK64(arg2) <= 2) {
476 tmp = do_lbu(GET_OFFSET(arg2, 5), mem_idx);
477 arg1 = (arg1 & 0xFFFFFFFFFF00FFFFULL) | (tmp << 16);
480 if (GET_LMASK64(arg2) <= 1) {
481 tmp = do_lbu(GET_OFFSET(arg2, 6), mem_idx);
482 arg1 = (arg1 & 0xFFFFFFFFFFFF00FFULL) | (tmp << 8);
485 if (GET_LMASK64(arg2) == 0) {
486 tmp = do_lbu(GET_OFFSET(arg2, 7), mem_idx);
487 arg1 = (arg1 & 0xFFFFFFFFFFFFFF00ULL) | tmp;
493 target_ulong helper_ldr(target_ulong arg1, target_ulong arg2, int mem_idx)
497 tmp = do_lbu(arg2, mem_idx);
498 arg1 = (arg1 & 0xFFFFFFFFFFFFFF00ULL) | tmp;
500 if (GET_LMASK64(arg2) >= 1) {
501 tmp = do_lbu(GET_OFFSET(arg2, -1), mem_idx);
502 arg1 = (arg1 & 0xFFFFFFFFFFFF00FFULL) | (tmp << 8);
505 if (GET_LMASK64(arg2) >= 2) {
506 tmp = do_lbu(GET_OFFSET(arg2, -2), mem_idx);
507 arg1 = (arg1 & 0xFFFFFFFFFF00FFFFULL) | (tmp << 16);
510 if (GET_LMASK64(arg2) >= 3) {
511 tmp = do_lbu(GET_OFFSET(arg2, -3), mem_idx);
512 arg1 = (arg1 & 0xFFFFFFFF00FFFFFFULL) | (tmp << 24);
515 if (GET_LMASK64(arg2) >= 4) {
516 tmp = do_lbu(GET_OFFSET(arg2, -4), mem_idx);
517 arg1 = (arg1 & 0xFFFFFF00FFFFFFFFULL) | (tmp << 32);
520 if (GET_LMASK64(arg2) >= 5) {
521 tmp = do_lbu(GET_OFFSET(arg2, -5), mem_idx);
522 arg1 = (arg1 & 0xFFFF00FFFFFFFFFFULL) | (tmp << 40);
525 if (GET_LMASK64(arg2) >= 6) {
526 tmp = do_lbu(GET_OFFSET(arg2, -6), mem_idx);
527 arg1 = (arg1 & 0xFF00FFFFFFFFFFFFULL) | (tmp << 48);
530 if (GET_LMASK64(arg2) == 7) {
531 tmp = do_lbu(GET_OFFSET(arg2, -7), mem_idx);
532 arg1 = (arg1 & 0x00FFFFFFFFFFFFFFULL) | (tmp << 56);
538 void helper_sdl(target_ulong arg1, target_ulong arg2, int mem_idx)
540 do_sb(arg2, (uint8_t)(arg1 >> 56), mem_idx);
542 if (GET_LMASK64(arg2) <= 6)
543 do_sb(GET_OFFSET(arg2, 1), (uint8_t)(arg1 >> 48), mem_idx);
545 if (GET_LMASK64(arg2) <= 5)
546 do_sb(GET_OFFSET(arg2, 2), (uint8_t)(arg1 >> 40), mem_idx);
548 if (GET_LMASK64(arg2) <= 4)
549 do_sb(GET_OFFSET(arg2, 3), (uint8_t)(arg1 >> 32), mem_idx);
551 if (GET_LMASK64(arg2) <= 3)
552 do_sb(GET_OFFSET(arg2, 4), (uint8_t)(arg1 >> 24), mem_idx);
554 if (GET_LMASK64(arg2) <= 2)
555 do_sb(GET_OFFSET(arg2, 5), (uint8_t)(arg1 >> 16), mem_idx);
557 if (GET_LMASK64(arg2) <= 1)
558 do_sb(GET_OFFSET(arg2, 6), (uint8_t)(arg1 >> 8), mem_idx);
560 if (GET_LMASK64(arg2) <= 0)
561 do_sb(GET_OFFSET(arg2, 7), (uint8_t)arg1, mem_idx);
564 void helper_sdr(target_ulong arg1, target_ulong arg2, int mem_idx)
566 do_sb(arg2, (uint8_t)arg1, mem_idx);
568 if (GET_LMASK64(arg2) >= 1)
569 do_sb(GET_OFFSET(arg2, -1), (uint8_t)(arg1 >> 8), mem_idx);
571 if (GET_LMASK64(arg2) >= 2)
572 do_sb(GET_OFFSET(arg2, -2), (uint8_t)(arg1 >> 16), mem_idx);
574 if (GET_LMASK64(arg2) >= 3)
575 do_sb(GET_OFFSET(arg2, -3), (uint8_t)(arg1 >> 24), mem_idx);
577 if (GET_LMASK64(arg2) >= 4)
578 do_sb(GET_OFFSET(arg2, -4), (uint8_t)(arg1 >> 32), mem_idx);
580 if (GET_LMASK64(arg2) >= 5)
581 do_sb(GET_OFFSET(arg2, -5), (uint8_t)(arg1 >> 40), mem_idx);
583 if (GET_LMASK64(arg2) >= 6)
584 do_sb(GET_OFFSET(arg2, -6), (uint8_t)(arg1 >> 48), mem_idx);
586 if (GET_LMASK64(arg2) == 7)
587 do_sb(GET_OFFSET(arg2, -7), (uint8_t)(arg1 >> 56), mem_idx);
589 #endif /* TARGET_MIPS64 */
591 static const int multiple_regs[] = { 16, 17, 18, 19, 20, 21, 22, 23, 30 };
593 void helper_lwm (target_ulong addr, target_ulong reglist, uint32_t mem_idx)
595 target_ulong base_reglist = reglist & 0xf;
596 target_ulong do_r31 = reglist & 0x10;
597 #ifdef CONFIG_USER_ONLY
599 #define ldfun ldl_raw
601 uint32_t (*ldfun)(target_ulong);
605 case 0: ldfun = ldl_kernel; break;
606 case 1: ldfun = ldl_super; break;
608 case 2: ldfun = ldl_user; break;
612 if (base_reglist > 0 && base_reglist <= ARRAY_SIZE (multiple_regs)) {
615 for (i = 0; i < base_reglist; i++) {
616 env->active_tc.gpr[multiple_regs[i]] = (target_long) ldfun(addr);
622 env->active_tc.gpr[31] = (target_long) ldfun(addr);
626 void helper_swm (target_ulong addr, target_ulong reglist, uint32_t mem_idx)
628 target_ulong base_reglist = reglist & 0xf;
629 target_ulong do_r31 = reglist & 0x10;
630 #ifdef CONFIG_USER_ONLY
632 #define stfun stl_raw
634 void (*stfun)(target_ulong, uint32_t);
638 case 0: stfun = stl_kernel; break;
639 case 1: stfun = stl_super; break;
641 case 2: stfun = stl_user; break;
645 if (base_reglist > 0 && base_reglist <= ARRAY_SIZE (multiple_regs)) {
648 for (i = 0; i < base_reglist; i++) {
649 stfun(addr, env->active_tc.gpr[multiple_regs[i]]);
655 stfun(addr, env->active_tc.gpr[31]);
659 #if defined(TARGET_MIPS64)
660 void helper_ldm (target_ulong addr, target_ulong reglist, uint32_t mem_idx)
662 target_ulong base_reglist = reglist & 0xf;
663 target_ulong do_r31 = reglist & 0x10;
664 #ifdef CONFIG_USER_ONLY
666 #define ldfun ldq_raw
668 uint64_t (*ldfun)(target_ulong);
672 case 0: ldfun = ldq_kernel; break;
673 case 1: ldfun = ldq_super; break;
675 case 2: ldfun = ldq_user; break;
679 if (base_reglist > 0 && base_reglist <= ARRAY_SIZE (multiple_regs)) {
682 for (i = 0; i < base_reglist; i++) {
683 env->active_tc.gpr[multiple_regs[i]] = ldfun(addr);
689 env->active_tc.gpr[31] = ldfun(addr);
693 void helper_sdm (target_ulong addr, target_ulong reglist, uint32_t mem_idx)
695 target_ulong base_reglist = reglist & 0xf;
696 target_ulong do_r31 = reglist & 0x10;
697 #ifdef CONFIG_USER_ONLY
699 #define stfun stq_raw
701 void (*stfun)(target_ulong, uint64_t);
705 case 0: stfun = stq_kernel; break;
706 case 1: stfun = stq_super; break;
708 case 2: stfun = stq_user; break;
712 if (base_reglist > 0 && base_reglist <= ARRAY_SIZE (multiple_regs)) {
715 for (i = 0; i < base_reglist; i++) {
716 stfun(addr, env->active_tc.gpr[multiple_regs[i]]);
722 stfun(addr, env->active_tc.gpr[31]);
727 #ifndef CONFIG_USER_ONLY
729 static int mips_vpe_is_wfi(CPUMIPSState *c)
731 /* If the VPE is halted but otherwise active, it means it's waiting for
733 return c->halted && mips_vpe_active(c);
736 static inline void mips_vpe_wake(CPUMIPSState *c)
738 /* Dont set ->halted = 0 directly, let it be done via cpu_has_work
739 because there might be other conditions that state that c should
741 cpu_interrupt(c, CPU_INTERRUPT_WAKE);
744 static inline void mips_vpe_sleep(CPUMIPSState *c)
746 /* The VPE was shut off, really go to bed.
747 Reset any old _WAKE requests. */
749 cpu_reset_interrupt(c, CPU_INTERRUPT_WAKE);
752 static inline void mips_tc_wake(CPUMIPSState *c, int tc)
754 /* FIXME: TC reschedule. */
755 if (mips_vpe_active(c) && !mips_vpe_is_wfi(c)) {
760 static inline void mips_tc_sleep(CPUMIPSState *c, int tc)
762 /* FIXME: TC reschedule. */
763 if (!mips_vpe_active(c)) {
768 /* tc should point to an int with the value of the global TC index.
769 This function will transform it into a local index within the
770 returned CPUMIPSState.
772 FIXME: This code assumes that all VPEs have the same number of TCs,
773 which depends on runtime setup. Can probably be fixed by
774 walking the list of CPUMIPSStates. */
775 static CPUMIPSState *mips_cpu_map_tc(int *tc)
778 int vpe_idx, nr_threads = env->nr_threads;
781 if (!(env->CP0_VPEConf0 & (1 << CP0VPEC0_MVP))) {
782 /* Not allowed to address other CPUs. */
783 *tc = env->current_tc;
787 vpe_idx = tc_idx / nr_threads;
788 *tc = tc_idx % nr_threads;
789 other = qemu_get_cpu(vpe_idx);
790 return other ? other : env;
/* The per VPE CP0_Status register shares some fields with the per TC
   CP0_TCStatus registers. These fields are wired to the same registers,
   so changes to either of them should be reflected on both registers.

   Also, EntryHi shares the bottom 8 bit ASID with TCStatus.

   These helpers synchronize the regs for a given cpu.  */
801 /* Called for updates to CP0_Status. */
802 static void sync_c0_status(CPUMIPSState *cpu, int tc)
804 int32_t tcstatus, *tcst;
805 uint32_t v = cpu->CP0_Status;
806 uint32_t cu, mx, asid, ksu;
807 uint32_t mask = ((1 << CP0TCSt_TCU3)
808 | (1 << CP0TCSt_TCU2)
809 | (1 << CP0TCSt_TCU1)
810 | (1 << CP0TCSt_TCU0)
812 | (3 << CP0TCSt_TKSU)
813 | (0xff << CP0TCSt_TASID));
815 cu = (v >> CP0St_CU0) & 0xf;
816 mx = (v >> CP0St_MX) & 0x1;
817 ksu = (v >> CP0St_KSU) & 0x3;
818 asid = env->CP0_EntryHi & 0xff;
820 tcstatus = cu << CP0TCSt_TCU0;
821 tcstatus |= mx << CP0TCSt_TMX;
822 tcstatus |= ksu << CP0TCSt_TKSU;
825 if (tc == cpu->current_tc) {
826 tcst = &cpu->active_tc.CP0_TCStatus;
828 tcst = &cpu->tcs[tc].CP0_TCStatus;
836 /* Called for updates to CP0_TCStatus. */
837 static void sync_c0_tcstatus(CPUMIPSState *cpu, int tc, target_ulong v)
840 uint32_t tcu, tmx, tasid, tksu;
841 uint32_t mask = ((1 << CP0St_CU3)
848 tcu = (v >> CP0TCSt_TCU0) & 0xf;
849 tmx = (v >> CP0TCSt_TMX) & 0x1;
851 tksu = (v >> CP0TCSt_TKSU) & 0x3;
853 status = tcu << CP0St_CU0;
854 status |= tmx << CP0St_MX;
855 status |= tksu << CP0St_KSU;
857 cpu->CP0_Status &= ~mask;
858 cpu->CP0_Status |= status;
860 /* Sync the TASID with EntryHi. */
861 cpu->CP0_EntryHi &= ~0xff;
862 cpu->CP0_EntryHi = tasid;
867 /* Called for updates to CP0_EntryHi. */
868 static void sync_c0_entryhi(CPUMIPSState *cpu, int tc)
871 uint32_t asid, v = cpu->CP0_EntryHi;
875 if (tc == cpu->current_tc) {
876 tcst = &cpu->active_tc.CP0_TCStatus;
878 tcst = &cpu->tcs[tc].CP0_TCStatus;
886 target_ulong helper_mfc0_mvpcontrol (void)
888 return env->mvp->CP0_MVPControl;
891 target_ulong helper_mfc0_mvpconf0 (void)
893 return env->mvp->CP0_MVPConf0;
896 target_ulong helper_mfc0_mvpconf1 (void)
898 return env->mvp->CP0_MVPConf1;
901 target_ulong helper_mfc0_random (void)
903 return (int32_t)cpu_mips_get_random(env);
906 target_ulong helper_mfc0_tcstatus (void)
908 return env->active_tc.CP0_TCStatus;
911 target_ulong helper_mftc0_tcstatus(void)
913 int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
914 CPUMIPSState *other = mips_cpu_map_tc(&other_tc);
916 if (other_tc == other->current_tc)
917 return other->active_tc.CP0_TCStatus;
919 return other->tcs[other_tc].CP0_TCStatus;
922 target_ulong helper_mfc0_tcbind (void)
924 return env->active_tc.CP0_TCBind;
927 target_ulong helper_mftc0_tcbind(void)
929 int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
930 CPUMIPSState *other = mips_cpu_map_tc(&other_tc);
932 if (other_tc == other->current_tc)
933 return other->active_tc.CP0_TCBind;
935 return other->tcs[other_tc].CP0_TCBind;
938 target_ulong helper_mfc0_tcrestart (void)
940 return env->active_tc.PC;
943 target_ulong helper_mftc0_tcrestart(void)
945 int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
946 CPUMIPSState *other = mips_cpu_map_tc(&other_tc);
948 if (other_tc == other->current_tc)
949 return other->active_tc.PC;
951 return other->tcs[other_tc].PC;
954 target_ulong helper_mfc0_tchalt (void)
956 return env->active_tc.CP0_TCHalt;
959 target_ulong helper_mftc0_tchalt(void)
961 int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
962 CPUMIPSState *other = mips_cpu_map_tc(&other_tc);
964 if (other_tc == other->current_tc)
965 return other->active_tc.CP0_TCHalt;
967 return other->tcs[other_tc].CP0_TCHalt;
970 target_ulong helper_mfc0_tccontext (void)
972 return env->active_tc.CP0_TCContext;
975 target_ulong helper_mftc0_tccontext(void)
977 int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
978 CPUMIPSState *other = mips_cpu_map_tc(&other_tc);
980 if (other_tc == other->current_tc)
981 return other->active_tc.CP0_TCContext;
983 return other->tcs[other_tc].CP0_TCContext;
986 target_ulong helper_mfc0_tcschedule (void)
988 return env->active_tc.CP0_TCSchedule;
991 target_ulong helper_mftc0_tcschedule(void)
993 int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
994 CPUMIPSState *other = mips_cpu_map_tc(&other_tc);
996 if (other_tc == other->current_tc)
997 return other->active_tc.CP0_TCSchedule;
999 return other->tcs[other_tc].CP0_TCSchedule;
1002 target_ulong helper_mfc0_tcschefback (void)
1004 return env->active_tc.CP0_TCScheFBack;
1007 target_ulong helper_mftc0_tcschefback(void)
1009 int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1010 CPUMIPSState *other = mips_cpu_map_tc(&other_tc);
1012 if (other_tc == other->current_tc)
1013 return other->active_tc.CP0_TCScheFBack;
1015 return other->tcs[other_tc].CP0_TCScheFBack;
1018 target_ulong helper_mfc0_count (void)
1020 return (int32_t)cpu_mips_get_count(env);
1023 target_ulong helper_mftc0_entryhi(void)
1025 int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1026 CPUMIPSState *other = mips_cpu_map_tc(&other_tc);
1028 return other->CP0_EntryHi;
1031 target_ulong helper_mftc0_cause(void)
1033 int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1035 CPUMIPSState *other = mips_cpu_map_tc(&other_tc);
1037 if (other_tc == other->current_tc) {
1038 tccause = other->CP0_Cause;
1040 tccause = other->CP0_Cause;
1046 target_ulong helper_mftc0_status(void)
1048 int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1049 CPUMIPSState *other = mips_cpu_map_tc(&other_tc);
1051 return other->CP0_Status;
1054 target_ulong helper_mfc0_lladdr (void)
1056 return (int32_t)(env->lladdr >> env->CP0_LLAddr_shift);
1059 target_ulong helper_mfc0_watchlo (uint32_t sel)
1061 return (int32_t)env->CP0_WatchLo[sel];
1064 target_ulong helper_mfc0_watchhi (uint32_t sel)
1066 return env->CP0_WatchHi[sel];
1069 target_ulong helper_mfc0_debug (void)
1071 target_ulong t0 = env->CP0_Debug;
1072 if (env->hflags & MIPS_HFLAG_DM)
1073 t0 |= 1 << CP0DB_DM;
1078 target_ulong helper_mftc0_debug(void)
1080 int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1082 CPUMIPSState *other = mips_cpu_map_tc(&other_tc);
1084 if (other_tc == other->current_tc)
1085 tcstatus = other->active_tc.CP0_Debug_tcstatus;
1087 tcstatus = other->tcs[other_tc].CP0_Debug_tcstatus;
1089 /* XXX: Might be wrong, check with EJTAG spec. */
1090 return (other->CP0_Debug & ~((1 << CP0DB_SSt) | (1 << CP0DB_Halt))) |
1091 (tcstatus & ((1 << CP0DB_SSt) | (1 << CP0DB_Halt)));
1094 #if defined(TARGET_MIPS64)
1095 target_ulong helper_dmfc0_tcrestart (void)
1097 return env->active_tc.PC;
1100 target_ulong helper_dmfc0_tchalt (void)
1102 return env->active_tc.CP0_TCHalt;
1105 target_ulong helper_dmfc0_tccontext (void)
1107 return env->active_tc.CP0_TCContext;
1110 target_ulong helper_dmfc0_tcschedule (void)
1112 return env->active_tc.CP0_TCSchedule;
1115 target_ulong helper_dmfc0_tcschefback (void)
1117 return env->active_tc.CP0_TCScheFBack;
1120 target_ulong helper_dmfc0_lladdr (void)
1122 return env->lladdr >> env->CP0_LLAddr_shift;
1125 target_ulong helper_dmfc0_watchlo (uint32_t sel)
1127 return env->CP0_WatchLo[sel];
1129 #endif /* TARGET_MIPS64 */
1131 void helper_mtc0_index (target_ulong arg1)
1134 unsigned int tmp = env->tlb->nb_tlb;
1140 env->CP0_Index = (env->CP0_Index & 0x80000000) | (arg1 & (num - 1));
1143 void helper_mtc0_mvpcontrol (target_ulong arg1)
1148 if (env->CP0_VPEConf0 & (1 << CP0VPEC0_MVP))
1149 mask |= (1 << CP0MVPCo_CPA) | (1 << CP0MVPCo_VPC) |
1150 (1 << CP0MVPCo_EVP);
1151 if (env->mvp->CP0_MVPControl & (1 << CP0MVPCo_VPC))
1152 mask |= (1 << CP0MVPCo_STLB);
1153 newval = (env->mvp->CP0_MVPControl & ~mask) | (arg1 & mask);
1155 // TODO: Enable/disable shared TLB, enable/disable VPEs.
1157 env->mvp->CP0_MVPControl = newval;
1160 void helper_mtc0_vpecontrol (target_ulong arg1)
1165 mask = (1 << CP0VPECo_YSI) | (1 << CP0VPECo_GSI) |
1166 (1 << CP0VPECo_TE) | (0xff << CP0VPECo_TargTC);
1167 newval = (env->CP0_VPEControl & ~mask) | (arg1 & mask);
1169 /* Yield scheduler intercept not implemented. */
1170 /* Gating storage scheduler intercept not implemented. */
1172 // TODO: Enable/disable TCs.
1174 env->CP0_VPEControl = newval;
1177 void helper_mttc0_vpecontrol(target_ulong arg1)
1179 int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1180 CPUMIPSState *other = mips_cpu_map_tc(&other_tc);
1184 mask = (1 << CP0VPECo_YSI) | (1 << CP0VPECo_GSI) |
1185 (1 << CP0VPECo_TE) | (0xff << CP0VPECo_TargTC);
1186 newval = (other->CP0_VPEControl & ~mask) | (arg1 & mask);
1188 /* TODO: Enable/disable TCs. */
1190 other->CP0_VPEControl = newval;
1193 target_ulong helper_mftc0_vpecontrol(void)
1195 int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1196 CPUMIPSState *other = mips_cpu_map_tc(&other_tc);
1197 /* FIXME: Mask away return zero on read bits. */
1198 return other->CP0_VPEControl;
1201 target_ulong helper_mftc0_vpeconf0(void)
1203 int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1204 CPUMIPSState *other = mips_cpu_map_tc(&other_tc);
1206 return other->CP0_VPEConf0;
1209 void helper_mtc0_vpeconf0 (target_ulong arg1)
1214 if (env->CP0_VPEConf0 & (1 << CP0VPEC0_MVP)) {
1215 if (env->CP0_VPEConf0 & (1 << CP0VPEC0_VPA))
1216 mask |= (0xff << CP0VPEC0_XTC);
1217 mask |= (1 << CP0VPEC0_MVP) | (1 << CP0VPEC0_VPA);
1219 newval = (env->CP0_VPEConf0 & ~mask) | (arg1 & mask);
1221 // TODO: TC exclusive handling due to ERL/EXL.
1223 env->CP0_VPEConf0 = newval;
1226 void helper_mttc0_vpeconf0(target_ulong arg1)
1228 int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1229 CPUMIPSState *other = mips_cpu_map_tc(&other_tc);
1233 mask |= (1 << CP0VPEC0_MVP) | (1 << CP0VPEC0_VPA);
1234 newval = (other->CP0_VPEConf0 & ~mask) | (arg1 & mask);
1236 /* TODO: TC exclusive handling due to ERL/EXL. */
1237 other->CP0_VPEConf0 = newval;
1240 void helper_mtc0_vpeconf1 (target_ulong arg1)
1245 if (env->mvp->CP0_MVPControl & (1 << CP0MVPCo_VPC))
1246 mask |= (0xff << CP0VPEC1_NCX) | (0xff << CP0VPEC1_NCP2) |
1247 (0xff << CP0VPEC1_NCP1);
1248 newval = (env->CP0_VPEConf1 & ~mask) | (arg1 & mask);
1250 /* UDI not implemented. */
1251 /* CP2 not implemented. */
1253 // TODO: Handle FPU (CP1) binding.
1255 env->CP0_VPEConf1 = newval;
1258 void helper_mtc0_yqmask (target_ulong arg1)
1260 /* Yield qualifier inputs not implemented. */
1261 env->CP0_YQMask = 0x00000000;
1264 void helper_mtc0_vpeopt (target_ulong arg1)
1266 env->CP0_VPEOpt = arg1 & 0x0000ffff;
1269 void helper_mtc0_entrylo0 (target_ulong arg1)
1271 /* Large physaddr (PABITS) not implemented */
1272 /* 1k pages not implemented */
1273 env->CP0_EntryLo0 = arg1 & 0x3FFFFFFF;
1276 void helper_mtc0_tcstatus (target_ulong arg1)
1278 uint32_t mask = env->CP0_TCStatus_rw_bitmask;
1281 newval = (env->active_tc.CP0_TCStatus & ~mask) | (arg1 & mask);
1283 env->active_tc.CP0_TCStatus = newval;
1284 sync_c0_tcstatus(env, env->current_tc, newval);
1287 void helper_mttc0_tcstatus (target_ulong arg1)
1289 int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1290 CPUMIPSState *other = mips_cpu_map_tc(&other_tc);
1292 if (other_tc == other->current_tc)
1293 other->active_tc.CP0_TCStatus = arg1;
1295 other->tcs[other_tc].CP0_TCStatus = arg1;
1296 sync_c0_tcstatus(other, other_tc, arg1);
1299 void helper_mtc0_tcbind (target_ulong arg1)
1301 uint32_t mask = (1 << CP0TCBd_TBE);
1304 if (env->mvp->CP0_MVPControl & (1 << CP0MVPCo_VPC))
1305 mask |= (1 << CP0TCBd_CurVPE);
1306 newval = (env->active_tc.CP0_TCBind & ~mask) | (arg1 & mask);
1307 env->active_tc.CP0_TCBind = newval;
1310 void helper_mttc0_tcbind (target_ulong arg1)
1312 int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1313 uint32_t mask = (1 << CP0TCBd_TBE);
1315 CPUMIPSState *other = mips_cpu_map_tc(&other_tc);
1317 if (other->mvp->CP0_MVPControl & (1 << CP0MVPCo_VPC))
1318 mask |= (1 << CP0TCBd_CurVPE);
1319 if (other_tc == other->current_tc) {
1320 newval = (other->active_tc.CP0_TCBind & ~mask) | (arg1 & mask);
1321 other->active_tc.CP0_TCBind = newval;
1323 newval = (other->tcs[other_tc].CP0_TCBind & ~mask) | (arg1 & mask);
1324 other->tcs[other_tc].CP0_TCBind = newval;
1328 void helper_mtc0_tcrestart (target_ulong arg1)
1330 env->active_tc.PC = arg1;
1331 env->active_tc.CP0_TCStatus &= ~(1 << CP0TCSt_TDS);
1333 /* MIPS16 not implemented. */
1336 void helper_mttc0_tcrestart (target_ulong arg1)
1338 int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1339 CPUMIPSState *other = mips_cpu_map_tc(&other_tc);
1341 if (other_tc == other->current_tc) {
1342 other->active_tc.PC = arg1;
1343 other->active_tc.CP0_TCStatus &= ~(1 << CP0TCSt_TDS);
1344 other->lladdr = 0ULL;
1345 /* MIPS16 not implemented. */
1347 other->tcs[other_tc].PC = arg1;
1348 other->tcs[other_tc].CP0_TCStatus &= ~(1 << CP0TCSt_TDS);
1349 other->lladdr = 0ULL;
1350 /* MIPS16 not implemented. */
1354 void helper_mtc0_tchalt (target_ulong arg1)
1356 env->active_tc.CP0_TCHalt = arg1 & 0x1;
1358 // TODO: Halt TC / Restart (if allocated+active) TC.
1359 if (env->active_tc.CP0_TCHalt & 1) {
1360 mips_tc_sleep(env, env->current_tc);
1362 mips_tc_wake(env, env->current_tc);
1366 void helper_mttc0_tchalt (target_ulong arg1)
1368 int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1369 CPUMIPSState *other = mips_cpu_map_tc(&other_tc);
1371 // TODO: Halt TC / Restart (if allocated+active) TC.
1373 if (other_tc == other->current_tc)
1374 other->active_tc.CP0_TCHalt = arg1;
1376 other->tcs[other_tc].CP0_TCHalt = arg1;
1379 mips_tc_sleep(other, other_tc);
1381 mips_tc_wake(other, other_tc);
1385 void helper_mtc0_tccontext (target_ulong arg1)
1387 env->active_tc.CP0_TCContext = arg1;
1390 void helper_mttc0_tccontext (target_ulong arg1)
1392 int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1393 CPUMIPSState *other = mips_cpu_map_tc(&other_tc);
1395 if (other_tc == other->current_tc)
1396 other->active_tc.CP0_TCContext = arg1;
1398 other->tcs[other_tc].CP0_TCContext = arg1;
1401 void helper_mtc0_tcschedule (target_ulong arg1)
1403 env->active_tc.CP0_TCSchedule = arg1;
1406 void helper_mttc0_tcschedule (target_ulong arg1)
1408 int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1409 CPUMIPSState *other = mips_cpu_map_tc(&other_tc);
1411 if (other_tc == other->current_tc)
1412 other->active_tc.CP0_TCSchedule = arg1;
1414 other->tcs[other_tc].CP0_TCSchedule = arg1;
1417 void helper_mtc0_tcschefback (target_ulong arg1)
1419 env->active_tc.CP0_TCScheFBack = arg1;
1422 void helper_mttc0_tcschefback (target_ulong arg1)
1424 int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1425 CPUMIPSState *other = mips_cpu_map_tc(&other_tc);
1427 if (other_tc == other->current_tc)
1428 other->active_tc.CP0_TCScheFBack = arg1;
1430 other->tcs[other_tc].CP0_TCScheFBack = arg1;
1433 void helper_mtc0_entrylo1 (target_ulong arg1)
1435 /* Large physaddr (PABITS) not implemented */
1436 /* 1k pages not implemented */
1437 env->CP0_EntryLo1 = arg1 & 0x3FFFFFFF;
1440 void helper_mtc0_context (target_ulong arg1)
1442 env->CP0_Context = (env->CP0_Context & 0x007FFFFF) | (arg1 & ~0x007FFFFF);
1445 void helper_mtc0_pagemask (target_ulong arg1)
1447 /* 1k pages not implemented */
1448 env->CP0_PageMask = arg1 & (0x1FFFFFFF & (TARGET_PAGE_MASK << 1));
1451 void helper_mtc0_pagegrain (target_ulong arg1)
1453 /* SmartMIPS not implemented */
1454 /* Large physaddr (PABITS) not implemented */
1455 /* 1k pages not implemented */
1456 env->CP0_PageGrain = 0;
1459 void helper_mtc0_wired (target_ulong arg1)
1461 env->CP0_Wired = arg1 % env->tlb->nb_tlb;
1464 void helper_mtc0_srsconf0 (target_ulong arg1)
1466 env->CP0_SRSConf0 |= arg1 & env->CP0_SRSConf0_rw_bitmask;
1469 void helper_mtc0_srsconf1 (target_ulong arg1)
1471 env->CP0_SRSConf1 |= arg1 & env->CP0_SRSConf1_rw_bitmask;
1474 void helper_mtc0_srsconf2 (target_ulong arg1)
1476 env->CP0_SRSConf2 |= arg1 & env->CP0_SRSConf2_rw_bitmask;
1479 void helper_mtc0_srsconf3 (target_ulong arg1)
1481 env->CP0_SRSConf3 |= arg1 & env->CP0_SRSConf3_rw_bitmask;
1484 void helper_mtc0_srsconf4 (target_ulong arg1)
1486 env->CP0_SRSConf4 |= arg1 & env->CP0_SRSConf4_rw_bitmask;
1489 void helper_mtc0_hwrena (target_ulong arg1)
1491 env->CP0_HWREna = arg1 & 0x0000000F;
1494 void helper_mtc0_count (target_ulong arg1)
1496 cpu_mips_store_count(env, arg1);
1499 void helper_mtc0_entryhi (target_ulong arg1)
1501 target_ulong old, val;
1503 /* 1k pages not implemented */
1504 val = arg1 & ((TARGET_PAGE_MASK << 1) | 0xFF);
1505 #if defined(TARGET_MIPS64)
1506 val &= env->SEGMask;
1508 old = env->CP0_EntryHi;
1509 env->CP0_EntryHi = val;
1510 if (env->CP0_Config3 & (1 << CP0C3_MT)) {
1511 sync_c0_entryhi(env, env->current_tc);
1513 /* If the ASID changes, flush qemu's TLB. */
1514 if ((old & 0xFF) != (val & 0xFF))
1515 cpu_mips_tlb_flush(env, 1);
1518 void helper_mttc0_entryhi(target_ulong arg1)
1520 int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1521 CPUMIPSState *other = mips_cpu_map_tc(&other_tc);
1523 other->CP0_EntryHi = arg1;
1524 sync_c0_entryhi(other, other_tc);
1527 void helper_mtc0_compare (target_ulong arg1)
1529 cpu_mips_store_compare(env, arg1);
1532 void helper_mtc0_status (target_ulong arg1)
1535 uint32_t mask = env->CP0_Status_rw_bitmask;
1538 old = env->CP0_Status;
1539 env->CP0_Status = (env->CP0_Status & ~mask) | val;
1540 if (env->CP0_Config3 & (1 << CP0C3_MT)) {
1541 sync_c0_status(env, env->current_tc);
1543 compute_hflags(env);
1546 if (qemu_loglevel_mask(CPU_LOG_EXEC)) {
1547 qemu_log("Status %08x (%08x) => %08x (%08x) Cause %08x",
1548 old, old & env->CP0_Cause & CP0Ca_IP_mask,
1549 val, val & env->CP0_Cause & CP0Ca_IP_mask,
1551 switch (env->hflags & MIPS_HFLAG_KSU) {
1552 case MIPS_HFLAG_UM: qemu_log(", UM\n"); break;
1553 case MIPS_HFLAG_SM: qemu_log(", SM\n"); break;
1554 case MIPS_HFLAG_KM: qemu_log("\n"); break;
1555 default: cpu_abort(env, "Invalid MMU mode!\n"); break;
1560 void helper_mttc0_status(target_ulong arg1)
1562 int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1563 CPUMIPSState *other = mips_cpu_map_tc(&other_tc);
1565 other->CP0_Status = arg1 & ~0xf1000018;
1566 sync_c0_status(other, other_tc);
1569 void helper_mtc0_intctl (target_ulong arg1)
1571 /* vectored interrupts not implemented, no performance counters. */
1572 env->CP0_IntCtl = (env->CP0_IntCtl & ~0x000003e0) | (arg1 & 0x000003e0);
1575 void helper_mtc0_srsctl (target_ulong arg1)
1577 uint32_t mask = (0xf << CP0SRSCtl_ESS) | (0xf << CP0SRSCtl_PSS);
1578 env->CP0_SRSCtl = (env->CP0_SRSCtl & ~mask) | (arg1 & mask);
1581 static void mtc0_cause(CPUMIPSState *cpu, target_ulong arg1)
1583 uint32_t mask = 0x00C00300;
1584 uint32_t old = cpu->CP0_Cause;
1587 if (cpu->insn_flags & ISA_MIPS32R2) {
1588 mask |= 1 << CP0Ca_DC;
1591 cpu->CP0_Cause = (cpu->CP0_Cause & ~mask) | (arg1 & mask);
1593 if ((old ^ cpu->CP0_Cause) & (1 << CP0Ca_DC)) {
1594 if (cpu->CP0_Cause & (1 << CP0Ca_DC)) {
1595 cpu_mips_stop_count(cpu);
1597 cpu_mips_start_count(cpu);
1601 /* Set/reset software interrupts */
1602 for (i = 0 ; i < 2 ; i++) {
1603 if ((old ^ cpu->CP0_Cause) & (1 << (CP0Ca_IP + i))) {
1604 cpu_mips_soft_irq(cpu, i, cpu->CP0_Cause & (1 << (CP0Ca_IP + i)));
1609 void helper_mtc0_cause(target_ulong arg1)
1611 mtc0_cause(env, arg1);
1614 void helper_mttc0_cause(target_ulong arg1)
1616 int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1617 CPUMIPSState *other = mips_cpu_map_tc(&other_tc);
1619 mtc0_cause(other, arg1);
1622 target_ulong helper_mftc0_epc(void)
1624 int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1625 CPUMIPSState *other = mips_cpu_map_tc(&other_tc);
1627 return other->CP0_EPC;
1630 target_ulong helper_mftc0_ebase(void)
1632 int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1633 CPUMIPSState *other = mips_cpu_map_tc(&other_tc);
1635 return other->CP0_EBase;
1638 void helper_mtc0_ebase (target_ulong arg1)
1640 /* vectored interrupts not implemented */
1641 env->CP0_EBase = (env->CP0_EBase & ~0x3FFFF000) | (arg1 & 0x3FFFF000);
1644 void helper_mttc0_ebase(target_ulong arg1)
1646 int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1647 CPUMIPSState *other = mips_cpu_map_tc(&other_tc);
1648 other->CP0_EBase = (other->CP0_EBase & ~0x3FFFF000) | (arg1 & 0x3FFFF000);
1651 target_ulong helper_mftc0_configx(target_ulong idx)
1653 int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1654 CPUMIPSState *other = mips_cpu_map_tc(&other_tc);
1657 case 0: return other->CP0_Config0;
1658 case 1: return other->CP0_Config1;
1659 case 2: return other->CP0_Config2;
1660 case 3: return other->CP0_Config3;
1661 /* 4 and 5 are reserved. */
1662 case 6: return other->CP0_Config6;
1663 case 7: return other->CP0_Config7;
1670 void helper_mtc0_config0 (target_ulong arg1)
1672 env->CP0_Config0 = (env->CP0_Config0 & 0x81FFFFF8) | (arg1 & 0x00000007);
1675 void helper_mtc0_config2 (target_ulong arg1)
1677 /* tertiary/secondary caches not implemented */
1678 env->CP0_Config2 = (env->CP0_Config2 & 0x8FFF0FFF);
1681 void helper_mtc0_lladdr (target_ulong arg1)
1683 target_long mask = env->CP0_LLAddr_rw_bitmask;
1684 arg1 = arg1 << env->CP0_LLAddr_shift;
1685 env->lladdr = (env->lladdr & ~mask) | (arg1 & mask);
1688 void helper_mtc0_watchlo (target_ulong arg1, uint32_t sel)
1690 /* Watch exceptions for instructions, data loads, data stores
1692 env->CP0_WatchLo[sel] = (arg1 & ~0x7);
1695 void helper_mtc0_watchhi (target_ulong arg1, uint32_t sel)
1697 env->CP0_WatchHi[sel] = (arg1 & 0x40FF0FF8);
1698 env->CP0_WatchHi[sel] &= ~(env->CP0_WatchHi[sel] & arg1 & 0x7);
1701 void helper_mtc0_xcontext (target_ulong arg1)
1703 target_ulong mask = (1ULL << (env->SEGBITS - 7)) - 1;
1704 env->CP0_XContext = (env->CP0_XContext & mask) | (arg1 & ~mask);
1707 void helper_mtc0_framemask (target_ulong arg1)
1709 env->CP0_Framemask = arg1; /* XXX */
1712 void helper_mtc0_debug (target_ulong arg1)
1714 env->CP0_Debug = (env->CP0_Debug & 0x8C03FC1F) | (arg1 & 0x13300120);
1715 if (arg1 & (1 << CP0DB_DM))
1716 env->hflags |= MIPS_HFLAG_DM;
1718 env->hflags &= ~MIPS_HFLAG_DM;
1721 void helper_mttc0_debug(target_ulong arg1)
1723 int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1724 uint32_t val = arg1 & ((1 << CP0DB_SSt) | (1 << CP0DB_Halt));
1725 CPUMIPSState *other = mips_cpu_map_tc(&other_tc);
1727 /* XXX: Might be wrong, check with EJTAG spec. */
1728 if (other_tc == other->current_tc)
1729 other->active_tc.CP0_Debug_tcstatus = val;
1731 other->tcs[other_tc].CP0_Debug_tcstatus = val;
1732 other->CP0_Debug = (other->CP0_Debug &
1733 ((1 << CP0DB_SSt) | (1 << CP0DB_Halt))) |
1734 (arg1 & ~((1 << CP0DB_SSt) | (1 << CP0DB_Halt)));
1737 void helper_mtc0_performance0 (target_ulong arg1)
1739 env->CP0_Performance0 = arg1 & 0x000007ff;
1742 void helper_mtc0_taglo (target_ulong arg1)
1744 env->CP0_TagLo = arg1 & 0xFFFFFCF6;
1747 void helper_mtc0_datalo (target_ulong arg1)
1749 env->CP0_DataLo = arg1; /* XXX */
1752 void helper_mtc0_taghi (target_ulong arg1)
1754 env->CP0_TagHi = arg1; /* XXX */
1757 void helper_mtc0_datahi (target_ulong arg1)
1759 env->CP0_DataHi = arg1; /* XXX */
1762 /* MIPS MT functions */
1763 target_ulong helper_mftgpr(uint32_t sel)
1765 int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1766 CPUMIPSState *other = mips_cpu_map_tc(&other_tc);
1768 if (other_tc == other->current_tc)
1769 return other->active_tc.gpr[sel];
1771 return other->tcs[other_tc].gpr[sel];
1774 target_ulong helper_mftlo(uint32_t sel)
1776 int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1777 CPUMIPSState *other = mips_cpu_map_tc(&other_tc);
1779 if (other_tc == other->current_tc)
1780 return other->active_tc.LO[sel];
1782 return other->tcs[other_tc].LO[sel];
1785 target_ulong helper_mfthi(uint32_t sel)
1787 int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1788 CPUMIPSState *other = mips_cpu_map_tc(&other_tc);
1790 if (other_tc == other->current_tc)
1791 return other->active_tc.HI[sel];
1793 return other->tcs[other_tc].HI[sel];
1796 target_ulong helper_mftacx(uint32_t sel)
1798 int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1799 CPUMIPSState *other = mips_cpu_map_tc(&other_tc);
1801 if (other_tc == other->current_tc)
1802 return other->active_tc.ACX[sel];
1804 return other->tcs[other_tc].ACX[sel];
1807 target_ulong helper_mftdsp(void)
1809 int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1810 CPUMIPSState *other = mips_cpu_map_tc(&other_tc);
1812 if (other_tc == other->current_tc)
1813 return other->active_tc.DSPControl;
1815 return other->tcs[other_tc].DSPControl;
1818 void helper_mttgpr(target_ulong arg1, uint32_t sel)
1820 int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1821 CPUMIPSState *other = mips_cpu_map_tc(&other_tc);
1823 if (other_tc == other->current_tc)
1824 other->active_tc.gpr[sel] = arg1;
1826 other->tcs[other_tc].gpr[sel] = arg1;
1829 void helper_mttlo(target_ulong arg1, uint32_t sel)
1831 int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1832 CPUMIPSState *other = mips_cpu_map_tc(&other_tc);
1834 if (other_tc == other->current_tc)
1835 other->active_tc.LO[sel] = arg1;
1837 other->tcs[other_tc].LO[sel] = arg1;
1840 void helper_mtthi(target_ulong arg1, uint32_t sel)
1842 int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1843 CPUMIPSState *other = mips_cpu_map_tc(&other_tc);
1845 if (other_tc == other->current_tc)
1846 other->active_tc.HI[sel] = arg1;
1848 other->tcs[other_tc].HI[sel] = arg1;
1851 void helper_mttacx(target_ulong arg1, uint32_t sel)
1853 int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1854 CPUMIPSState *other = mips_cpu_map_tc(&other_tc);
1856 if (other_tc == other->current_tc)
1857 other->active_tc.ACX[sel] = arg1;
1859 other->tcs[other_tc].ACX[sel] = arg1;
1862 void helper_mttdsp(target_ulong arg1)
1864 int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1865 CPUMIPSState *other = mips_cpu_map_tc(&other_tc);
1867 if (other_tc == other->current_tc)
1868 other->active_tc.DSPControl = arg1;
1870 other->tcs[other_tc].DSPControl = arg1;
1873 /* MIPS MT functions */
1874 target_ulong helper_dmt(void)
1880 target_ulong helper_emt(void)
1886 target_ulong helper_dvpe(void)
1888 CPUMIPSState *other_cpu = first_cpu;
1889 target_ulong prev = env->mvp->CP0_MVPControl;
1892 /* Turn off all VPEs except the one executing the dvpe. */
1893 if (other_cpu != env) {
1894 other_cpu->mvp->CP0_MVPControl &= ~(1 << CP0MVPCo_EVP);
1895 mips_vpe_sleep(other_cpu);
1897 other_cpu = other_cpu->next_cpu;
1898 } while (other_cpu);
1902 target_ulong helper_evpe(void)
1904 CPUMIPSState *other_cpu = first_cpu;
1905 target_ulong prev = env->mvp->CP0_MVPControl;
1908 if (other_cpu != env
1909 /* If the VPE is WFI, don't disturb its sleep. */
1910 && !mips_vpe_is_wfi(other_cpu)) {
1911 /* Enable the VPE. */
1912 other_cpu->mvp->CP0_MVPControl |= (1 << CP0MVPCo_EVP);
1913 mips_vpe_wake(other_cpu); /* And wake it up. */
1915 other_cpu = other_cpu->next_cpu;
1916 } while (other_cpu);
1919 #endif /* !CONFIG_USER_ONLY */
1921 void helper_fork(target_ulong arg1, target_ulong arg2)
1923 // arg1 = rt, arg2 = rs
1925 // TODO: store to TC register
1928 target_ulong helper_yield(target_ulong arg)
1930 target_long arg1 = arg;
1933 /* No scheduling policy implemented. */
1935 if (env->CP0_VPEControl & (1 << CP0VPECo_YSI) &&
1936 env->active_tc.CP0_TCStatus & (1 << CP0TCSt_DT)) {
1937 env->CP0_VPEControl &= ~(0x7 << CP0VPECo_EXCPT);
1938 env->CP0_VPEControl |= 4 << CP0VPECo_EXCPT;
1939 helper_raise_exception(EXCP_THREAD);
1942 } else if (arg1 == 0) {
1943 if (0 /* TODO: TC underflow */) {
1944 env->CP0_VPEControl &= ~(0x7 << CP0VPECo_EXCPT);
1945 helper_raise_exception(EXCP_THREAD);
1947 // TODO: Deallocate TC
1949 } else if (arg1 > 0) {
1950 /* Yield qualifier inputs not implemented. */
1951 env->CP0_VPEControl &= ~(0x7 << CP0VPECo_EXCPT);
1952 env->CP0_VPEControl |= 2 << CP0VPECo_EXCPT;
1953 helper_raise_exception(EXCP_THREAD);
1955 return env->CP0_YQMask;
1958 #ifndef CONFIG_USER_ONLY
1959 /* TLB management */
1960 static void cpu_mips_tlb_flush (CPUMIPSState *env, int flush_global)
1962 /* Flush qemu's TLB and discard all shadowed entries. */
1963 tlb_flush (env, flush_global);
1964 env->tlb->tlb_in_use = env->tlb->nb_tlb;
1967 static void r4k_mips_tlb_flush_extra (CPUMIPSState *env, int first)
1969 /* Discard entries from env->tlb[first] onwards. */
1970 while (env->tlb->tlb_in_use > first) {
1971 r4k_invalidate_tlb(env, --env->tlb->tlb_in_use, 0);
1975 static void r4k_fill_tlb (int idx)
1979 /* XXX: detect conflicting TLBs and raise a MCHECK exception when needed */
1980 tlb = &env->tlb->mmu.r4k.tlb[idx];
1981 tlb->VPN = env->CP0_EntryHi & (TARGET_PAGE_MASK << 1);
1982 #if defined(TARGET_MIPS64)
1983 tlb->VPN &= env->SEGMask;
1985 tlb->ASID = env->CP0_EntryHi & 0xFF;
1986 tlb->PageMask = env->CP0_PageMask;
1987 tlb->G = env->CP0_EntryLo0 & env->CP0_EntryLo1 & 1;
1988 tlb->V0 = (env->CP0_EntryLo0 & 2) != 0;
1989 tlb->D0 = (env->CP0_EntryLo0 & 4) != 0;
1990 tlb->C0 = (env->CP0_EntryLo0 >> 3) & 0x7;
1991 tlb->PFN[0] = (env->CP0_EntryLo0 >> 6) << 12;
1992 tlb->V1 = (env->CP0_EntryLo1 & 2) != 0;
1993 tlb->D1 = (env->CP0_EntryLo1 & 4) != 0;
1994 tlb->C1 = (env->CP0_EntryLo1 >> 3) & 0x7;
1995 tlb->PFN[1] = (env->CP0_EntryLo1 >> 6) << 12;
1998 void r4k_helper_tlbwi (void)
2002 idx = (env->CP0_Index & ~0x80000000) % env->tlb->nb_tlb;
2004 /* Discard cached TLB entries. We could avoid doing this if the
2005 tlbwi is just upgrading access permissions on the current entry;
2006 that might be a further win. */
2007 r4k_mips_tlb_flush_extra (env, env->tlb->nb_tlb);
2009 r4k_invalidate_tlb(env, idx, 0);
2013 void r4k_helper_tlbwr (void)
2015 int r = cpu_mips_get_random(env);
2017 r4k_invalidate_tlb(env, r, 1);
2021 void r4k_helper_tlbp (void)
2030 ASID = env->CP0_EntryHi & 0xFF;
2031 for (i = 0; i < env->tlb->nb_tlb; i++) {
2032 tlb = &env->tlb->mmu.r4k.tlb[i];
2033 /* 1k pages are not supported. */
2034 mask = tlb->PageMask | ~(TARGET_PAGE_MASK << 1);
2035 tag = env->CP0_EntryHi & ~mask;
2036 VPN = tlb->VPN & ~mask;
2037 /* Check ASID, virtual page number & size */
2038 if ((tlb->G == 1 || tlb->ASID == ASID) && VPN == tag) {
2044 if (i == env->tlb->nb_tlb) {
2045 /* No match. Discard any shadow entries, if any of them match. */
2046 for (i = env->tlb->nb_tlb; i < env->tlb->tlb_in_use; i++) {
2047 tlb = &env->tlb->mmu.r4k.tlb[i];
2048 /* 1k pages are not supported. */
2049 mask = tlb->PageMask | ~(TARGET_PAGE_MASK << 1);
2050 tag = env->CP0_EntryHi & ~mask;
2051 VPN = tlb->VPN & ~mask;
2052 /* Check ASID, virtual page number & size */
2053 if ((tlb->G == 1 || tlb->ASID == ASID) && VPN == tag) {
2054 r4k_mips_tlb_flush_extra (env, i);
2059 env->CP0_Index |= 0x80000000;
2063 void r4k_helper_tlbr (void)
2069 ASID = env->CP0_EntryHi & 0xFF;
2070 idx = (env->CP0_Index & ~0x80000000) % env->tlb->nb_tlb;
2071 tlb = &env->tlb->mmu.r4k.tlb[idx];
2073 /* If this will change the current ASID, flush qemu's TLB. */
2074 if (ASID != tlb->ASID)
2075 cpu_mips_tlb_flush (env, 1);
2077 r4k_mips_tlb_flush_extra(env, env->tlb->nb_tlb);
2079 env->CP0_EntryHi = tlb->VPN | tlb->ASID;
2080 env->CP0_PageMask = tlb->PageMask;
2081 env->CP0_EntryLo0 = tlb->G | (tlb->V0 << 1) | (tlb->D0 << 2) |
2082 (tlb->C0 << 3) | (tlb->PFN[0] >> 6);
2083 env->CP0_EntryLo1 = tlb->G | (tlb->V1 << 1) | (tlb->D1 << 2) |
2084 (tlb->C1 << 3) | (tlb->PFN[1] >> 6);
2087 void helper_tlbwi(void)
2089 env->tlb->helper_tlbwi();
2092 void helper_tlbwr(void)
2094 env->tlb->helper_tlbwr();
2097 void helper_tlbp(void)
2099 env->tlb->helper_tlbp();
2102 void helper_tlbr(void)
2104 env->tlb->helper_tlbr();
2108 target_ulong helper_di (void)
2110 target_ulong t0 = env->CP0_Status;
2112 env->CP0_Status = t0 & ~(1 << CP0St_IE);
2116 target_ulong helper_ei (void)
2118 target_ulong t0 = env->CP0_Status;
2120 env->CP0_Status = t0 | (1 << CP0St_IE);
2124 static void debug_pre_eret (void)
2126 if (qemu_loglevel_mask(CPU_LOG_EXEC)) {
2127 qemu_log("ERET: PC " TARGET_FMT_lx " EPC " TARGET_FMT_lx,
2128 env->active_tc.PC, env->CP0_EPC);
2129 if (env->CP0_Status & (1 << CP0St_ERL))
2130 qemu_log(" ErrorEPC " TARGET_FMT_lx, env->CP0_ErrorEPC);
2131 if (env->hflags & MIPS_HFLAG_DM)
2132 qemu_log(" DEPC " TARGET_FMT_lx, env->CP0_DEPC);
2137 static void debug_post_eret (void)
2139 if (qemu_loglevel_mask(CPU_LOG_EXEC)) {
2140 qemu_log(" => PC " TARGET_FMT_lx " EPC " TARGET_FMT_lx,
2141 env->active_tc.PC, env->CP0_EPC);
2142 if (env->CP0_Status & (1 << CP0St_ERL))
2143 qemu_log(" ErrorEPC " TARGET_FMT_lx, env->CP0_ErrorEPC);
2144 if (env->hflags & MIPS_HFLAG_DM)
2145 qemu_log(" DEPC " TARGET_FMT_lx, env->CP0_DEPC);
2146 switch (env->hflags & MIPS_HFLAG_KSU) {
2147 case MIPS_HFLAG_UM: qemu_log(", UM\n"); break;
2148 case MIPS_HFLAG_SM: qemu_log(", SM\n"); break;
2149 case MIPS_HFLAG_KM: qemu_log("\n"); break;
2150 default: cpu_abort(env, "Invalid MMU mode!\n"); break;
2155 static void set_pc (target_ulong error_pc)
2157 env->active_tc.PC = error_pc & ~(target_ulong)1;
2159 env->hflags |= MIPS_HFLAG_M16;
2161 env->hflags &= ~(MIPS_HFLAG_M16);
2165 void helper_eret (void)
2168 if (env->CP0_Status & (1 << CP0St_ERL)) {
2169 set_pc(env->CP0_ErrorEPC);
2170 env->CP0_Status &= ~(1 << CP0St_ERL);
2172 set_pc(env->CP0_EPC);
2173 env->CP0_Status &= ~(1 << CP0St_EXL);
2175 compute_hflags(env);
2180 void helper_deret (void)
2183 set_pc(env->CP0_DEPC);
2185 env->hflags &= MIPS_HFLAG_DM;
2186 compute_hflags(env);
2190 #endif /* !CONFIG_USER_ONLY */
2192 target_ulong helper_rdhwr_cpunum(void)
2194 if ((env->hflags & MIPS_HFLAG_CP0) ||
2195 (env->CP0_HWREna & (1 << 0)))
2196 return env->CP0_EBase & 0x3ff;
2198 helper_raise_exception(EXCP_RI);
2203 target_ulong helper_rdhwr_synci_step(void)
2205 if ((env->hflags & MIPS_HFLAG_CP0) ||
2206 (env->CP0_HWREna & (1 << 1)))
2207 return env->SYNCI_Step;
2209 helper_raise_exception(EXCP_RI);
2214 target_ulong helper_rdhwr_cc(void)
2216 if ((env->hflags & MIPS_HFLAG_CP0) ||
2217 (env->CP0_HWREna & (1 << 2)))
2218 return env->CP0_Count;
2220 helper_raise_exception(EXCP_RI);
2225 target_ulong helper_rdhwr_ccres(void)
2227 if ((env->hflags & MIPS_HFLAG_CP0) ||
2228 (env->CP0_HWREna & (1 << 3)))
2231 helper_raise_exception(EXCP_RI);
2236 void helper_pmon (int function)
2240 case 2: /* TODO: char inbyte(int waitflag); */
2241 if (env->active_tc.gpr[4] == 0)
2242 env->active_tc.gpr[2] = -1;
2244 case 11: /* TODO: char inbyte (void); */
2245 env->active_tc.gpr[2] = -1;
2249 printf("%c", (char)(env->active_tc.gpr[4] & 0xFF));
2255 unsigned char *fmt = (void *)(uintptr_t)env->active_tc.gpr[4];
2262 void helper_wait (void)
2265 cpu_reset_interrupt(env, CPU_INTERRUPT_WAKE);
2266 helper_raise_exception(EXCP_HLT);
2269 #if !defined(CONFIG_USER_ONLY)
2271 static void QEMU_NORETURN do_unaligned_access(target_ulong addr, int is_write,
2272 int is_user, uintptr_t retaddr);
2274 #define MMUSUFFIX _mmu
2275 #define ALIGNED_ONLY
2278 #include "softmmu_template.h"
2281 #include "softmmu_template.h"
2284 #include "softmmu_template.h"
2287 #include "softmmu_template.h"
2289 static void do_unaligned_access(target_ulong addr, int is_write,
2290 int is_user, uintptr_t retaddr)
2292 env->CP0_BadVAddr = addr;
2293 do_restore_state (retaddr);
2294 helper_raise_exception ((is_write == 1) ? EXCP_AdES : EXCP_AdEL);
2297 void tlb_fill(CPUMIPSState *env1, target_ulong addr, int is_write, int mmu_idx,
2300 TranslationBlock *tb;
2301 CPUMIPSState *saved_env;
2306 ret = cpu_mips_handle_mmu_fault(env, addr, is_write, mmu_idx);
2309 /* now we have a real cpu fault */
2310 tb = tb_find_pc(retaddr);
2312 /* the PC is inside the translated code. It means that we have
2313 a virtual CPU fault */
2314 cpu_restore_state(tb, env, retaddr);
2317 helper_raise_exception_err(env->exception_index, env->error_code);
2322 void cpu_unassigned_access(CPUMIPSState *env1, target_phys_addr_t addr,
2323 int is_write, int is_exec, int unused, int size)
2328 helper_raise_exception(EXCP_IBE);
2330 helper_raise_exception(EXCP_DBE);
2332 #endif /* !CONFIG_USER_ONLY */
2334 /* Complex FPU operations which may need stack space. */
2336 #define FLOAT_ONE32 make_float32(0x3f8 << 20)
2337 #define FLOAT_ONE64 make_float64(0x3ffULL << 52)
2338 #define FLOAT_TWO32 make_float32(1 << 30)
2339 #define FLOAT_TWO64 make_float64(1ULL << 62)
2340 #define FLOAT_QNAN32 0x7fbfffff
2341 #define FLOAT_QNAN64 0x7ff7ffffffffffffULL
2342 #define FLOAT_SNAN32 0x7fffffff
2343 #define FLOAT_SNAN64 0x7fffffffffffffffULL
2345 /* convert MIPS rounding mode in FCR31 to IEEE library */
2346 static unsigned int ieee_rm[] = {
2347 float_round_nearest_even,
2348 float_round_to_zero,
2353 #define RESTORE_ROUNDING_MODE \
2354 set_float_rounding_mode(ieee_rm[env->active_fpu.fcr31 & 3], &env->active_fpu.fp_status)
2356 #define RESTORE_FLUSH_MODE \
2357 set_flush_to_zero((env->active_fpu.fcr31 & (1 << 24)) != 0, &env->active_fpu.fp_status);
2359 target_ulong helper_cfc1 (uint32_t reg)
2365 arg1 = (int32_t)env->active_fpu.fcr0;
2368 arg1 = ((env->active_fpu.fcr31 >> 24) & 0xfe) | ((env->active_fpu.fcr31 >> 23) & 0x1);
2371 arg1 = env->active_fpu.fcr31 & 0x0003f07c;
2374 arg1 = (env->active_fpu.fcr31 & 0x00000f83) | ((env->active_fpu.fcr31 >> 22) & 0x4);
2377 arg1 = (int32_t)env->active_fpu.fcr31;
2384 void helper_ctc1 (target_ulong arg1, uint32_t reg)
2388 if (arg1 & 0xffffff00)
2390 env->active_fpu.fcr31 = (env->active_fpu.fcr31 & 0x017fffff) | ((arg1 & 0xfe) << 24) |
2391 ((arg1 & 0x1) << 23);
2394 if (arg1 & 0x007c0000)
2396 env->active_fpu.fcr31 = (env->active_fpu.fcr31 & 0xfffc0f83) | (arg1 & 0x0003f07c);
2399 if (arg1 & 0x007c0000)
2401 env->active_fpu.fcr31 = (env->active_fpu.fcr31 & 0xfefff07c) | (arg1 & 0x00000f83) |
2402 ((arg1 & 0x4) << 22);
2405 if (arg1 & 0x007c0000)
2407 env->active_fpu.fcr31 = arg1;
2412 /* set rounding mode */
2413 RESTORE_ROUNDING_MODE;
2414 /* set flush-to-zero mode */
2416 set_float_exception_flags(0, &env->active_fpu.fp_status);
2417 if ((GET_FP_ENABLE(env->active_fpu.fcr31) | 0x20) & GET_FP_CAUSE(env->active_fpu.fcr31))
2418 helper_raise_exception(EXCP_FPE);
2421 static inline int ieee_ex_to_mips(int xcpt)
2425 if (xcpt & float_flag_invalid) {
2428 if (xcpt & float_flag_overflow) {
2431 if (xcpt & float_flag_underflow) {
2432 ret |= FP_UNDERFLOW;
2434 if (xcpt & float_flag_divbyzero) {
2437 if (xcpt & float_flag_inexact) {
2444 static inline void update_fcr31(void)
2446 int tmp = ieee_ex_to_mips(get_float_exception_flags(&env->active_fpu.fp_status));
2448 SET_FP_CAUSE(env->active_fpu.fcr31, tmp);
2449 if (GET_FP_ENABLE(env->active_fpu.fcr31) & tmp)
2450 helper_raise_exception(EXCP_FPE);
2452 UPDATE_FP_FLAGS(env->active_fpu.fcr31, tmp);
2456 Single precition routines have a "s" suffix, double precision a
2457 "d" suffix, 32bit integer "w", 64bit integer "l", paired single "ps",
2458 paired single lower "pl", paired single upper "pu". */
2460 /* unary operations, modifying fp status */
2461 uint64_t helper_float_sqrt_d(uint64_t fdt0)
2463 return float64_sqrt(fdt0, &env->active_fpu.fp_status);
2466 uint32_t helper_float_sqrt_s(uint32_t fst0)
2468 return float32_sqrt(fst0, &env->active_fpu.fp_status);
2471 uint64_t helper_float_cvtd_s(uint32_t fst0)
2475 set_float_exception_flags(0, &env->active_fpu.fp_status);
2476 fdt2 = float32_to_float64(fst0, &env->active_fpu.fp_status);
2481 uint64_t helper_float_cvtd_w(uint32_t wt0)
2485 set_float_exception_flags(0, &env->active_fpu.fp_status);
2486 fdt2 = int32_to_float64(wt0, &env->active_fpu.fp_status);
2491 uint64_t helper_float_cvtd_l(uint64_t dt0)
2495 set_float_exception_flags(0, &env->active_fpu.fp_status);
2496 fdt2 = int64_to_float64(dt0, &env->active_fpu.fp_status);
2501 uint64_t helper_float_cvtl_d(uint64_t fdt0)
2505 set_float_exception_flags(0, &env->active_fpu.fp_status);
2506 dt2 = float64_to_int64(fdt0, &env->active_fpu.fp_status);
2508 if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID))
2513 uint64_t helper_float_cvtl_s(uint32_t fst0)
2517 set_float_exception_flags(0, &env->active_fpu.fp_status);
2518 dt2 = float32_to_int64(fst0, &env->active_fpu.fp_status);
2520 if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID))
2525 uint64_t helper_float_cvtps_pw(uint64_t dt0)
2530 set_float_exception_flags(0, &env->active_fpu.fp_status);
2531 fst2 = int32_to_float32(dt0 & 0XFFFFFFFF, &env->active_fpu.fp_status);
2532 fsth2 = int32_to_float32(dt0 >> 32, &env->active_fpu.fp_status);
2534 return ((uint64_t)fsth2 << 32) | fst2;
2537 uint64_t helper_float_cvtpw_ps(uint64_t fdt0)
2542 set_float_exception_flags(0, &env->active_fpu.fp_status);
2543 wt2 = float32_to_int32(fdt0 & 0XFFFFFFFF, &env->active_fpu.fp_status);
2544 wth2 = float32_to_int32(fdt0 >> 32, &env->active_fpu.fp_status);
2546 if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID)) {
2548 wth2 = FLOAT_SNAN32;
2550 return ((uint64_t)wth2 << 32) | wt2;
2553 uint32_t helper_float_cvts_d(uint64_t fdt0)
2557 set_float_exception_flags(0, &env->active_fpu.fp_status);
2558 fst2 = float64_to_float32(fdt0, &env->active_fpu.fp_status);
2563 uint32_t helper_float_cvts_w(uint32_t wt0)
2567 set_float_exception_flags(0, &env->active_fpu.fp_status);
2568 fst2 = int32_to_float32(wt0, &env->active_fpu.fp_status);
2573 uint32_t helper_float_cvts_l(uint64_t dt0)
2577 set_float_exception_flags(0, &env->active_fpu.fp_status);
2578 fst2 = int64_to_float32(dt0, &env->active_fpu.fp_status);
2583 uint32_t helper_float_cvts_pl(uint32_t wt0)
2587 set_float_exception_flags(0, &env->active_fpu.fp_status);
2593 uint32_t helper_float_cvts_pu(uint32_t wth0)
2597 set_float_exception_flags(0, &env->active_fpu.fp_status);
2603 uint32_t helper_float_cvtw_s(uint32_t fst0)
2607 set_float_exception_flags(0, &env->active_fpu.fp_status);
2608 wt2 = float32_to_int32(fst0, &env->active_fpu.fp_status);
2610 if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID))
2615 uint32_t helper_float_cvtw_d(uint64_t fdt0)
2619 set_float_exception_flags(0, &env->active_fpu.fp_status);
2620 wt2 = float64_to_int32(fdt0, &env->active_fpu.fp_status);
2622 if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID))
2627 uint64_t helper_float_roundl_d(uint64_t fdt0)
2631 set_float_exception_flags(0, &env->active_fpu.fp_status);
2632 set_float_rounding_mode(float_round_nearest_even, &env->active_fpu.fp_status);
2633 dt2 = float64_to_int64(fdt0, &env->active_fpu.fp_status);
2634 RESTORE_ROUNDING_MODE;
2636 if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID))
2641 uint64_t helper_float_roundl_s(uint32_t fst0)
2645 set_float_exception_flags(0, &env->active_fpu.fp_status);
2646 set_float_rounding_mode(float_round_nearest_even, &env->active_fpu.fp_status);
2647 dt2 = float32_to_int64(fst0, &env->active_fpu.fp_status);
2648 RESTORE_ROUNDING_MODE;
2650 if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID))
2655 uint32_t helper_float_roundw_d(uint64_t fdt0)
2659 set_float_exception_flags(0, &env->active_fpu.fp_status);
2660 set_float_rounding_mode(float_round_nearest_even, &env->active_fpu.fp_status);
2661 wt2 = float64_to_int32(fdt0, &env->active_fpu.fp_status);
2662 RESTORE_ROUNDING_MODE;
2664 if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID))
2669 uint32_t helper_float_roundw_s(uint32_t fst0)
2673 set_float_exception_flags(0, &env->active_fpu.fp_status);
2674 set_float_rounding_mode(float_round_nearest_even, &env->active_fpu.fp_status);
2675 wt2 = float32_to_int32(fst0, &env->active_fpu.fp_status);
2676 RESTORE_ROUNDING_MODE;
2678 if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID))
2683 uint64_t helper_float_truncl_d(uint64_t fdt0)
2687 set_float_exception_flags(0, &env->active_fpu.fp_status);
2688 dt2 = float64_to_int64_round_to_zero(fdt0, &env->active_fpu.fp_status);
2690 if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID))
2695 uint64_t helper_float_truncl_s(uint32_t fst0)
2699 set_float_exception_flags(0, &env->active_fpu.fp_status);
2700 dt2 = float32_to_int64_round_to_zero(fst0, &env->active_fpu.fp_status);
2702 if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID))
2707 uint32_t helper_float_truncw_d(uint64_t fdt0)
2711 set_float_exception_flags(0, &env->active_fpu.fp_status);
2712 wt2 = float64_to_int32_round_to_zero(fdt0, &env->active_fpu.fp_status);
2714 if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID))
2719 uint32_t helper_float_truncw_s(uint32_t fst0)
2723 set_float_exception_flags(0, &env->active_fpu.fp_status);
2724 wt2 = float32_to_int32_round_to_zero(fst0, &env->active_fpu.fp_status);
2726 if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID))
2731 uint64_t helper_float_ceill_d(uint64_t fdt0)
2735 set_float_exception_flags(0, &env->active_fpu.fp_status);
2736 set_float_rounding_mode(float_round_up, &env->active_fpu.fp_status);
2737 dt2 = float64_to_int64(fdt0, &env->active_fpu.fp_status);
2738 RESTORE_ROUNDING_MODE;
2740 if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID))
2745 uint64_t helper_float_ceill_s(uint32_t fst0)
2749 set_float_exception_flags(0, &env->active_fpu.fp_status);
2750 set_float_rounding_mode(float_round_up, &env->active_fpu.fp_status);
2751 dt2 = float32_to_int64(fst0, &env->active_fpu.fp_status);
2752 RESTORE_ROUNDING_MODE;
2754 if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID))
2759 uint32_t helper_float_ceilw_d(uint64_t fdt0)
2763 set_float_exception_flags(0, &env->active_fpu.fp_status);
2764 set_float_rounding_mode(float_round_up, &env->active_fpu.fp_status);
2765 wt2 = float64_to_int32(fdt0, &env->active_fpu.fp_status);
2766 RESTORE_ROUNDING_MODE;
2768 if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID))
2773 uint32_t helper_float_ceilw_s(uint32_t fst0)
2777 set_float_exception_flags(0, &env->active_fpu.fp_status);
2778 set_float_rounding_mode(float_round_up, &env->active_fpu.fp_status);
2779 wt2 = float32_to_int32(fst0, &env->active_fpu.fp_status);
2780 RESTORE_ROUNDING_MODE;
2782 if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID))
2787 uint64_t helper_float_floorl_d(uint64_t fdt0)
2791 set_float_exception_flags(0, &env->active_fpu.fp_status);
2792 set_float_rounding_mode(float_round_down, &env->active_fpu.fp_status);
2793 dt2 = float64_to_int64(fdt0, &env->active_fpu.fp_status);
2794 RESTORE_ROUNDING_MODE;
2796 if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID))
2801 uint64_t helper_float_floorl_s(uint32_t fst0)
2805 set_float_exception_flags(0, &env->active_fpu.fp_status);
2806 set_float_rounding_mode(float_round_down, &env->active_fpu.fp_status);
2807 dt2 = float32_to_int64(fst0, &env->active_fpu.fp_status);
2808 RESTORE_ROUNDING_MODE;
2810 if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID))
2815 uint32_t helper_float_floorw_d(uint64_t fdt0)
2819 set_float_exception_flags(0, &env->active_fpu.fp_status);
2820 set_float_rounding_mode(float_round_down, &env->active_fpu.fp_status);
2821 wt2 = float64_to_int32(fdt0, &env->active_fpu.fp_status);
2822 RESTORE_ROUNDING_MODE;
2824 if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID))
2829 uint32_t helper_float_floorw_s(uint32_t fst0)
2833 set_float_exception_flags(0, &env->active_fpu.fp_status);
2834 set_float_rounding_mode(float_round_down, &env->active_fpu.fp_status);
2835 wt2 = float32_to_int32(fst0, &env->active_fpu.fp_status);
2836 RESTORE_ROUNDING_MODE;
2838 if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID))
2843 /* unary operations, not modifying fp status */
2844 #define FLOAT_UNOP(name) \
2845 uint64_t helper_float_ ## name ## _d(uint64_t fdt0) \
2847 return float64_ ## name(fdt0); \
2849 uint32_t helper_float_ ## name ## _s(uint32_t fst0) \
2851 return float32_ ## name(fst0); \
2853 uint64_t helper_float_ ## name ## _ps(uint64_t fdt0) \
2858 wt0 = float32_ ## name(fdt0 & 0XFFFFFFFF); \
2859 wth0 = float32_ ## name(fdt0 >> 32); \
2860 return ((uint64_t)wth0 << 32) | wt0; \
2866 /* MIPS specific unary operations */
2867 uint64_t helper_float_recip_d(uint64_t fdt0)
2871 set_float_exception_flags(0, &env->active_fpu.fp_status);
2872 fdt2 = float64_div(FLOAT_ONE64, fdt0, &env->active_fpu.fp_status);
2877 uint32_t helper_float_recip_s(uint32_t fst0)
2881 set_float_exception_flags(0, &env->active_fpu.fp_status);
2882 fst2 = float32_div(FLOAT_ONE32, fst0, &env->active_fpu.fp_status);
2887 uint64_t helper_float_rsqrt_d(uint64_t fdt0)
2891 set_float_exception_flags(0, &env->active_fpu.fp_status);
2892 fdt2 = float64_sqrt(fdt0, &env->active_fpu.fp_status);
2893 fdt2 = float64_div(FLOAT_ONE64, fdt2, &env->active_fpu.fp_status);
2898 uint32_t helper_float_rsqrt_s(uint32_t fst0)
2902 set_float_exception_flags(0, &env->active_fpu.fp_status);
2903 fst2 = float32_sqrt(fst0, &env->active_fpu.fp_status);
2904 fst2 = float32_div(FLOAT_ONE32, fst2, &env->active_fpu.fp_status);
2909 uint64_t helper_float_recip1_d(uint64_t fdt0)
2913 set_float_exception_flags(0, &env->active_fpu.fp_status);
2914 fdt2 = float64_div(FLOAT_ONE64, fdt0, &env->active_fpu.fp_status);
2919 uint32_t helper_float_recip1_s(uint32_t fst0)
2923 set_float_exception_flags(0, &env->active_fpu.fp_status);
2924 fst2 = float32_div(FLOAT_ONE32, fst0, &env->active_fpu.fp_status);
2929 uint64_t helper_float_recip1_ps(uint64_t fdt0)
2934 set_float_exception_flags(0, &env->active_fpu.fp_status);
2935 fst2 = float32_div(FLOAT_ONE32, fdt0 & 0XFFFFFFFF, &env->active_fpu.fp_status);
2936 fsth2 = float32_div(FLOAT_ONE32, fdt0 >> 32, &env->active_fpu.fp_status);
2938 return ((uint64_t)fsth2 << 32) | fst2;
2941 uint64_t helper_float_rsqrt1_d(uint64_t fdt0)
2945 set_float_exception_flags(0, &env->active_fpu.fp_status);
2946 fdt2 = float64_sqrt(fdt0, &env->active_fpu.fp_status);
2947 fdt2 = float64_div(FLOAT_ONE64, fdt2, &env->active_fpu.fp_status);
2952 uint32_t helper_float_rsqrt1_s(uint32_t fst0)
2956 set_float_exception_flags(0, &env->active_fpu.fp_status);
2957 fst2 = float32_sqrt(fst0, &env->active_fpu.fp_status);
2958 fst2 = float32_div(FLOAT_ONE32, fst2, &env->active_fpu.fp_status);
2963 uint64_t helper_float_rsqrt1_ps(uint64_t fdt0)
2968 set_float_exception_flags(0, &env->active_fpu.fp_status);
2969 fst2 = float32_sqrt(fdt0 & 0XFFFFFFFF, &env->active_fpu.fp_status);
2970 fsth2 = float32_sqrt(fdt0 >> 32, &env->active_fpu.fp_status);
2971 fst2 = float32_div(FLOAT_ONE32, fst2, &env->active_fpu.fp_status);
2972 fsth2 = float32_div(FLOAT_ONE32, fsth2, &env->active_fpu.fp_status);
2974 return ((uint64_t)fsth2 << 32) | fst2;
2977 #define FLOAT_OP(name, p) void helper_float_##name##_##p(void)
2979 /* binary operations */
2980 #define FLOAT_BINOP(name) \
2981 uint64_t helper_float_ ## name ## _d(uint64_t fdt0, uint64_t fdt1) \
2985 set_float_exception_flags(0, &env->active_fpu.fp_status); \
2986 dt2 = float64_ ## name (fdt0, fdt1, &env->active_fpu.fp_status); \
2988 if (GET_FP_CAUSE(env->active_fpu.fcr31) & FP_INVALID) \
2989 dt2 = FLOAT_QNAN64; \
2993 uint32_t helper_float_ ## name ## _s(uint32_t fst0, uint32_t fst1) \
2997 set_float_exception_flags(0, &env->active_fpu.fp_status); \
2998 wt2 = float32_ ## name (fst0, fst1, &env->active_fpu.fp_status); \
3000 if (GET_FP_CAUSE(env->active_fpu.fcr31) & FP_INVALID) \
3001 wt2 = FLOAT_QNAN32; \
3005 uint64_t helper_float_ ## name ## _ps(uint64_t fdt0, uint64_t fdt1) \
3007 uint32_t fst0 = fdt0 & 0XFFFFFFFF; \
3008 uint32_t fsth0 = fdt0 >> 32; \
3009 uint32_t fst1 = fdt1 & 0XFFFFFFFF; \
3010 uint32_t fsth1 = fdt1 >> 32; \
3014 set_float_exception_flags(0, &env->active_fpu.fp_status); \
3015 wt2 = float32_ ## name (fst0, fst1, &env->active_fpu.fp_status); \
3016 wth2 = float32_ ## name (fsth0, fsth1, &env->active_fpu.fp_status); \
3018 if (GET_FP_CAUSE(env->active_fpu.fcr31) & FP_INVALID) { \
3019 wt2 = FLOAT_QNAN32; \
3020 wth2 = FLOAT_QNAN32; \
3022 return ((uint64_t)wth2 << 32) | wt2; \
3031 /* ternary operations */
3032 #define FLOAT_TERNOP(name1, name2) \
3033 uint64_t helper_float_ ## name1 ## name2 ## _d(uint64_t fdt0, uint64_t fdt1, \
3036 fdt0 = float64_ ## name1 (fdt0, fdt1, &env->active_fpu.fp_status); \
3037 return float64_ ## name2 (fdt0, fdt2, &env->active_fpu.fp_status); \
3040 uint32_t helper_float_ ## name1 ## name2 ## _s(uint32_t fst0, uint32_t fst1, \
3043 fst0 = float32_ ## name1 (fst0, fst1, &env->active_fpu.fp_status); \
3044 return float32_ ## name2 (fst0, fst2, &env->active_fpu.fp_status); \
3047 uint64_t helper_float_ ## name1 ## name2 ## _ps(uint64_t fdt0, uint64_t fdt1, \
3050 uint32_t fst0 = fdt0 & 0XFFFFFFFF; \
3051 uint32_t fsth0 = fdt0 >> 32; \
3052 uint32_t fst1 = fdt1 & 0XFFFFFFFF; \
3053 uint32_t fsth1 = fdt1 >> 32; \
3054 uint32_t fst2 = fdt2 & 0XFFFFFFFF; \
3055 uint32_t fsth2 = fdt2 >> 32; \
3057 fst0 = float32_ ## name1 (fst0, fst1, &env->active_fpu.fp_status); \
3058 fsth0 = float32_ ## name1 (fsth0, fsth1, &env->active_fpu.fp_status); \
3059 fst2 = float32_ ## name2 (fst0, fst2, &env->active_fpu.fp_status); \
3060 fsth2 = float32_ ## name2 (fsth0, fsth2, &env->active_fpu.fp_status); \
3061 return ((uint64_t)fsth2 << 32) | fst2; \
3064 FLOAT_TERNOP(mul, add)
3065 FLOAT_TERNOP(mul, sub)
3068 /* negated ternary operations */
3069 #define FLOAT_NTERNOP(name1, name2) \
3070 uint64_t helper_float_n ## name1 ## name2 ## _d(uint64_t fdt0, uint64_t fdt1, \
3073 fdt0 = float64_ ## name1 (fdt0, fdt1, &env->active_fpu.fp_status); \
3074 fdt2 = float64_ ## name2 (fdt0, fdt2, &env->active_fpu.fp_status); \
3075 return float64_chs(fdt2); \
3078 uint32_t helper_float_n ## name1 ## name2 ## _s(uint32_t fst0, uint32_t fst1, \
3081 fst0 = float32_ ## name1 (fst0, fst1, &env->active_fpu.fp_status); \
3082 fst2 = float32_ ## name2 (fst0, fst2, &env->active_fpu.fp_status); \
3083 return float32_chs(fst2); \
3086 uint64_t helper_float_n ## name1 ## name2 ## _ps(uint64_t fdt0, uint64_t fdt1,\
3089 uint32_t fst0 = fdt0 & 0XFFFFFFFF; \
3090 uint32_t fsth0 = fdt0 >> 32; \
3091 uint32_t fst1 = fdt1 & 0XFFFFFFFF; \
3092 uint32_t fsth1 = fdt1 >> 32; \
3093 uint32_t fst2 = fdt2 & 0XFFFFFFFF; \
3094 uint32_t fsth2 = fdt2 >> 32; \
3096 fst0 = float32_ ## name1 (fst0, fst1, &env->active_fpu.fp_status); \
3097 fsth0 = float32_ ## name1 (fsth0, fsth1, &env->active_fpu.fp_status); \
3098 fst2 = float32_ ## name2 (fst0, fst2, &env->active_fpu.fp_status); \
3099 fsth2 = float32_ ## name2 (fsth0, fsth2, &env->active_fpu.fp_status); \
3100 fst2 = float32_chs(fst2); \
3101 fsth2 = float32_chs(fsth2); \
3102 return ((uint64_t)fsth2 << 32) | fst2; \
3105 FLOAT_NTERNOP(mul, add)
3106 FLOAT_NTERNOP(mul, sub)
3107 #undef FLOAT_NTERNOP
3109 /* MIPS specific binary operations */
3110 uint64_t helper_float_recip2_d(uint64_t fdt0, uint64_t fdt2)
3112 set_float_exception_flags(0, &env->active_fpu.fp_status);
3113 fdt2 = float64_mul(fdt0, fdt2, &env->active_fpu.fp_status);
3114 fdt2 = float64_chs(float64_sub(fdt2, FLOAT_ONE64, &env->active_fpu.fp_status));
3119 uint32_t helper_float_recip2_s(uint32_t fst0, uint32_t fst2)
3121 set_float_exception_flags(0, &env->active_fpu.fp_status);
3122 fst2 = float32_mul(fst0, fst2, &env->active_fpu.fp_status);
3123 fst2 = float32_chs(float32_sub(fst2, FLOAT_ONE32, &env->active_fpu.fp_status));
3128 uint64_t helper_float_recip2_ps(uint64_t fdt0, uint64_t fdt2)
3130 uint32_t fst0 = fdt0 & 0XFFFFFFFF;
3131 uint32_t fsth0 = fdt0 >> 32;
3132 uint32_t fst2 = fdt2 & 0XFFFFFFFF;
3133 uint32_t fsth2 = fdt2 >> 32;
3135 set_float_exception_flags(0, &env->active_fpu.fp_status);
3136 fst2 = float32_mul(fst0, fst2, &env->active_fpu.fp_status);
3137 fsth2 = float32_mul(fsth0, fsth2, &env->active_fpu.fp_status);
3138 fst2 = float32_chs(float32_sub(fst2, FLOAT_ONE32, &env->active_fpu.fp_status));
3139 fsth2 = float32_chs(float32_sub(fsth2, FLOAT_ONE32, &env->active_fpu.fp_status));
3141 return ((uint64_t)fsth2 << 32) | fst2;
3144 uint64_t helper_float_rsqrt2_d(uint64_t fdt0, uint64_t fdt2)
3146 set_float_exception_flags(0, &env->active_fpu.fp_status);
3147 fdt2 = float64_mul(fdt0, fdt2, &env->active_fpu.fp_status);
3148 fdt2 = float64_sub(fdt2, FLOAT_ONE64, &env->active_fpu.fp_status);
3149 fdt2 = float64_chs(float64_div(fdt2, FLOAT_TWO64, &env->active_fpu.fp_status));
3154 uint32_t helper_float_rsqrt2_s(uint32_t fst0, uint32_t fst2)
3156 set_float_exception_flags(0, &env->active_fpu.fp_status);
3157 fst2 = float32_mul(fst0, fst2, &env->active_fpu.fp_status);
3158 fst2 = float32_sub(fst2, FLOAT_ONE32, &env->active_fpu.fp_status);
3159 fst2 = float32_chs(float32_div(fst2, FLOAT_TWO32, &env->active_fpu.fp_status));
3164 uint64_t helper_float_rsqrt2_ps(uint64_t fdt0, uint64_t fdt2)
3166 uint32_t fst0 = fdt0 & 0XFFFFFFFF;
3167 uint32_t fsth0 = fdt0 >> 32;
3168 uint32_t fst2 = fdt2 & 0XFFFFFFFF;
3169 uint32_t fsth2 = fdt2 >> 32;
3171 set_float_exception_flags(0, &env->active_fpu.fp_status);
3172 fst2 = float32_mul(fst0, fst2, &env->active_fpu.fp_status);
3173 fsth2 = float32_mul(fsth0, fsth2, &env->active_fpu.fp_status);
3174 fst2 = float32_sub(fst2, FLOAT_ONE32, &env->active_fpu.fp_status);
3175 fsth2 = float32_sub(fsth2, FLOAT_ONE32, &env->active_fpu.fp_status);
3176 fst2 = float32_chs(float32_div(fst2, FLOAT_TWO32, &env->active_fpu.fp_status));
3177 fsth2 = float32_chs(float32_div(fsth2, FLOAT_TWO32, &env->active_fpu.fp_status));
3179 return ((uint64_t)fsth2 << 32) | fst2;
3182 uint64_t helper_float_addr_ps(uint64_t fdt0, uint64_t fdt1)
3184 uint32_t fst0 = fdt0 & 0XFFFFFFFF;
3185 uint32_t fsth0 = fdt0 >> 32;
3186 uint32_t fst1 = fdt1 & 0XFFFFFFFF;
3187 uint32_t fsth1 = fdt1 >> 32;
3191 set_float_exception_flags(0, &env->active_fpu.fp_status);
3192 fst2 = float32_add (fst0, fsth0, &env->active_fpu.fp_status);
3193 fsth2 = float32_add (fst1, fsth1, &env->active_fpu.fp_status);
3195 return ((uint64_t)fsth2 << 32) | fst2;
3198 uint64_t helper_float_mulr_ps(uint64_t fdt0, uint64_t fdt1)
3200 uint32_t fst0 = fdt0 & 0XFFFFFFFF;
3201 uint32_t fsth0 = fdt0 >> 32;
3202 uint32_t fst1 = fdt1 & 0XFFFFFFFF;
3203 uint32_t fsth1 = fdt1 >> 32;
3207 set_float_exception_flags(0, &env->active_fpu.fp_status);
3208 fst2 = float32_mul (fst0, fsth0, &env->active_fpu.fp_status);
3209 fsth2 = float32_mul (fst1, fsth1, &env->active_fpu.fp_status);
3211 return ((uint64_t)fsth2 << 32) | fst2;
3214 /* compare operations */
3215 #define FOP_COND_D(op, cond) \
3216 void helper_cmp_d_ ## op (uint64_t fdt0, uint64_t fdt1, int cc) \
3219 set_float_exception_flags(0, &env->active_fpu.fp_status); \
3223 SET_FP_COND(cc, env->active_fpu); \
3225 CLEAR_FP_COND(cc, env->active_fpu); \
3227 void helper_cmpabs_d_ ## op (uint64_t fdt0, uint64_t fdt1, int cc) \
3230 set_float_exception_flags(0, &env->active_fpu.fp_status); \
3231 fdt0 = float64_abs(fdt0); \
3232 fdt1 = float64_abs(fdt1); \
3236 SET_FP_COND(cc, env->active_fpu); \
3238 CLEAR_FP_COND(cc, env->active_fpu); \
3241 /* NOTE: the comma operator will make "cond" to eval to false,
3242 * but float64_unordered_quiet() is still called. */
3243 FOP_COND_D(f, (float64_unordered_quiet(fdt1, fdt0, &env->active_fpu.fp_status), 0))
3244 FOP_COND_D(un, float64_unordered_quiet(fdt1, fdt0, &env->active_fpu.fp_status))
3245 FOP_COND_D(eq, float64_eq_quiet(fdt0, fdt1, &env->active_fpu.fp_status))
3246 FOP_COND_D(ueq, float64_unordered_quiet(fdt1, fdt0, &env->active_fpu.fp_status) || float64_eq_quiet(fdt0, fdt1, &env->active_fpu.fp_status))
3247 FOP_COND_D(olt, float64_lt_quiet(fdt0, fdt1, &env->active_fpu.fp_status))
3248 FOP_COND_D(ult, float64_unordered_quiet(fdt1, fdt0, &env->active_fpu.fp_status) || float64_lt_quiet(fdt0, fdt1, &env->active_fpu.fp_status))
3249 FOP_COND_D(ole, float64_le_quiet(fdt0, fdt1, &env->active_fpu.fp_status))
3250 FOP_COND_D(ule, float64_unordered_quiet(fdt1, fdt0, &env->active_fpu.fp_status) || float64_le_quiet(fdt0, fdt1, &env->active_fpu.fp_status))
3251 /* NOTE: the comma operator will make "cond" to eval to false,
3252 * but float64_unordered() is still called. */
3253 FOP_COND_D(sf, (float64_unordered(fdt1, fdt0, &env->active_fpu.fp_status), 0))
3254 FOP_COND_D(ngle,float64_unordered(fdt1, fdt0, &env->active_fpu.fp_status))
3255 FOP_COND_D(seq, float64_eq(fdt0, fdt1, &env->active_fpu.fp_status))
3256 FOP_COND_D(ngl, float64_unordered(fdt1, fdt0, &env->active_fpu.fp_status) || float64_eq(fdt0, fdt1, &env->active_fpu.fp_status))
3257 FOP_COND_D(lt, float64_lt(fdt0, fdt1, &env->active_fpu.fp_status))
3258 FOP_COND_D(nge, float64_unordered(fdt1, fdt0, &env->active_fpu.fp_status) || float64_lt(fdt0, fdt1, &env->active_fpu.fp_status))
3259 FOP_COND_D(le, float64_le(fdt0, fdt1, &env->active_fpu.fp_status))
3260 FOP_COND_D(ngt, float64_unordered(fdt1, fdt0, &env->active_fpu.fp_status) || float64_le(fdt0, fdt1, &env->active_fpu.fp_status))
3262 #define FOP_COND_S(op, cond) \
3263 void helper_cmp_s_ ## op (uint32_t fst0, uint32_t fst1, int cc) \
3266 set_float_exception_flags(0, &env->active_fpu.fp_status); \
3270 SET_FP_COND(cc, env->active_fpu); \
3272 CLEAR_FP_COND(cc, env->active_fpu); \
3274 void helper_cmpabs_s_ ## op (uint32_t fst0, uint32_t fst1, int cc) \
3277 set_float_exception_flags(0, &env->active_fpu.fp_status); \
3278 fst0 = float32_abs(fst0); \
3279 fst1 = float32_abs(fst1); \
3283 SET_FP_COND(cc, env->active_fpu); \
3285 CLEAR_FP_COND(cc, env->active_fpu); \
3288 /* NOTE: the comma operator will make "cond" to eval to false,
3289 * but float32_unordered_quiet() is still called. */
3290 FOP_COND_S(f, (float32_unordered_quiet(fst1, fst0, &env->active_fpu.fp_status), 0))
3291 FOP_COND_S(un, float32_unordered_quiet(fst1, fst0, &env->active_fpu.fp_status))
3292 FOP_COND_S(eq, float32_eq_quiet(fst0, fst1, &env->active_fpu.fp_status))
3293 FOP_COND_S(ueq, float32_unordered_quiet(fst1, fst0, &env->active_fpu.fp_status) || float32_eq_quiet(fst0, fst1, &env->active_fpu.fp_status))
3294 FOP_COND_S(olt, float32_lt_quiet(fst0, fst1, &env->active_fpu.fp_status))
3295 FOP_COND_S(ult, float32_unordered_quiet(fst1, fst0, &env->active_fpu.fp_status) || float32_lt_quiet(fst0, fst1, &env->active_fpu.fp_status))
3296 FOP_COND_S(ole, float32_le_quiet(fst0, fst1, &env->active_fpu.fp_status))
3297 FOP_COND_S(ule, float32_unordered_quiet(fst1, fst0, &env->active_fpu.fp_status) || float32_le_quiet(fst0, fst1, &env->active_fpu.fp_status))
3298 /* NOTE: the comma operator will make "cond" to eval to false,
3299 * but float32_unordered() is still called. */
3300 FOP_COND_S(sf, (float32_unordered(fst1, fst0, &env->active_fpu.fp_status), 0))
3301 FOP_COND_S(ngle,float32_unordered(fst1, fst0, &env->active_fpu.fp_status))
3302 FOP_COND_S(seq, float32_eq(fst0, fst1, &env->active_fpu.fp_status))
3303 FOP_COND_S(ngl, float32_unordered(fst1, fst0, &env->active_fpu.fp_status) || float32_eq(fst0, fst1, &env->active_fpu.fp_status))
3304 FOP_COND_S(lt, float32_lt(fst0, fst1, &env->active_fpu.fp_status))
3305 FOP_COND_S(nge, float32_unordered(fst1, fst0, &env->active_fpu.fp_status) || float32_lt(fst0, fst1, &env->active_fpu.fp_status))
3306 FOP_COND_S(le, float32_le(fst0, fst1, &env->active_fpu.fp_status))
3307 FOP_COND_S(ngt, float32_unordered(fst1, fst0, &env->active_fpu.fp_status) || float32_le(fst0, fst1, &env->active_fpu.fp_status))
3309 #define FOP_COND_PS(op, condl, condh) \
3310 void helper_cmp_ps_ ## op (uint64_t fdt0, uint64_t fdt1, int cc) \
3312 uint32_t fst0, fsth0, fst1, fsth1; \
3314 set_float_exception_flags(0, &env->active_fpu.fp_status); \
3315 fst0 = fdt0 & 0XFFFFFFFF; \
3316 fsth0 = fdt0 >> 32; \
3317 fst1 = fdt1 & 0XFFFFFFFF; \
3318 fsth1 = fdt1 >> 32; \
3323 SET_FP_COND(cc, env->active_fpu); \
3325 CLEAR_FP_COND(cc, env->active_fpu); \
3327 SET_FP_COND(cc + 1, env->active_fpu); \
3329 CLEAR_FP_COND(cc + 1, env->active_fpu); \
3331 void helper_cmpabs_ps_ ## op (uint64_t fdt0, uint64_t fdt1, int cc) \
3333 uint32_t fst0, fsth0, fst1, fsth1; \
3335 fst0 = float32_abs(fdt0 & 0XFFFFFFFF); \
3336 fsth0 = float32_abs(fdt0 >> 32); \
3337 fst1 = float32_abs(fdt1 & 0XFFFFFFFF); \
3338 fsth1 = float32_abs(fdt1 >> 32); \
3343 SET_FP_COND(cc, env->active_fpu); \
3345 CLEAR_FP_COND(cc, env->active_fpu); \
3347 SET_FP_COND(cc + 1, env->active_fpu); \
3349 CLEAR_FP_COND(cc + 1, env->active_fpu); \
3352 /* NOTE: the comma operator will make "cond" to eval to false,
3353 * but float32_unordered_quiet() is still called. */
3354 FOP_COND_PS(f, (float32_unordered_quiet(fst1, fst0, &env->active_fpu.fp_status), 0),
3355 (float32_unordered_quiet(fsth1, fsth0, &env->active_fpu.fp_status), 0))
3356 FOP_COND_PS(un, float32_unordered_quiet(fst1, fst0, &env->active_fpu.fp_status),
3357 float32_unordered_quiet(fsth1, fsth0, &env->active_fpu.fp_status))
3358 FOP_COND_PS(eq, float32_eq_quiet(fst0, fst1, &env->active_fpu.fp_status),
3359 float32_eq_quiet(fsth0, fsth1, &env->active_fpu.fp_status))
3360 FOP_COND_PS(ueq, float32_unordered_quiet(fst1, fst0, &env->active_fpu.fp_status) || float32_eq_quiet(fst0, fst1, &env->active_fpu.fp_status),
3361 float32_unordered_quiet(fsth1, fsth0, &env->active_fpu.fp_status) || float32_eq_quiet(fsth0, fsth1, &env->active_fpu.fp_status))
3362 FOP_COND_PS(olt, float32_lt_quiet(fst0, fst1, &env->active_fpu.fp_status),
3363 float32_lt_quiet(fsth0, fsth1, &env->active_fpu.fp_status))
3364 FOP_COND_PS(ult, float32_unordered_quiet(fst1, fst0, &env->active_fpu.fp_status) || float32_lt_quiet(fst0, fst1, &env->active_fpu.fp_status),
3365 float32_unordered_quiet(fsth1, fsth0, &env->active_fpu.fp_status) || float32_lt_quiet(fsth0, fsth1, &env->active_fpu.fp_status))
3366 FOP_COND_PS(ole, float32_le_quiet(fst0, fst1, &env->active_fpu.fp_status),
3367 float32_le_quiet(fsth0, fsth1, &env->active_fpu.fp_status))
3368 FOP_COND_PS(ule, float32_unordered_quiet(fst1, fst0, &env->active_fpu.fp_status) || float32_le_quiet(fst0, fst1, &env->active_fpu.fp_status),
3369 float32_unordered_quiet(fsth1, fsth0, &env->active_fpu.fp_status) || float32_le_quiet(fsth0, fsth1, &env->active_fpu.fp_status))
3370 /* NOTE: the comma operator will make "cond" to eval to false,
3371 * but float32_unordered() is still called. */
3372 FOP_COND_PS(sf, (float32_unordered(fst1, fst0, &env->active_fpu.fp_status), 0),
3373 (float32_unordered(fsth1, fsth0, &env->active_fpu.fp_status), 0))
3374 FOP_COND_PS(ngle,float32_unordered(fst1, fst0, &env->active_fpu.fp_status),
3375 float32_unordered(fsth1, fsth0, &env->active_fpu.fp_status))
3376 FOP_COND_PS(seq, float32_eq(fst0, fst1, &env->active_fpu.fp_status),
3377 float32_eq(fsth0, fsth1, &env->active_fpu.fp_status))
3378 FOP_COND_PS(ngl, float32_unordered(fst1, fst0, &env->active_fpu.fp_status) || float32_eq(fst0, fst1, &env->active_fpu.fp_status),
3379 float32_unordered(fsth1, fsth0, &env->active_fpu.fp_status) || float32_eq(fsth0, fsth1, &env->active_fpu.fp_status))
3380 FOP_COND_PS(lt, float32_lt(fst0, fst1, &env->active_fpu.fp_status),
3381 float32_lt(fsth0, fsth1, &env->active_fpu.fp_status))
3382 FOP_COND_PS(nge, float32_unordered(fst1, fst0, &env->active_fpu.fp_status) || float32_lt(fst0, fst1, &env->active_fpu.fp_status),
3383 float32_unordered(fsth1, fsth0, &env->active_fpu.fp_status) || float32_lt(fsth0, fsth1, &env->active_fpu.fp_status))
3384 FOP_COND_PS(le, float32_le(fst0, fst1, &env->active_fpu.fp_status),
3385 float32_le(fsth0, fsth1, &env->active_fpu.fp_status))
3386 FOP_COND_PS(ngt, float32_unordered(fst1, fst0, &env->active_fpu.fp_status) || float32_le(fst0, fst1, &env->active_fpu.fp_status),
3387 float32_unordered(fsth1, fsth0, &env->active_fpu.fp_status) || float32_le(fsth0, fsth1, &env->active_fpu.fp_status))