2 * MIPS emulation helpers for qemu.
4 * Copyright (c) 2004-2005 Jocelyn Mayer
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
21 #include "dyngen-exec.h"
23 #include "host-utils.h"
27 #if !defined(CONFIG_USER_ONLY)
28 #include "softmmu_exec.h"
29 #endif /* !defined(CONFIG_USER_ONLY) */
31 #ifndef CONFIG_USER_ONLY
32 static inline void cpu_mips_tlb_flush (CPUState *env, int flush_global);
/*
 * Recompute the cached translation flags (env->hflags) from CP0_Status,
 * the FPU control registers and the ISA level.  Must be called whenever
 * one of those inputs changes, since translated code is specialized on
 * hflags.
 *
 * NOTE(review): several lines of this function (closing braces and the
 * tail of the clear-mask) are not visible in this excerpt.
 */
35 static inline void compute_hflags(CPUState *env)
/* Drop every derived flag, then rebuild each one below. */
37 env->hflags &= ~(MIPS_HFLAG_COP1X | MIPS_HFLAG_64 | MIPS_HFLAG_CP0 |
38 MIPS_HFLAG_F64 | MIPS_HFLAG_FPU | MIPS_HFLAG_KSU |
/* Outside exception level (EXL), error level (ERL) and debug mode the
   effective privilege comes from Status.KSU; otherwise it stays kernel. */
40 if (!(env->CP0_Status & (1 << CP0St_EXL)) &&
41 !(env->CP0_Status & (1 << CP0St_ERL)) &&
42 !(env->hflags & MIPS_HFLAG_DM)) {
43 env->hflags |= (env->CP0_Status >> CP0St_KSU) & MIPS_HFLAG_KSU;
45 #if defined(TARGET_MIPS64)
/* 64-bit instructions are usable outside user mode, or in user mode
   when Status.PX or Status.UX is set. */
46 if (((env->hflags & MIPS_HFLAG_KSU) != MIPS_HFLAG_UM) ||
47 (env->CP0_Status & (1 << CP0St_PX)) ||
48 (env->CP0_Status & (1 << CP0St_UX))) {
49 env->hflags |= MIPS_HFLAG_64;
/* Status.UX additionally enables the 64-bit user address space. */
51 if (env->CP0_Status & (1 << CP0St_UX)) {
52 env->hflags |= MIPS_HFLAG_UX;
/* CP0 is accessible with Status.CU0 set, or whenever not in
   user/supervisor mode (KSU bits clear). */
55 if ((env->CP0_Status & (1 << CP0St_CU0)) ||
56 !(env->hflags & MIPS_HFLAG_KSU)) {
57 env->hflags |= MIPS_HFLAG_CP0;
/* Status.CU1 gates the FPU; Status.FR selects the 64-bit FPR mode. */
59 if (env->CP0_Status & (1 << CP0St_CU1)) {
60 env->hflags |= MIPS_HFLAG_FPU;
62 if (env->CP0_Status & (1 << CP0St_FR)) {
63 env->hflags |= MIPS_HFLAG_F64;
/* COP1X (indexed/fused FP ops) availability depends on the ISA level. */
65 if (env->insn_flags & ISA_MIPS32R2) {
66 if (env->active_fpu.fcr0 & (1 << FCR0_F64)) {
67 env->hflags |= MIPS_HFLAG_COP1X;
69 } else if (env->insn_flags & ISA_MIPS32) {
70 if (env->hflags & MIPS_HFLAG_64) {
71 env->hflags |= MIPS_HFLAG_COP1X;
73 } else if (env->insn_flags & ISA_MIPS4) {
74 /* All supported MIPS IV CPUs use the XX (CU3) to enable
75 and disable the MIPS IV extensions to the MIPS III ISA.
76 Some other MIPS IV CPUs ignore the bit, so the check here
77 would be too restrictive for them. */
78 if (env->CP0_Status & (1 << CP0St_CU3)) {
79 env->hflags |= MIPS_HFLAG_COP1X;
84 /*****************************************************************************/
85 /* Exceptions processing helpers */
87 void helper_raise_exception_err (uint32_t exception, int error_code)
/* Record an exception (plus error code) in the CPU state.  Architectural
   exceptions (index < 0x100) are traced to the qemu log.
   NOTE(review): the longjmp back to the cpu loop (cpu_loop_exit) is not
   visible in this excerpt — confirm it follows these assignments. */
90 if (exception < 0x100)
91 qemu_log("%s: %d %d\n", __func__, exception, error_code);
93 env->exception_index = exception;
94 env->error_code = error_code;
98 void helper_raise_exception (uint32_t exception)
/* Convenience wrapper: raise an exception with error code 0. */
100 helper_raise_exception_err(exception, 0);
103 #if !defined(CONFIG_USER_ONLY)
104 static void do_restore_state (void *pc_ptr)
/* Restore the guest CPU state corresponding to the host PC inside a
   translated block, so a fault is reported at the correct guest
   instruction. */
106 TranslationBlock *tb;
107 unsigned long pc = (unsigned long) pc_ptr;
109 tb = tb_find_pc (pc);
/* NOTE(review): the NULL check on tb is not visible in this excerpt —
   presumably cpu_restore_state is only reached for a valid tb; confirm. */
111 cpu_restore_state(tb, env, pc);
116 #if defined(CONFIG_USER_ONLY)
117 #define HELPER_LD(name, insn, type) \
118 static inline type do_##name(target_ulong addr, int mem_idx) \
120 return (type) insn##_raw(addr); \
123 #define HELPER_LD(name, insn, type) \
124 static inline type do_##name(target_ulong addr, int mem_idx) \
128 case 0: return (type) insn##_kernel(addr); break; \
129 case 1: return (type) insn##_super(addr); break; \
131 case 2: return (type) insn##_user(addr); break; \
135 HELPER_LD(lbu, ldub, uint8_t)
136 HELPER_LD(lw, ldl, int32_t)
138 HELPER_LD(ld, ldq, int64_t)
142 #if defined(CONFIG_USER_ONLY)
143 #define HELPER_ST(name, insn, type) \
144 static inline void do_##name(target_ulong addr, type val, int mem_idx) \
146 insn##_raw(addr, val); \
149 #define HELPER_ST(name, insn, type) \
150 static inline void do_##name(target_ulong addr, type val, int mem_idx) \
154 case 0: insn##_kernel(addr, val); break; \
155 case 1: insn##_super(addr, val); break; \
157 case 2: insn##_user(addr, val); break; \
161 HELPER_ST(sb, stb, uint8_t)
162 HELPER_ST(sw, stl, uint32_t)
164 HELPER_ST(sd, stq, uint64_t)
168 target_ulong helper_clo (target_ulong arg1)
173 target_ulong helper_clz (target_ulong arg1)
178 #if defined(TARGET_MIPS64)
179 target_ulong helper_dclo (target_ulong arg1)
184 target_ulong helper_dclz (target_ulong arg1)
188 #endif /* TARGET_MIPS64 */
190 /* 64 bits arithmetic for 32 bits hosts */
191 static inline uint64_t get_HILO (void)
/* Return the active TC's HI[0]:LO[0] pair as one 64-bit value
   (HI in the high word, LO in the low word). */
193 return ((uint64_t)(env->active_tc.HI[0]) << 32) | (uint32_t)env->active_tc.LO[0];
196 static inline void set_HILO (uint64_t HILO)
/* Split a 64-bit value into LO[0] (low word) and HI[0] (high word);
   the int32_t casts sign-extend each word to target_ulong. */
198 env->active_tc.LO[0] = (int32_t)HILO;
199 env->active_tc.HI[0] = (int32_t)(HILO >> 32);
/*
 * Store a 64-bit multiply result into LO[0]/HI[0] and copy the
 * (sign-extended) high word into the caller's arg1, which the vr54xx
 * multiply helpers then return.
 *
 * BUG FIX: this used to be a static inline function; assigning to a
 * by-value parameter inside it never reached the caller, so the
 * helpers returned a stale arg1.  As a macro the assignment happens at
 * the call site, with no change to any caller.
 */
#define set_HIT0_LO(arg1, HILO)                                       \
    do {                                                              \
        uint64_t hilo_ = (HILO);                                      \
        env->active_tc.LO[0] = (int32_t)(hilo_ & 0xFFFFFFFF);         \
        (arg1) = env->active_tc.HI[0] = (int32_t)(hilo_ >> 32);       \
    } while (0)
/*
 * Store a 64-bit multiply result into LO[0]/HI[0] and copy the
 * (sign-extended) low word into the caller's arg1, which the vr54xx
 * multiply helpers then return.
 *
 * BUG FIX: same issue as set_HIT0_LO — as a static inline function the
 * assignment to the by-value arg1 was dead and callers returned a stale
 * value; the macro performs it at the call site.
 */
#define set_HI_LOT0(arg1, HILO)                                       \
    do {                                                              \
        uint64_t hilo_ = (HILO);                                      \
        (arg1) = env->active_tc.LO[0] = (int32_t)(hilo_ & 0xFFFFFFFF);\
        env->active_tc.HI[0] = (int32_t)(hilo_ >> 32);                \
    } while (0)
214 /* Multiplication variants of the vr54xx. */
215 target_ulong helper_muls (target_ulong arg1, target_ulong arg2)
217 set_HI_LOT0(arg1, 0 - ((int64_t)(int32_t)arg1 * (int64_t)(int32_t)arg2));
222 target_ulong helper_mulsu (target_ulong arg1, target_ulong arg2)
224 set_HI_LOT0(arg1, 0 - ((uint64_t)(uint32_t)arg1 * (uint64_t)(uint32_t)arg2));
229 target_ulong helper_macc (target_ulong arg1, target_ulong arg2)
231 set_HI_LOT0(arg1, ((int64_t)get_HILO()) + ((int64_t)(int32_t)arg1 * (int64_t)(int32_t)arg2));
236 target_ulong helper_macchi (target_ulong arg1, target_ulong arg2)
238 set_HIT0_LO(arg1, ((int64_t)get_HILO()) + ((int64_t)(int32_t)arg1 * (int64_t)(int32_t)arg2));
243 target_ulong helper_maccu (target_ulong arg1, target_ulong arg2)
245 set_HI_LOT0(arg1, ((uint64_t)get_HILO()) + ((uint64_t)(uint32_t)arg1 * (uint64_t)(uint32_t)arg2));
250 target_ulong helper_macchiu (target_ulong arg1, target_ulong arg2)
252 set_HIT0_LO(arg1, ((uint64_t)get_HILO()) + ((uint64_t)(uint32_t)arg1 * (uint64_t)(uint32_t)arg2));
257 target_ulong helper_msac (target_ulong arg1, target_ulong arg2)
259 set_HI_LOT0(arg1, ((int64_t)get_HILO()) - ((int64_t)(int32_t)arg1 * (int64_t)(int32_t)arg2));
264 target_ulong helper_msachi (target_ulong arg1, target_ulong arg2)
266 set_HIT0_LO(arg1, ((int64_t)get_HILO()) - ((int64_t)(int32_t)arg1 * (int64_t)(int32_t)arg2));
271 target_ulong helper_msacu (target_ulong arg1, target_ulong arg2)
273 set_HI_LOT0(arg1, ((uint64_t)get_HILO()) - ((uint64_t)(uint32_t)arg1 * (uint64_t)(uint32_t)arg2));
278 target_ulong helper_msachiu (target_ulong arg1, target_ulong arg2)
280 set_HIT0_LO(arg1, ((uint64_t)get_HILO()) - ((uint64_t)(uint32_t)arg1 * (uint64_t)(uint32_t)arg2));
285 target_ulong helper_mulhi (target_ulong arg1, target_ulong arg2)
287 set_HIT0_LO(arg1, (int64_t)(int32_t)arg1 * (int64_t)(int32_t)arg2);
292 target_ulong helper_mulhiu (target_ulong arg1, target_ulong arg2)
294 set_HIT0_LO(arg1, (uint64_t)(uint32_t)arg1 * (uint64_t)(uint32_t)arg2);
299 target_ulong helper_mulshi (target_ulong arg1, target_ulong arg2)
301 set_HIT0_LO(arg1, 0 - ((int64_t)(int32_t)arg1 * (int64_t)(int32_t)arg2));
306 target_ulong helper_mulshiu (target_ulong arg1, target_ulong arg2)
308 set_HIT0_LO(arg1, 0 - ((uint64_t)(uint32_t)arg1 * (uint64_t)(uint32_t)arg2));
314 void helper_dmult (target_ulong arg1, target_ulong arg2)
/* DMULT: signed 64x64 -> 128-bit multiply; low half goes to LO[0],
   high half to HI[0]. */
316 muls64(&(env->active_tc.LO[0]), &(env->active_tc.HI[0]), arg1, arg2);
319 void helper_dmultu (target_ulong arg1, target_ulong arg2)
/* DMULTU: unsigned 64x64 -> 128-bit multiply; low half goes to LO[0],
   high half to HI[0]. */
321 mulu64(&(env->active_tc.LO[0]), &(env->active_tc.HI[0]), arg1, arg2);
325 #ifndef CONFIG_USER_ONLY
327 static inline target_phys_addr_t do_translate_address(target_ulong address, int rw)
329 target_phys_addr_t lladdr;
331 lladdr = cpu_mips_translate_address(env, address, rw);
333 if (lladdr == -1LL) {
340 #define HELPER_LD_ATOMIC(name, insn) \
341 target_ulong helper_##name(target_ulong arg, int mem_idx) \
343 env->lladdr = do_translate_address(arg, 0); \
344 env->llval = do_##insn(arg, mem_idx); \
347 HELPER_LD_ATOMIC(ll, lw)
349 HELPER_LD_ATOMIC(lld, ld)
351 #undef HELPER_LD_ATOMIC
353 #define HELPER_ST_ATOMIC(name, ld_insn, st_insn, almask) \
354 target_ulong helper_##name(target_ulong arg1, target_ulong arg2, int mem_idx) \
358 if (arg2 & almask) { \
359 env->CP0_BadVAddr = arg2; \
360 helper_raise_exception(EXCP_AdES); \
362 if (do_translate_address(arg2, 1) == env->lladdr) { \
363 tmp = do_##ld_insn(arg2, mem_idx); \
364 if (tmp == env->llval) { \
365 do_##st_insn(arg2, arg1, mem_idx); \
371 HELPER_ST_ATOMIC(sc, lw, sw, 0x3)
373 HELPER_ST_ATOMIC(scd, ld, sd, 0x7)
375 #undef HELPER_ST_ATOMIC
378 #ifdef TARGET_WORDS_BIGENDIAN
379 #define GET_LMASK(v) ((v) & 3)
380 #define GET_OFFSET(addr, offset) (addr + (offset))
382 #define GET_LMASK(v) (((v) & 3) ^ 3)
383 #define GET_OFFSET(addr, offset) (addr - (offset))
386 target_ulong helper_lwl(target_ulong arg1, target_ulong arg2, int mem_idx)
390 tmp = do_lbu(arg2, mem_idx);
391 arg1 = (arg1 & 0x00FFFFFF) | (tmp << 24);
393 if (GET_LMASK(arg2) <= 2) {
394 tmp = do_lbu(GET_OFFSET(arg2, 1), mem_idx);
395 arg1 = (arg1 & 0xFF00FFFF) | (tmp << 16);
398 if (GET_LMASK(arg2) <= 1) {
399 tmp = do_lbu(GET_OFFSET(arg2, 2), mem_idx);
400 arg1 = (arg1 & 0xFFFF00FF) | (tmp << 8);
403 if (GET_LMASK(arg2) == 0) {
404 tmp = do_lbu(GET_OFFSET(arg2, 3), mem_idx);
405 arg1 = (arg1 & 0xFFFFFF00) | tmp;
407 return (int32_t)arg1;
410 target_ulong helper_lwr(target_ulong arg1, target_ulong arg2, int mem_idx)
414 tmp = do_lbu(arg2, mem_idx);
415 arg1 = (arg1 & 0xFFFFFF00) | tmp;
417 if (GET_LMASK(arg2) >= 1) {
418 tmp = do_lbu(GET_OFFSET(arg2, -1), mem_idx);
419 arg1 = (arg1 & 0xFFFF00FF) | (tmp << 8);
422 if (GET_LMASK(arg2) >= 2) {
423 tmp = do_lbu(GET_OFFSET(arg2, -2), mem_idx);
424 arg1 = (arg1 & 0xFF00FFFF) | (tmp << 16);
427 if (GET_LMASK(arg2) == 3) {
428 tmp = do_lbu(GET_OFFSET(arg2, -3), mem_idx);
429 arg1 = (arg1 & 0x00FFFFFF) | (tmp << 24);
431 return (int32_t)arg1;
434 void helper_swl(target_ulong arg1, target_ulong arg2, int mem_idx)
/* SWL (Store Word Left): store the most-significant bytes of arg1,
   byte by byte, from the possibly-unaligned address arg2 towards the
   next aligned word boundary.  GET_LMASK/GET_OFFSET hide the
   big/little-endian direction difference. */
436 do_sb(arg2, (uint8_t)(arg1 >> 24), mem_idx);
438 if (GET_LMASK(arg2) <= 2)
439 do_sb(GET_OFFSET(arg2, 1), (uint8_t)(arg1 >> 16), mem_idx);
441 if (GET_LMASK(arg2) <= 1)
442 do_sb(GET_OFFSET(arg2, 2), (uint8_t)(arg1 >> 8), mem_idx);
444 if (GET_LMASK(arg2) == 0)
445 do_sb(GET_OFFSET(arg2, 3), (uint8_t)arg1, mem_idx);
448 void helper_swr(target_ulong arg1, target_ulong arg2, int mem_idx)
/* SWR (Store Word Right): store the least-significant bytes of arg1,
   byte by byte, from the possibly-unaligned address arg2 back towards
   the previous aligned word boundary (mirror image of SWL). */
450 do_sb(arg2, (uint8_t)arg1, mem_idx);
452 if (GET_LMASK(arg2) >= 1)
453 do_sb(GET_OFFSET(arg2, -1), (uint8_t)(arg1 >> 8), mem_idx);
455 if (GET_LMASK(arg2) >= 2)
456 do_sb(GET_OFFSET(arg2, -2), (uint8_t)(arg1 >> 16), mem_idx);
458 if (GET_LMASK(arg2) == 3)
459 do_sb(GET_OFFSET(arg2, -3), (uint8_t)(arg1 >> 24), mem_idx);
462 #if defined(TARGET_MIPS64)
463 /* "half" load and stores. We must do the memory access inline,
464 or fault handling won't work. */
466 #ifdef TARGET_WORDS_BIGENDIAN
467 #define GET_LMASK64(v) ((v) & 7)
469 #define GET_LMASK64(v) (((v) & 7) ^ 7)
472 target_ulong helper_ldl(target_ulong arg1, target_ulong arg2, int mem_idx)
476 tmp = do_lbu(arg2, mem_idx);
477 arg1 = (arg1 & 0x00FFFFFFFFFFFFFFULL) | (tmp << 56);
479 if (GET_LMASK64(arg2) <= 6) {
480 tmp = do_lbu(GET_OFFSET(arg2, 1), mem_idx);
481 arg1 = (arg1 & 0xFF00FFFFFFFFFFFFULL) | (tmp << 48);
484 if (GET_LMASK64(arg2) <= 5) {
485 tmp = do_lbu(GET_OFFSET(arg2, 2), mem_idx);
486 arg1 = (arg1 & 0xFFFF00FFFFFFFFFFULL) | (tmp << 40);
489 if (GET_LMASK64(arg2) <= 4) {
490 tmp = do_lbu(GET_OFFSET(arg2, 3), mem_idx);
491 arg1 = (arg1 & 0xFFFFFF00FFFFFFFFULL) | (tmp << 32);
494 if (GET_LMASK64(arg2) <= 3) {
495 tmp = do_lbu(GET_OFFSET(arg2, 4), mem_idx);
496 arg1 = (arg1 & 0xFFFFFFFF00FFFFFFULL) | (tmp << 24);
499 if (GET_LMASK64(arg2) <= 2) {
500 tmp = do_lbu(GET_OFFSET(arg2, 5), mem_idx);
501 arg1 = (arg1 & 0xFFFFFFFFFF00FFFFULL) | (tmp << 16);
504 if (GET_LMASK64(arg2) <= 1) {
505 tmp = do_lbu(GET_OFFSET(arg2, 6), mem_idx);
506 arg1 = (arg1 & 0xFFFFFFFFFFFF00FFULL) | (tmp << 8);
509 if (GET_LMASK64(arg2) == 0) {
510 tmp = do_lbu(GET_OFFSET(arg2, 7), mem_idx);
511 arg1 = (arg1 & 0xFFFFFFFFFFFFFF00ULL) | tmp;
517 target_ulong helper_ldr(target_ulong arg1, target_ulong arg2, int mem_idx)
521 tmp = do_lbu(arg2, mem_idx);
522 arg1 = (arg1 & 0xFFFFFFFFFFFFFF00ULL) | tmp;
524 if (GET_LMASK64(arg2) >= 1) {
525 tmp = do_lbu(GET_OFFSET(arg2, -1), mem_idx);
526 arg1 = (arg1 & 0xFFFFFFFFFFFF00FFULL) | (tmp << 8);
529 if (GET_LMASK64(arg2) >= 2) {
530 tmp = do_lbu(GET_OFFSET(arg2, -2), mem_idx);
531 arg1 = (arg1 & 0xFFFFFFFFFF00FFFFULL) | (tmp << 16);
534 if (GET_LMASK64(arg2) >= 3) {
535 tmp = do_lbu(GET_OFFSET(arg2, -3), mem_idx);
536 arg1 = (arg1 & 0xFFFFFFFF00FFFFFFULL) | (tmp << 24);
539 if (GET_LMASK64(arg2) >= 4) {
540 tmp = do_lbu(GET_OFFSET(arg2, -4), mem_idx);
541 arg1 = (arg1 & 0xFFFFFF00FFFFFFFFULL) | (tmp << 32);
544 if (GET_LMASK64(arg2) >= 5) {
545 tmp = do_lbu(GET_OFFSET(arg2, -5), mem_idx);
546 arg1 = (arg1 & 0xFFFF00FFFFFFFFFFULL) | (tmp << 40);
549 if (GET_LMASK64(arg2) >= 6) {
550 tmp = do_lbu(GET_OFFSET(arg2, -6), mem_idx);
551 arg1 = (arg1 & 0xFF00FFFFFFFFFFFFULL) | (tmp << 48);
554 if (GET_LMASK64(arg2) == 7) {
555 tmp = do_lbu(GET_OFFSET(arg2, -7), mem_idx);
556 arg1 = (arg1 & 0x00FFFFFFFFFFFFFFULL) | (tmp << 56);
562 void helper_sdl(target_ulong arg1, target_ulong arg2, int mem_idx)
564 do_sb(arg2, (uint8_t)(arg1 >> 56), mem_idx);
566 if (GET_LMASK64(arg2) <= 6)
567 do_sb(GET_OFFSET(arg2, 1), (uint8_t)(arg1 >> 48), mem_idx);
569 if (GET_LMASK64(arg2) <= 5)
570 do_sb(GET_OFFSET(arg2, 2), (uint8_t)(arg1 >> 40), mem_idx);
572 if (GET_LMASK64(arg2) <= 4)
573 do_sb(GET_OFFSET(arg2, 3), (uint8_t)(arg1 >> 32), mem_idx);
575 if (GET_LMASK64(arg2) <= 3)
576 do_sb(GET_OFFSET(arg2, 4), (uint8_t)(arg1 >> 24), mem_idx);
578 if (GET_LMASK64(arg2) <= 2)
579 do_sb(GET_OFFSET(arg2, 5), (uint8_t)(arg1 >> 16), mem_idx);
581 if (GET_LMASK64(arg2) <= 1)
582 do_sb(GET_OFFSET(arg2, 6), (uint8_t)(arg1 >> 8), mem_idx);
584 if (GET_LMASK64(arg2) <= 0)
585 do_sb(GET_OFFSET(arg2, 7), (uint8_t)arg1, mem_idx);
588 void helper_sdr(target_ulong arg1, target_ulong arg2, int mem_idx)
590 do_sb(arg2, (uint8_t)arg1, mem_idx);
592 if (GET_LMASK64(arg2) >= 1)
593 do_sb(GET_OFFSET(arg2, -1), (uint8_t)(arg1 >> 8), mem_idx);
595 if (GET_LMASK64(arg2) >= 2)
596 do_sb(GET_OFFSET(arg2, -2), (uint8_t)(arg1 >> 16), mem_idx);
598 if (GET_LMASK64(arg2) >= 3)
599 do_sb(GET_OFFSET(arg2, -3), (uint8_t)(arg1 >> 24), mem_idx);
601 if (GET_LMASK64(arg2) >= 4)
602 do_sb(GET_OFFSET(arg2, -4), (uint8_t)(arg1 >> 32), mem_idx);
604 if (GET_LMASK64(arg2) >= 5)
605 do_sb(GET_OFFSET(arg2, -5), (uint8_t)(arg1 >> 40), mem_idx);
607 if (GET_LMASK64(arg2) >= 6)
608 do_sb(GET_OFFSET(arg2, -6), (uint8_t)(arg1 >> 48), mem_idx);
610 if (GET_LMASK64(arg2) == 7)
611 do_sb(GET_OFFSET(arg2, -7), (uint8_t)(arg1 >> 56), mem_idx);
613 #endif /* TARGET_MIPS64 */
615 static const int multiple_regs[] = { 16, 17, 18, 19, 20, 21, 22, 23, 30 };
617 void helper_lwm (target_ulong addr, target_ulong reglist, uint32_t mem_idx)
619 target_ulong base_reglist = reglist & 0xf;
620 target_ulong do_r31 = reglist & 0x10;
621 #ifdef CONFIG_USER_ONLY
623 #define ldfun ldl_raw
625 uint32_t (*ldfun)(target_ulong);
629 case 0: ldfun = ldl_kernel; break;
630 case 1: ldfun = ldl_super; break;
632 case 2: ldfun = ldl_user; break;
636 if (base_reglist > 0 && base_reglist <= ARRAY_SIZE (multiple_regs)) {
639 for (i = 0; i < base_reglist; i++) {
640 env->active_tc.gpr[multiple_regs[i]] = (target_long) ldfun(addr);
646 env->active_tc.gpr[31] = (target_long) ldfun(addr);
650 void helper_swm (target_ulong addr, target_ulong reglist, uint32_t mem_idx)
652 target_ulong base_reglist = reglist & 0xf;
653 target_ulong do_r31 = reglist & 0x10;
654 #ifdef CONFIG_USER_ONLY
656 #define stfun stl_raw
658 void (*stfun)(target_ulong, uint32_t);
662 case 0: stfun = stl_kernel; break;
663 case 1: stfun = stl_super; break;
665 case 2: stfun = stl_user; break;
669 if (base_reglist > 0 && base_reglist <= ARRAY_SIZE (multiple_regs)) {
672 for (i = 0; i < base_reglist; i++) {
673 stfun(addr, env->active_tc.gpr[multiple_regs[i]]);
679 stfun(addr, env->active_tc.gpr[31]);
683 #if defined(TARGET_MIPS64)
684 void helper_ldm (target_ulong addr, target_ulong reglist, uint32_t mem_idx)
686 target_ulong base_reglist = reglist & 0xf;
687 target_ulong do_r31 = reglist & 0x10;
688 #ifdef CONFIG_USER_ONLY
690 #define ldfun ldq_raw
692 uint64_t (*ldfun)(target_ulong);
696 case 0: ldfun = ldq_kernel; break;
697 case 1: ldfun = ldq_super; break;
699 case 2: ldfun = ldq_user; break;
703 if (base_reglist > 0 && base_reglist <= ARRAY_SIZE (multiple_regs)) {
706 for (i = 0; i < base_reglist; i++) {
707 env->active_tc.gpr[multiple_regs[i]] = ldfun(addr);
713 env->active_tc.gpr[31] = ldfun(addr);
717 void helper_sdm (target_ulong addr, target_ulong reglist, uint32_t mem_idx)
719 target_ulong base_reglist = reglist & 0xf;
720 target_ulong do_r31 = reglist & 0x10;
721 #ifdef CONFIG_USER_ONLY
723 #define stfun stq_raw
725 void (*stfun)(target_ulong, uint64_t);
729 case 0: stfun = stq_kernel; break;
730 case 1: stfun = stq_super; break;
732 case 2: stfun = stq_user; break;
736 if (base_reglist > 0 && base_reglist <= ARRAY_SIZE (multiple_regs)) {
739 for (i = 0; i < base_reglist; i++) {
740 stfun(addr, env->active_tc.gpr[multiple_regs[i]]);
746 stfun(addr, env->active_tc.gpr[31]);
751 #ifndef CONFIG_USER_ONLY
753 static int mips_vpe_is_wfi(CPUState *c)
755 /* If the VPE is halted but otherwise active, it means it's waiting for
757 return c->halted && mips_vpe_active(c);
760 static inline void mips_vpe_wake(CPUState *c)
762 /* Dont set ->halted = 0 directly, let it be done via cpu_has_work
763 because there might be other conditions that state that c should
765 cpu_interrupt(c, CPU_INTERRUPT_WAKE);
768 static inline void mips_vpe_sleep(CPUState *c)
770 /* The VPE was shut off, really go to bed.
771 Reset any old _WAKE requests. */
773 cpu_reset_interrupt(c, CPU_INTERRUPT_WAKE);
776 static inline void mips_tc_wake(CPUState *c, int tc)
778 /* FIXME: TC reschedule. */
779 if (mips_vpe_active(c) && !mips_vpe_is_wfi(c)) {
784 static inline void mips_tc_sleep(CPUState *c, int tc)
786 /* FIXME: TC reschedule. */
787 if (!mips_vpe_active(c)) {
792 /* tc should point to an int with the value of the global TC index.
793 This function will transform it into a local index within the
796 FIXME: This code assumes that all VPEs have the same number of TCs,
797 which depends on runtime setup. Can probably be fixed by
798 walking the list of CPUStates. */
799 static CPUState *mips_cpu_map_tc(int *tc)
802 int vpe_idx, nr_threads = env->nr_threads;
/* Without the MVP privilege a TC may only address itself. */
805 if (!(env->CP0_VPEConf0 & (1 << CP0VPEC0_MVP))) {
806 /* Not allowed to address other CPUs. */
807 *tc = env->current_tc;
/* Map the global TC index to a (VPE, local TC) pair, assuming an
   equal number of threads per VPE (see FIXME above). */
811 vpe_idx = tc_idx / nr_threads;
812 *tc = tc_idx % nr_threads;
813 other = qemu_get_cpu(vpe_idx);
/* Fall back to the current CPU if the VPE index is out of range. */
814 return other ? other : env;
817 /* The per VPE CP0_Status register shares some fields with the per TC
818 CP0_TCStatus registers. These fields are wired to the same registers,
819 so changes to either of them should be reflected on both registers.
821 Also, EntryHi shares the bottom 8 bit ASID with TCStauts.
823 These helper call synchronizes the regs for a given cpu. */
825 /* Called for updates to CP0_Status. */
826 static void sync_c0_status(CPUState *cpu, int tc)
828 int32_t tcstatus, *tcst;
829 uint32_t v = cpu->CP0_Status;
830 uint32_t cu, mx, asid, ksu;
831 uint32_t mask = ((1 << CP0TCSt_TCU3)
832 | (1 << CP0TCSt_TCU2)
833 | (1 << CP0TCSt_TCU1)
834 | (1 << CP0TCSt_TCU0)
836 | (3 << CP0TCSt_TKSU)
837 | (0xff << CP0TCSt_TASID));
839 cu = (v >> CP0St_CU0) & 0xf;
840 mx = (v >> CP0St_MX) & 0x1;
841 ksu = (v >> CP0St_KSU) & 0x3;
842 asid = env->CP0_EntryHi & 0xff;
844 tcstatus = cu << CP0TCSt_TCU0;
845 tcstatus |= mx << CP0TCSt_TMX;
846 tcstatus |= ksu << CP0TCSt_TKSU;
849 if (tc == cpu->current_tc) {
850 tcst = &cpu->active_tc.CP0_TCStatus;
852 tcst = &cpu->tcs[tc].CP0_TCStatus;
860 /* Called for updates to CP0_TCStatus. */
861 static void sync_c0_tcstatus(CPUState *cpu, int tc, target_ulong v)
864 uint32_t tcu, tmx, tasid, tksu;
865 uint32_t mask = ((1 << CP0St_CU3)
872 tcu = (v >> CP0TCSt_TCU0) & 0xf;
873 tmx = (v >> CP0TCSt_TMX) & 0x1;
875 tksu = (v >> CP0TCSt_TKSU) & 0x3;
877 status = tcu << CP0St_CU0;
878 status |= tmx << CP0St_MX;
879 status |= tksu << CP0St_KSU;
881 cpu->CP0_Status &= ~mask;
882 cpu->CP0_Status |= status;
884 /* Sync the TASID with EntryHi. */
885 cpu->CP0_EntryHi &= ~0xff;
886 cpu->CP0_EntryHi = tasid;
891 /* Called for updates to CP0_EntryHi. */
892 static void sync_c0_entryhi(CPUState *cpu, int tc)
895 uint32_t asid, v = cpu->CP0_EntryHi;
899 if (tc == cpu->current_tc) {
900 tcst = &cpu->active_tc.CP0_TCStatus;
902 tcst = &cpu->tcs[tc].CP0_TCStatus;
910 target_ulong helper_mfc0_mvpcontrol (void)
912 return env->mvp->CP0_MVPControl;
915 target_ulong helper_mfc0_mvpconf0 (void)
917 return env->mvp->CP0_MVPConf0;
920 target_ulong helper_mfc0_mvpconf1 (void)
922 return env->mvp->CP0_MVPConf1;
925 target_ulong helper_mfc0_random (void)
927 return (int32_t)cpu_mips_get_random(env);
930 target_ulong helper_mfc0_tcstatus (void)
932 return env->active_tc.CP0_TCStatus;
935 target_ulong helper_mftc0_tcstatus(void)
937 int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
938 CPUState *other = mips_cpu_map_tc(&other_tc);
940 if (other_tc == other->current_tc)
941 return other->active_tc.CP0_TCStatus;
943 return other->tcs[other_tc].CP0_TCStatus;
946 target_ulong helper_mfc0_tcbind (void)
948 return env->active_tc.CP0_TCBind;
951 target_ulong helper_mftc0_tcbind(void)
953 int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
954 CPUState *other = mips_cpu_map_tc(&other_tc);
956 if (other_tc == other->current_tc)
957 return other->active_tc.CP0_TCBind;
959 return other->tcs[other_tc].CP0_TCBind;
962 target_ulong helper_mfc0_tcrestart (void)
964 return env->active_tc.PC;
967 target_ulong helper_mftc0_tcrestart(void)
969 int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
970 CPUState *other = mips_cpu_map_tc(&other_tc);
972 if (other_tc == other->current_tc)
973 return other->active_tc.PC;
975 return other->tcs[other_tc].PC;
978 target_ulong helper_mfc0_tchalt (void)
980 return env->active_tc.CP0_TCHalt;
983 target_ulong helper_mftc0_tchalt(void)
985 int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
986 CPUState *other = mips_cpu_map_tc(&other_tc);
988 if (other_tc == other->current_tc)
989 return other->active_tc.CP0_TCHalt;
991 return other->tcs[other_tc].CP0_TCHalt;
994 target_ulong helper_mfc0_tccontext (void)
996 return env->active_tc.CP0_TCContext;
999 target_ulong helper_mftc0_tccontext(void)
1001 int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1002 CPUState *other = mips_cpu_map_tc(&other_tc);
1004 if (other_tc == other->current_tc)
1005 return other->active_tc.CP0_TCContext;
1007 return other->tcs[other_tc].CP0_TCContext;
1010 target_ulong helper_mfc0_tcschedule (void)
1012 return env->active_tc.CP0_TCSchedule;
1015 target_ulong helper_mftc0_tcschedule(void)
1017 int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1018 CPUState *other = mips_cpu_map_tc(&other_tc);
1020 if (other_tc == other->current_tc)
1021 return other->active_tc.CP0_TCSchedule;
1023 return other->tcs[other_tc].CP0_TCSchedule;
1026 target_ulong helper_mfc0_tcschefback (void)
1028 return env->active_tc.CP0_TCScheFBack;
1031 target_ulong helper_mftc0_tcschefback(void)
1033 int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1034 CPUState *other = mips_cpu_map_tc(&other_tc);
1036 if (other_tc == other->current_tc)
1037 return other->active_tc.CP0_TCScheFBack;
1039 return other->tcs[other_tc].CP0_TCScheFBack;
1042 target_ulong helper_mfc0_count (void)
1044 return (int32_t)cpu_mips_get_count(env);
1047 target_ulong helper_mftc0_entryhi(void)
1049 int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1050 CPUState *other = mips_cpu_map_tc(&other_tc);
1052 return other->CP0_EntryHi;
1055 target_ulong helper_mftc0_cause(void)
/* MFTC0 read of Cause on the TC selected by VPEControl.TargTC. */
1057 int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1059 CPUState *other = mips_cpu_map_tc(&other_tc);
/* NOTE(review): both branches read the same per-VPE CP0_Cause — there
   is no per-TC Cause copy, so the if/else is redundant as written. */
1061 if (other_tc == other->current_tc) {
1062 tccause = other->CP0_Cause;
1064 tccause = other->CP0_Cause;
1070 target_ulong helper_mftc0_status(void)
1072 int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1073 CPUState *other = mips_cpu_map_tc(&other_tc);
1075 return other->CP0_Status;
1078 target_ulong helper_mfc0_lladdr (void)
1080 return (int32_t)(env->lladdr >> env->CP0_LLAddr_shift);
1083 target_ulong helper_mfc0_watchlo (uint32_t sel)
1085 return (int32_t)env->CP0_WatchLo[sel];
1088 target_ulong helper_mfc0_watchhi (uint32_t sel)
1090 return env->CP0_WatchHi[sel];
1093 target_ulong helper_mfc0_debug (void)
1095 target_ulong t0 = env->CP0_Debug;
1096 if (env->hflags & MIPS_HFLAG_DM)
1097 t0 |= 1 << CP0DB_DM;
1102 target_ulong helper_mftc0_debug(void)
1104 int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1106 CPUState *other = mips_cpu_map_tc(&other_tc);
1108 if (other_tc == other->current_tc)
1109 tcstatus = other->active_tc.CP0_Debug_tcstatus;
1111 tcstatus = other->tcs[other_tc].CP0_Debug_tcstatus;
1113 /* XXX: Might be wrong, check with EJTAG spec. */
1114 return (other->CP0_Debug & ~((1 << CP0DB_SSt) | (1 << CP0DB_Halt))) |
1115 (tcstatus & ((1 << CP0DB_SSt) | (1 << CP0DB_Halt)));
1118 #if defined(TARGET_MIPS64)
1119 target_ulong helper_dmfc0_tcrestart (void)
1121 return env->active_tc.PC;
1124 target_ulong helper_dmfc0_tchalt (void)
1126 return env->active_tc.CP0_TCHalt;
1129 target_ulong helper_dmfc0_tccontext (void)
1131 return env->active_tc.CP0_TCContext;
1134 target_ulong helper_dmfc0_tcschedule (void)
1136 return env->active_tc.CP0_TCSchedule;
1139 target_ulong helper_dmfc0_tcschefback (void)
1141 return env->active_tc.CP0_TCScheFBack;
1144 target_ulong helper_dmfc0_lladdr (void)
1146 return env->lladdr >> env->CP0_LLAddr_shift;
1149 target_ulong helper_dmfc0_watchlo (uint32_t sel)
1151 return env->CP0_WatchLo[sel];
1153 #endif /* TARGET_MIPS64 */
1155 void helper_mtc0_index (target_ulong arg1)
1158 unsigned int tmp = env->tlb->nb_tlb;
1164 env->CP0_Index = (env->CP0_Index & 0x80000000) | (arg1 & (num - 1));
1167 void helper_mtc0_mvpcontrol (target_ulong arg1)
1172 if (env->CP0_VPEConf0 & (1 << CP0VPEC0_MVP))
1173 mask |= (1 << CP0MVPCo_CPA) | (1 << CP0MVPCo_VPC) |
1174 (1 << CP0MVPCo_EVP);
1175 if (env->mvp->CP0_MVPControl & (1 << CP0MVPCo_VPC))
1176 mask |= (1 << CP0MVPCo_STLB);
1177 newval = (env->mvp->CP0_MVPControl & ~mask) | (arg1 & mask);
1179 // TODO: Enable/disable shared TLB, enable/disable VPEs.
1181 env->mvp->CP0_MVPControl = newval;
1184 void helper_mtc0_vpecontrol (target_ulong arg1)
1189 mask = (1 << CP0VPECo_YSI) | (1 << CP0VPECo_GSI) |
1190 (1 << CP0VPECo_TE) | (0xff << CP0VPECo_TargTC);
1191 newval = (env->CP0_VPEControl & ~mask) | (arg1 & mask);
1193 /* Yield scheduler intercept not implemented. */
1194 /* Gating storage scheduler intercept not implemented. */
1196 // TODO: Enable/disable TCs.
1198 env->CP0_VPEControl = newval;
1201 void helper_mttc0_vpecontrol(target_ulong arg1)
1203 int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1204 CPUState *other = mips_cpu_map_tc(&other_tc);
1208 mask = (1 << CP0VPECo_YSI) | (1 << CP0VPECo_GSI) |
1209 (1 << CP0VPECo_TE) | (0xff << CP0VPECo_TargTC);
1210 newval = (other->CP0_VPEControl & ~mask) | (arg1 & mask);
1212 /* TODO: Enable/disable TCs. */
1214 other->CP0_VPEControl = newval;
1217 target_ulong helper_mftc0_vpecontrol(void)
1219 int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1220 CPUState *other = mips_cpu_map_tc(&other_tc);
1221 /* FIXME: Mask away return zero on read bits. */
1222 return other->CP0_VPEControl;
1225 target_ulong helper_mftc0_vpeconf0(void)
1227 int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1228 CPUState *other = mips_cpu_map_tc(&other_tc);
1230 return other->CP0_VPEConf0;
1233 void helper_mtc0_vpeconf0 (target_ulong arg1)
1238 if (env->CP0_VPEConf0 & (1 << CP0VPEC0_MVP)) {
1239 if (env->CP0_VPEConf0 & (1 << CP0VPEC0_VPA))
1240 mask |= (0xff << CP0VPEC0_XTC);
1241 mask |= (1 << CP0VPEC0_MVP) | (1 << CP0VPEC0_VPA);
1243 newval = (env->CP0_VPEConf0 & ~mask) | (arg1 & mask);
1245 // TODO: TC exclusive handling due to ERL/EXL.
1247 env->CP0_VPEConf0 = newval;
1250 void helper_mttc0_vpeconf0(target_ulong arg1)
1252 int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1253 CPUState *other = mips_cpu_map_tc(&other_tc);
1257 mask |= (1 << CP0VPEC0_MVP) | (1 << CP0VPEC0_VPA);
1258 newval = (other->CP0_VPEConf0 & ~mask) | (arg1 & mask);
1260 /* TODO: TC exclusive handling due to ERL/EXL. */
1261 other->CP0_VPEConf0 = newval;
1264 void helper_mtc0_vpeconf1 (target_ulong arg1)
1269 if (env->mvp->CP0_MVPControl & (1 << CP0MVPCo_VPC))
1270 mask |= (0xff << CP0VPEC1_NCX) | (0xff << CP0VPEC1_NCP2) |
1271 (0xff << CP0VPEC1_NCP1);
1272 newval = (env->CP0_VPEConf1 & ~mask) | (arg1 & mask);
1274 /* UDI not implemented. */
1275 /* CP2 not implemented. */
1277 // TODO: Handle FPU (CP1) binding.
1279 env->CP0_VPEConf1 = newval;
1282 void helper_mtc0_yqmask (target_ulong arg1)
1284 /* Yield qualifier inputs not implemented. */
1285 env->CP0_YQMask = 0x00000000;
1288 void helper_mtc0_vpeopt (target_ulong arg1)
1290 env->CP0_VPEOpt = arg1 & 0x0000ffff;
1293 void helper_mtc0_entrylo0 (target_ulong arg1)
1295 /* Large physaddr (PABITS) not implemented */
1296 /* 1k pages not implemented */
1297 env->CP0_EntryLo0 = arg1 & 0x3FFFFFFF;
1300 void helper_mtc0_tcstatus (target_ulong arg1)
1302 uint32_t mask = env->CP0_TCStatus_rw_bitmask;
1305 newval = (env->active_tc.CP0_TCStatus & ~mask) | (arg1 & mask);
1307 env->active_tc.CP0_TCStatus = newval;
1308 sync_c0_tcstatus(env, env->current_tc, newval);
1311 void helper_mttc0_tcstatus (target_ulong arg1)
1313 int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1314 CPUState *other = mips_cpu_map_tc(&other_tc);
1316 if (other_tc == other->current_tc)
1317 other->active_tc.CP0_TCStatus = arg1;
1319 other->tcs[other_tc].CP0_TCStatus = arg1;
1320 sync_c0_tcstatus(other, other_tc, arg1);
1323 void helper_mtc0_tcbind (target_ulong arg1)
1325 uint32_t mask = (1 << CP0TCBd_TBE);
1328 if (env->mvp->CP0_MVPControl & (1 << CP0MVPCo_VPC))
1329 mask |= (1 << CP0TCBd_CurVPE);
1330 newval = (env->active_tc.CP0_TCBind & ~mask) | (arg1 & mask);
1331 env->active_tc.CP0_TCBind = newval;
1334 void helper_mttc0_tcbind (target_ulong arg1)
1336 int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1337 uint32_t mask = (1 << CP0TCBd_TBE);
1339 CPUState *other = mips_cpu_map_tc(&other_tc);
1341 if (other->mvp->CP0_MVPControl & (1 << CP0MVPCo_VPC))
1342 mask |= (1 << CP0TCBd_CurVPE);
1343 if (other_tc == other->current_tc) {
1344 newval = (other->active_tc.CP0_TCBind & ~mask) | (arg1 & mask);
1345 other->active_tc.CP0_TCBind = newval;
1347 newval = (other->tcs[other_tc].CP0_TCBind & ~mask) | (arg1 & mask);
1348 other->tcs[other_tc].CP0_TCBind = newval;
1352 void helper_mtc0_tcrestart (target_ulong arg1)
1354 env->active_tc.PC = arg1;
1355 env->active_tc.CP0_TCStatus &= ~(1 << CP0TCSt_TDS);
1357 /* MIPS16 not implemented. */
1360 void helper_mttc0_tcrestart (target_ulong arg1)
1362 int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1363 CPUState *other = mips_cpu_map_tc(&other_tc);
1365 if (other_tc == other->current_tc) {
1366 other->active_tc.PC = arg1;
1367 other->active_tc.CP0_TCStatus &= ~(1 << CP0TCSt_TDS);
1368 other->lladdr = 0ULL;
1369 /* MIPS16 not implemented. */
1371 other->tcs[other_tc].PC = arg1;
1372 other->tcs[other_tc].CP0_TCStatus &= ~(1 << CP0TCSt_TDS);
1373 other->lladdr = 0ULL;
1374 /* MIPS16 not implemented. */
1378 void helper_mtc0_tchalt (target_ulong arg1)
1380 env->active_tc.CP0_TCHalt = arg1 & 0x1;
1382 // TODO: Halt TC / Restart (if allocated+active) TC.
1383 if (env->active_tc.CP0_TCHalt & 1) {
1384 mips_tc_sleep(env, env->current_tc);
1386 mips_tc_wake(env, env->current_tc);
1390 void helper_mttc0_tchalt (target_ulong arg1)
1392 int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1393 CPUState *other = mips_cpu_map_tc(&other_tc);
1395 // TODO: Halt TC / Restart (if allocated+active) TC.
1397 if (other_tc == other->current_tc)
1398 other->active_tc.CP0_TCHalt = arg1;
1400 other->tcs[other_tc].CP0_TCHalt = arg1;
1403 mips_tc_sleep(other, other_tc);
1405 mips_tc_wake(other, other_tc);
1409 void helper_mtc0_tccontext (target_ulong arg1)
1411 env->active_tc.CP0_TCContext = arg1;
1414 void helper_mttc0_tccontext (target_ulong arg1)
1416 int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1417 CPUState *other = mips_cpu_map_tc(&other_tc);
1419 if (other_tc == other->current_tc)
1420 other->active_tc.CP0_TCContext = arg1;
1422 other->tcs[other_tc].CP0_TCContext = arg1;
1425 void helper_mtc0_tcschedule (target_ulong arg1)
1427 env->active_tc.CP0_TCSchedule = arg1;
1430 void helper_mttc0_tcschedule (target_ulong arg1)
1432 int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1433 CPUState *other = mips_cpu_map_tc(&other_tc);
1435 if (other_tc == other->current_tc)
1436 other->active_tc.CP0_TCSchedule = arg1;
1438 other->tcs[other_tc].CP0_TCSchedule = arg1;
1441 void helper_mtc0_tcschefback (target_ulong arg1)
1443 env->active_tc.CP0_TCScheFBack = arg1;
1446 void helper_mttc0_tcschefback (target_ulong arg1)
1448 int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1449 CPUState *other = mips_cpu_map_tc(&other_tc);
1451 if (other_tc == other->current_tc)
1452 other->active_tc.CP0_TCScheFBack = arg1;
1454 other->tcs[other_tc].CP0_TCScheFBack = arg1;
1457 void helper_mtc0_entrylo1 (target_ulong arg1)
1459 /* Large physaddr (PABITS) not implemented */
1460 /* 1k pages not implemented */
1461 env->CP0_EntryLo1 = arg1 & 0x3FFFFFFF;
1464 void helper_mtc0_context (target_ulong arg1)
1466 env->CP0_Context = (env->CP0_Context & 0x007FFFFF) | (arg1 & ~0x007FFFFF);
1469 void helper_mtc0_pagemask (target_ulong arg1)
1471 /* 1k pages not implemented */
1472 env->CP0_PageMask = arg1 & (0x1FFFFFFF & (TARGET_PAGE_MASK << 1));
1475 void helper_mtc0_pagegrain (target_ulong arg1)
1477 /* SmartMIPS not implemented */
1478 /* Large physaddr (PABITS) not implemented */
1479 /* 1k pages not implemented */
1480 env->CP0_PageGrain = 0;
1483 void helper_mtc0_wired (target_ulong arg1)
1485 env->CP0_Wired = arg1 % env->tlb->nb_tlb;
1488 void helper_mtc0_srsconf0 (target_ulong arg1)
1490 env->CP0_SRSConf0 |= arg1 & env->CP0_SRSConf0_rw_bitmask;
1493 void helper_mtc0_srsconf1 (target_ulong arg1)
1495 env->CP0_SRSConf1 |= arg1 & env->CP0_SRSConf1_rw_bitmask;
1498 void helper_mtc0_srsconf2 (target_ulong arg1)
1500 env->CP0_SRSConf2 |= arg1 & env->CP0_SRSConf2_rw_bitmask;
1503 void helper_mtc0_srsconf3 (target_ulong arg1)
1505 env->CP0_SRSConf3 |= arg1 & env->CP0_SRSConf3_rw_bitmask;
1508 void helper_mtc0_srsconf4 (target_ulong arg1)
1510 env->CP0_SRSConf4 |= arg1 & env->CP0_SRSConf4_rw_bitmask;
1513 void helper_mtc0_hwrena (target_ulong arg1)
1515 env->CP0_HWREna = arg1 & 0x0000000F;
1518 void helper_mtc0_count (target_ulong arg1)
1520 cpu_mips_store_count(env, arg1);
1523 void helper_mtc0_entryhi (target_ulong arg1)
1525 target_ulong old, val;
1527 /* 1k pages not implemented */
1528 val = arg1 & ((TARGET_PAGE_MASK << 1) | 0xFF);
1529 #if defined(TARGET_MIPS64)
1530 val &= env->SEGMask;
1532 old = env->CP0_EntryHi;
1533 env->CP0_EntryHi = val;
1534 if (env->CP0_Config3 & (1 << CP0C3_MT)) {
1535 sync_c0_entryhi(env, env->current_tc);
1537 /* If the ASID changes, flush qemu's TLB. */
1538 if ((old & 0xFF) != (val & 0xFF))
1539 cpu_mips_tlb_flush(env, 1);
1542 void helper_mttc0_entryhi(target_ulong arg1)
1544 int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1545 CPUState *other = mips_cpu_map_tc(&other_tc);
1547 other->CP0_EntryHi = arg1;
1548 sync_c0_entryhi(other, other_tc);
1551 void helper_mtc0_compare (target_ulong arg1)
1553 cpu_mips_store_compare(env, arg1);
1556 void helper_mtc0_status (target_ulong arg1)
1559 uint32_t mask = env->CP0_Status_rw_bitmask;
1562 old = env->CP0_Status;
1563 env->CP0_Status = (env->CP0_Status & ~mask) | val;
1564 if (env->CP0_Config3 & (1 << CP0C3_MT)) {
1565 sync_c0_status(env, env->current_tc);
1567 compute_hflags(env);
1570 if (qemu_loglevel_mask(CPU_LOG_EXEC)) {
1571 qemu_log("Status %08x (%08x) => %08x (%08x) Cause %08x",
1572 old, old & env->CP0_Cause & CP0Ca_IP_mask,
1573 val, val & env->CP0_Cause & CP0Ca_IP_mask,
1575 switch (env->hflags & MIPS_HFLAG_KSU) {
1576 case MIPS_HFLAG_UM: qemu_log(", UM\n"); break;
1577 case MIPS_HFLAG_SM: qemu_log(", SM\n"); break;
1578 case MIPS_HFLAG_KM: qemu_log("\n"); break;
1579 default: cpu_abort(env, "Invalid MMU mode!\n"); break;
1584 void helper_mttc0_status(target_ulong arg1)
1586 int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1587 CPUState *other = mips_cpu_map_tc(&other_tc);
1589 other->CP0_Status = arg1 & ~0xf1000018;
1590 sync_c0_status(other, other_tc);
1593 void helper_mtc0_intctl (target_ulong arg1)
1595 /* vectored interrupts not implemented, no performance counters. */
1596 env->CP0_IntCtl = (env->CP0_IntCtl & ~0x000003e0) | (arg1 & 0x000003e0);
1599 void helper_mtc0_srsctl (target_ulong arg1)
1601 uint32_t mask = (0xf << CP0SRSCtl_ESS) | (0xf << CP0SRSCtl_PSS);
1602 env->CP0_SRSCtl = (env->CP0_SRSCtl & ~mask) | (arg1 & mask);
1605 static void mtc0_cause(CPUState *cpu, target_ulong arg1)
1607 uint32_t mask = 0x00C00300;
1608 uint32_t old = cpu->CP0_Cause;
1611 if (cpu->insn_flags & ISA_MIPS32R2) {
1612 mask |= 1 << CP0Ca_DC;
1615 cpu->CP0_Cause = (cpu->CP0_Cause & ~mask) | (arg1 & mask);
1617 if ((old ^ cpu->CP0_Cause) & (1 << CP0Ca_DC)) {
1618 if (cpu->CP0_Cause & (1 << CP0Ca_DC)) {
1619 cpu_mips_stop_count(cpu);
1621 cpu_mips_start_count(cpu);
1625 /* Set/reset software interrupts */
1626 for (i = 0 ; i < 2 ; i++) {
1627 if ((old ^ cpu->CP0_Cause) & (1 << (CP0Ca_IP + i))) {
1628 cpu_mips_soft_irq(cpu, i, cpu->CP0_Cause & (1 << (CP0Ca_IP + i)));
1633 void helper_mtc0_cause(target_ulong arg1)
1635 mtc0_cause(env, arg1);
1638 void helper_mttc0_cause(target_ulong arg1)
1640 int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1641 CPUState *other = mips_cpu_map_tc(&other_tc);
1643 mtc0_cause(other, arg1);
1646 target_ulong helper_mftc0_epc(void)
1648 int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1649 CPUState *other = mips_cpu_map_tc(&other_tc);
1651 return other->CP0_EPC;
1654 target_ulong helper_mftc0_ebase(void)
1656 int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1657 CPUState *other = mips_cpu_map_tc(&other_tc);
1659 return other->CP0_EBase;
1662 void helper_mtc0_ebase (target_ulong arg1)
1664 /* vectored interrupts not implemented */
1665 env->CP0_EBase = (env->CP0_EBase & ~0x3FFFF000) | (arg1 & 0x3FFFF000);
1668 void helper_mttc0_ebase(target_ulong arg1)
1670 int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1671 CPUState *other = mips_cpu_map_tc(&other_tc);
1672 other->CP0_EBase = (other->CP0_EBase & ~0x3FFFF000) | (arg1 & 0x3FFFF000);
1675 target_ulong helper_mftc0_configx(target_ulong idx)
1677 int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1678 CPUState *other = mips_cpu_map_tc(&other_tc);
1681 case 0: return other->CP0_Config0;
1682 case 1: return other->CP0_Config1;
1683 case 2: return other->CP0_Config2;
1684 case 3: return other->CP0_Config3;
1685 /* 4 and 5 are reserved. */
1686 case 6: return other->CP0_Config6;
1687 case 7: return other->CP0_Config7;
1694 void helper_mtc0_config0 (target_ulong arg1)
1696 env->CP0_Config0 = (env->CP0_Config0 & 0x81FFFFF8) | (arg1 & 0x00000007);
1699 void helper_mtc0_config2 (target_ulong arg1)
1701 /* tertiary/secondary caches not implemented */
1702 env->CP0_Config2 = (env->CP0_Config2 & 0x8FFF0FFF);
1705 void helper_mtc0_lladdr (target_ulong arg1)
1707 target_long mask = env->CP0_LLAddr_rw_bitmask;
1708 arg1 = arg1 << env->CP0_LLAddr_shift;
1709 env->lladdr = (env->lladdr & ~mask) | (arg1 & mask);
1712 void helper_mtc0_watchlo (target_ulong arg1, uint32_t sel)
1714 /* Watch exceptions for instructions, data loads, data stores
1716 env->CP0_WatchLo[sel] = (arg1 & ~0x7);
1719 void helper_mtc0_watchhi (target_ulong arg1, uint32_t sel)
1721 env->CP0_WatchHi[sel] = (arg1 & 0x40FF0FF8);
1722 env->CP0_WatchHi[sel] &= ~(env->CP0_WatchHi[sel] & arg1 & 0x7);
1725 void helper_mtc0_xcontext (target_ulong arg1)
1727 target_ulong mask = (1ULL << (env->SEGBITS - 7)) - 1;
1728 env->CP0_XContext = (env->CP0_XContext & mask) | (arg1 & ~mask);
1731 void helper_mtc0_framemask (target_ulong arg1)
1733 env->CP0_Framemask = arg1; /* XXX */
1736 void helper_mtc0_debug (target_ulong arg1)
1738 env->CP0_Debug = (env->CP0_Debug & 0x8C03FC1F) | (arg1 & 0x13300120);
1739 if (arg1 & (1 << CP0DB_DM))
1740 env->hflags |= MIPS_HFLAG_DM;
1742 env->hflags &= ~MIPS_HFLAG_DM;
1745 void helper_mttc0_debug(target_ulong arg1)
1747 int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1748 uint32_t val = arg1 & ((1 << CP0DB_SSt) | (1 << CP0DB_Halt));
1749 CPUState *other = mips_cpu_map_tc(&other_tc);
1751 /* XXX: Might be wrong, check with EJTAG spec. */
1752 if (other_tc == other->current_tc)
1753 other->active_tc.CP0_Debug_tcstatus = val;
1755 other->tcs[other_tc].CP0_Debug_tcstatus = val;
1756 other->CP0_Debug = (other->CP0_Debug &
1757 ((1 << CP0DB_SSt) | (1 << CP0DB_Halt))) |
1758 (arg1 & ~((1 << CP0DB_SSt) | (1 << CP0DB_Halt)));
1761 void helper_mtc0_performance0 (target_ulong arg1)
1763 env->CP0_Performance0 = arg1 & 0x000007ff;
1766 void helper_mtc0_taglo (target_ulong arg1)
1768 env->CP0_TagLo = arg1 & 0xFFFFFCF6;
1771 void helper_mtc0_datalo (target_ulong arg1)
1773 env->CP0_DataLo = arg1; /* XXX */
1776 void helper_mtc0_taghi (target_ulong arg1)
1778 env->CP0_TagHi = arg1; /* XXX */
1781 void helper_mtc0_datahi (target_ulong arg1)
1783 env->CP0_DataHi = arg1; /* XXX */
1786 /* MIPS MT functions */
1787 target_ulong helper_mftgpr(uint32_t sel)
1789 int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1790 CPUState *other = mips_cpu_map_tc(&other_tc);
1792 if (other_tc == other->current_tc)
1793 return other->active_tc.gpr[sel];
1795 return other->tcs[other_tc].gpr[sel];
1798 target_ulong helper_mftlo(uint32_t sel)
1800 int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1801 CPUState *other = mips_cpu_map_tc(&other_tc);
1803 if (other_tc == other->current_tc)
1804 return other->active_tc.LO[sel];
1806 return other->tcs[other_tc].LO[sel];
1809 target_ulong helper_mfthi(uint32_t sel)
1811 int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1812 CPUState *other = mips_cpu_map_tc(&other_tc);
1814 if (other_tc == other->current_tc)
1815 return other->active_tc.HI[sel];
1817 return other->tcs[other_tc].HI[sel];
1820 target_ulong helper_mftacx(uint32_t sel)
1822 int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1823 CPUState *other = mips_cpu_map_tc(&other_tc);
1825 if (other_tc == other->current_tc)
1826 return other->active_tc.ACX[sel];
1828 return other->tcs[other_tc].ACX[sel];
1831 target_ulong helper_mftdsp(void)
1833 int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1834 CPUState *other = mips_cpu_map_tc(&other_tc);
1836 if (other_tc == other->current_tc)
1837 return other->active_tc.DSPControl;
1839 return other->tcs[other_tc].DSPControl;
1842 void helper_mttgpr(target_ulong arg1, uint32_t sel)
1844 int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1845 CPUState *other = mips_cpu_map_tc(&other_tc);
1847 if (other_tc == other->current_tc)
1848 other->active_tc.gpr[sel] = arg1;
1850 other->tcs[other_tc].gpr[sel] = arg1;
1853 void helper_mttlo(target_ulong arg1, uint32_t sel)
1855 int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1856 CPUState *other = mips_cpu_map_tc(&other_tc);
1858 if (other_tc == other->current_tc)
1859 other->active_tc.LO[sel] = arg1;
1861 other->tcs[other_tc].LO[sel] = arg1;
1864 void helper_mtthi(target_ulong arg1, uint32_t sel)
1866 int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1867 CPUState *other = mips_cpu_map_tc(&other_tc);
1869 if (other_tc == other->current_tc)
1870 other->active_tc.HI[sel] = arg1;
1872 other->tcs[other_tc].HI[sel] = arg1;
1875 void helper_mttacx(target_ulong arg1, uint32_t sel)
1877 int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1878 CPUState *other = mips_cpu_map_tc(&other_tc);
1880 if (other_tc == other->current_tc)
1881 other->active_tc.ACX[sel] = arg1;
1883 other->tcs[other_tc].ACX[sel] = arg1;
1886 void helper_mttdsp(target_ulong arg1)
1888 int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1889 CPUState *other = mips_cpu_map_tc(&other_tc);
1891 if (other_tc == other->current_tc)
1892 other->active_tc.DSPControl = arg1;
1894 other->tcs[other_tc].DSPControl = arg1;
1897 /* MIPS MT functions */
1898 target_ulong helper_dmt(void)
1904 target_ulong helper_emt(void)
1910 target_ulong helper_dvpe(void)
1912 CPUState *other_cpu = first_cpu;
1913 target_ulong prev = env->mvp->CP0_MVPControl;
1916 /* Turn off all VPEs except the one executing the dvpe. */
1917 if (other_cpu != env) {
1918 other_cpu->mvp->CP0_MVPControl &= ~(1 << CP0MVPCo_EVP);
1919 mips_vpe_sleep(other_cpu);
1921 other_cpu = other_cpu->next_cpu;
1922 } while (other_cpu);
1926 target_ulong helper_evpe(void)
1928 CPUState *other_cpu = first_cpu;
1929 target_ulong prev = env->mvp->CP0_MVPControl;
1932 if (other_cpu != env
1933 /* If the VPE is WFI, dont distrub it's sleep. */
1934 && !mips_vpe_is_wfi(other_cpu)) {
1935 /* Enable the VPE. */
1936 other_cpu->mvp->CP0_MVPControl |= (1 << CP0MVPCo_EVP);
1937 mips_vpe_wake(other_cpu); /* And wake it up. */
1939 other_cpu = other_cpu->next_cpu;
1940 } while (other_cpu);
1943 #endif /* !CONFIG_USER_ONLY */
1945 void helper_fork(target_ulong arg1, target_ulong arg2)
1947 // arg1 = rt, arg2 = rs
1949 // TODO: store to TC register
1952 target_ulong helper_yield(target_ulong arg)
1954 target_long arg1 = arg;
1957 /* No scheduling policy implemented. */
1959 if (env->CP0_VPEControl & (1 << CP0VPECo_YSI) &&
1960 env->active_tc.CP0_TCStatus & (1 << CP0TCSt_DT)) {
1961 env->CP0_VPEControl &= ~(0x7 << CP0VPECo_EXCPT);
1962 env->CP0_VPEControl |= 4 << CP0VPECo_EXCPT;
1963 helper_raise_exception(EXCP_THREAD);
1966 } else if (arg1 == 0) {
1967 if (0 /* TODO: TC underflow */) {
1968 env->CP0_VPEControl &= ~(0x7 << CP0VPECo_EXCPT);
1969 helper_raise_exception(EXCP_THREAD);
1971 // TODO: Deallocate TC
1973 } else if (arg1 > 0) {
1974 /* Yield qualifier inputs not implemented. */
1975 env->CP0_VPEControl &= ~(0x7 << CP0VPECo_EXCPT);
1976 env->CP0_VPEControl |= 2 << CP0VPECo_EXCPT;
1977 helper_raise_exception(EXCP_THREAD);
1979 return env->CP0_YQMask;
1982 #ifndef CONFIG_USER_ONLY
1983 /* TLB management */
1984 static void cpu_mips_tlb_flush (CPUState *env, int flush_global)
1986 /* Flush qemu's TLB and discard all shadowed entries. */
1987 tlb_flush (env, flush_global);
1988 env->tlb->tlb_in_use = env->tlb->nb_tlb;
1991 static void r4k_mips_tlb_flush_extra (CPUState *env, int first)
1993 /* Discard entries from env->tlb[first] onwards. */
1994 while (env->tlb->tlb_in_use > first) {
1995 r4k_invalidate_tlb(env, --env->tlb->tlb_in_use, 0);
1999 static void r4k_fill_tlb (int idx)
2003 /* XXX: detect conflicting TLBs and raise a MCHECK exception when needed */
2004 tlb = &env->tlb->mmu.r4k.tlb[idx];
2005 tlb->VPN = env->CP0_EntryHi & (TARGET_PAGE_MASK << 1);
2006 #if defined(TARGET_MIPS64)
2007 tlb->VPN &= env->SEGMask;
2009 tlb->ASID = env->CP0_EntryHi & 0xFF;
2010 tlb->PageMask = env->CP0_PageMask;
2011 tlb->G = env->CP0_EntryLo0 & env->CP0_EntryLo1 & 1;
2012 tlb->V0 = (env->CP0_EntryLo0 & 2) != 0;
2013 tlb->D0 = (env->CP0_EntryLo0 & 4) != 0;
2014 tlb->C0 = (env->CP0_EntryLo0 >> 3) & 0x7;
2015 tlb->PFN[0] = (env->CP0_EntryLo0 >> 6) << 12;
2016 tlb->V1 = (env->CP0_EntryLo1 & 2) != 0;
2017 tlb->D1 = (env->CP0_EntryLo1 & 4) != 0;
2018 tlb->C1 = (env->CP0_EntryLo1 >> 3) & 0x7;
2019 tlb->PFN[1] = (env->CP0_EntryLo1 >> 6) << 12;
2022 void r4k_helper_tlbwi (void)
2026 idx = (env->CP0_Index & ~0x80000000) % env->tlb->nb_tlb;
2028 /* Discard cached TLB entries. We could avoid doing this if the
2029 tlbwi is just upgrading access permissions on the current entry;
2030 that might be a further win. */
2031 r4k_mips_tlb_flush_extra (env, env->tlb->nb_tlb);
2033 r4k_invalidate_tlb(env, idx, 0);
2037 void r4k_helper_tlbwr (void)
2039 int r = cpu_mips_get_random(env);
2041 r4k_invalidate_tlb(env, r, 1);
2045 void r4k_helper_tlbp (void)
2054 ASID = env->CP0_EntryHi & 0xFF;
2055 for (i = 0; i < env->tlb->nb_tlb; i++) {
2056 tlb = &env->tlb->mmu.r4k.tlb[i];
2057 /* 1k pages are not supported. */
2058 mask = tlb->PageMask | ~(TARGET_PAGE_MASK << 1);
2059 tag = env->CP0_EntryHi & ~mask;
2060 VPN = tlb->VPN & ~mask;
2061 /* Check ASID, virtual page number & size */
2062 if ((tlb->G == 1 || tlb->ASID == ASID) && VPN == tag) {
2068 if (i == env->tlb->nb_tlb) {
2069 /* No match. Discard any shadow entries, if any of them match. */
2070 for (i = env->tlb->nb_tlb; i < env->tlb->tlb_in_use; i++) {
2071 tlb = &env->tlb->mmu.r4k.tlb[i];
2072 /* 1k pages are not supported. */
2073 mask = tlb->PageMask | ~(TARGET_PAGE_MASK << 1);
2074 tag = env->CP0_EntryHi & ~mask;
2075 VPN = tlb->VPN & ~mask;
2076 /* Check ASID, virtual page number & size */
2077 if ((tlb->G == 1 || tlb->ASID == ASID) && VPN == tag) {
2078 r4k_mips_tlb_flush_extra (env, i);
2083 env->CP0_Index |= 0x80000000;
2087 void r4k_helper_tlbr (void)
2093 ASID = env->CP0_EntryHi & 0xFF;
2094 idx = (env->CP0_Index & ~0x80000000) % env->tlb->nb_tlb;
2095 tlb = &env->tlb->mmu.r4k.tlb[idx];
2097 /* If this will change the current ASID, flush qemu's TLB. */
2098 if (ASID != tlb->ASID)
2099 cpu_mips_tlb_flush (env, 1);
2101 r4k_mips_tlb_flush_extra(env, env->tlb->nb_tlb);
2103 env->CP0_EntryHi = tlb->VPN | tlb->ASID;
2104 env->CP0_PageMask = tlb->PageMask;
2105 env->CP0_EntryLo0 = tlb->G | (tlb->V0 << 1) | (tlb->D0 << 2) |
2106 (tlb->C0 << 3) | (tlb->PFN[0] >> 6);
2107 env->CP0_EntryLo1 = tlb->G | (tlb->V1 << 1) | (tlb->D1 << 2) |
2108 (tlb->C1 << 3) | (tlb->PFN[1] >> 6);
2111 void helper_tlbwi(void)
2113 env->tlb->helper_tlbwi();
2116 void helper_tlbwr(void)
2118 env->tlb->helper_tlbwr();
2121 void helper_tlbp(void)
2123 env->tlb->helper_tlbp();
2126 void helper_tlbr(void)
2128 env->tlb->helper_tlbr();
2132 target_ulong helper_di (void)
2134 target_ulong t0 = env->CP0_Status;
2136 env->CP0_Status = t0 & ~(1 << CP0St_IE);
2140 target_ulong helper_ei (void)
2142 target_ulong t0 = env->CP0_Status;
2144 env->CP0_Status = t0 | (1 << CP0St_IE);
2148 static void debug_pre_eret (void)
2150 if (qemu_loglevel_mask(CPU_LOG_EXEC)) {
2151 qemu_log("ERET: PC " TARGET_FMT_lx " EPC " TARGET_FMT_lx,
2152 env->active_tc.PC, env->CP0_EPC);
2153 if (env->CP0_Status & (1 << CP0St_ERL))
2154 qemu_log(" ErrorEPC " TARGET_FMT_lx, env->CP0_ErrorEPC);
2155 if (env->hflags & MIPS_HFLAG_DM)
2156 qemu_log(" DEPC " TARGET_FMT_lx, env->CP0_DEPC);
2161 static void debug_post_eret (void)
2163 if (qemu_loglevel_mask(CPU_LOG_EXEC)) {
2164 qemu_log(" => PC " TARGET_FMT_lx " EPC " TARGET_FMT_lx,
2165 env->active_tc.PC, env->CP0_EPC);
2166 if (env->CP0_Status & (1 << CP0St_ERL))
2167 qemu_log(" ErrorEPC " TARGET_FMT_lx, env->CP0_ErrorEPC);
2168 if (env->hflags & MIPS_HFLAG_DM)
2169 qemu_log(" DEPC " TARGET_FMT_lx, env->CP0_DEPC);
2170 switch (env->hflags & MIPS_HFLAG_KSU) {
2171 case MIPS_HFLAG_UM: qemu_log(", UM\n"); break;
2172 case MIPS_HFLAG_SM: qemu_log(", SM\n"); break;
2173 case MIPS_HFLAG_KM: qemu_log("\n"); break;
2174 default: cpu_abort(env, "Invalid MMU mode!\n"); break;
2179 static void set_pc (target_ulong error_pc)
2181 env->active_tc.PC = error_pc & ~(target_ulong)1;
2183 env->hflags |= MIPS_HFLAG_M16;
2185 env->hflags &= ~(MIPS_HFLAG_M16);
2189 void helper_eret (void)
2192 if (env->CP0_Status & (1 << CP0St_ERL)) {
2193 set_pc(env->CP0_ErrorEPC);
2194 env->CP0_Status &= ~(1 << CP0St_ERL);
2196 set_pc(env->CP0_EPC);
2197 env->CP0_Status &= ~(1 << CP0St_EXL);
2199 compute_hflags(env);
2204 void helper_deret (void)
2207 set_pc(env->CP0_DEPC);
2209 env->hflags &= MIPS_HFLAG_DM;
2210 compute_hflags(env);
2214 #endif /* !CONFIG_USER_ONLY */
2216 target_ulong helper_rdhwr_cpunum(void)
2218 if ((env->hflags & MIPS_HFLAG_CP0) ||
2219 (env->CP0_HWREna & (1 << 0)))
2220 return env->CP0_EBase & 0x3ff;
2222 helper_raise_exception(EXCP_RI);
2227 target_ulong helper_rdhwr_synci_step(void)
2229 if ((env->hflags & MIPS_HFLAG_CP0) ||
2230 (env->CP0_HWREna & (1 << 1)))
2231 return env->SYNCI_Step;
2233 helper_raise_exception(EXCP_RI);
2238 target_ulong helper_rdhwr_cc(void)
2240 if ((env->hflags & MIPS_HFLAG_CP0) ||
2241 (env->CP0_HWREna & (1 << 2)))
2242 return env->CP0_Count;
2244 helper_raise_exception(EXCP_RI);
2249 target_ulong helper_rdhwr_ccres(void)
2251 if ((env->hflags & MIPS_HFLAG_CP0) ||
2252 (env->CP0_HWREna & (1 << 3)))
2255 helper_raise_exception(EXCP_RI);
2260 void helper_pmon (int function)
2264 case 2: /* TODO: char inbyte(int waitflag); */
2265 if (env->active_tc.gpr[4] == 0)
2266 env->active_tc.gpr[2] = -1;
2268 case 11: /* TODO: char inbyte (void); */
2269 env->active_tc.gpr[2] = -1;
2273 printf("%c", (char)(env->active_tc.gpr[4] & 0xFF));
2279 unsigned char *fmt = (void *)(unsigned long)env->active_tc.gpr[4];
2286 void helper_wait (void)
2289 cpu_reset_interrupt(env, CPU_INTERRUPT_WAKE);
2290 helper_raise_exception(EXCP_HLT);
2293 #if !defined(CONFIG_USER_ONLY)
2295 static void do_unaligned_access (target_ulong addr, int is_write, int is_user, void *retaddr);
2297 #define MMUSUFFIX _mmu
2298 #define ALIGNED_ONLY
2301 #include "softmmu_template.h"
2304 #include "softmmu_template.h"
2307 #include "softmmu_template.h"
2310 #include "softmmu_template.h"
2312 static void do_unaligned_access (target_ulong addr, int is_write, int is_user, void *retaddr)
2314 env->CP0_BadVAddr = addr;
2315 do_restore_state (retaddr);
2316 helper_raise_exception ((is_write == 1) ? EXCP_AdES : EXCP_AdEL);
2319 void tlb_fill(CPUState *env1, target_ulong addr, int is_write, int mmu_idx,
2322 TranslationBlock *tb;
2323 CPUState *saved_env;
2329 ret = cpu_mips_handle_mmu_fault(env, addr, is_write, mmu_idx);
2332 /* now we have a real cpu fault */
2333 pc = (unsigned long)retaddr;
2334 tb = tb_find_pc(pc);
2336 /* the PC is inside the translated code. It means that we have
2337 a virtual CPU fault */
2338 cpu_restore_state(tb, env, pc);
2341 helper_raise_exception_err(env->exception_index, env->error_code);
2346 void cpu_unassigned_access(CPUState *env1, target_phys_addr_t addr,
2347 int is_write, int is_exec, int unused, int size)
2352 helper_raise_exception(EXCP_IBE);
2354 helper_raise_exception(EXCP_DBE);
2356 #endif /* !CONFIG_USER_ONLY */
2358 /* Complex FPU operations which may need stack space. */
2360 #define FLOAT_ONE32 make_float32(0x3f8 << 20)
2361 #define FLOAT_ONE64 make_float64(0x3ffULL << 52)
2362 #define FLOAT_TWO32 make_float32(1 << 30)
2363 #define FLOAT_TWO64 make_float64(1ULL << 62)
2364 #define FLOAT_QNAN32 0x7fbfffff
2365 #define FLOAT_QNAN64 0x7ff7ffffffffffffULL
2366 #define FLOAT_SNAN32 0x7fffffff
2367 #define FLOAT_SNAN64 0x7fffffffffffffffULL
2369 /* convert MIPS rounding mode in FCR31 to IEEE library */
2370 static unsigned int ieee_rm[] = {
2371 float_round_nearest_even,
2372 float_round_to_zero,
2377 #define RESTORE_ROUNDING_MODE \
2378 set_float_rounding_mode(ieee_rm[env->active_fpu.fcr31 & 3], &env->active_fpu.fp_status)
2380 #define RESTORE_FLUSH_MODE \
2381 set_flush_to_zero((env->active_fpu.fcr31 & (1 << 24)) != 0, &env->active_fpu.fp_status);
2383 target_ulong helper_cfc1 (uint32_t reg)
2389 arg1 = (int32_t)env->active_fpu.fcr0;
2392 arg1 = ((env->active_fpu.fcr31 >> 24) & 0xfe) | ((env->active_fpu.fcr31 >> 23) & 0x1);
2395 arg1 = env->active_fpu.fcr31 & 0x0003f07c;
2398 arg1 = (env->active_fpu.fcr31 & 0x00000f83) | ((env->active_fpu.fcr31 >> 22) & 0x4);
2401 arg1 = (int32_t)env->active_fpu.fcr31;
2408 void helper_ctc1 (target_ulong arg1, uint32_t reg)
2412 if (arg1 & 0xffffff00)
2414 env->active_fpu.fcr31 = (env->active_fpu.fcr31 & 0x017fffff) | ((arg1 & 0xfe) << 24) |
2415 ((arg1 & 0x1) << 23);
2418 if (arg1 & 0x007c0000)
2420 env->active_fpu.fcr31 = (env->active_fpu.fcr31 & 0xfffc0f83) | (arg1 & 0x0003f07c);
2423 if (arg1 & 0x007c0000)
2425 env->active_fpu.fcr31 = (env->active_fpu.fcr31 & 0xfefff07c) | (arg1 & 0x00000f83) |
2426 ((arg1 & 0x4) << 22);
2429 if (arg1 & 0x007c0000)
2431 env->active_fpu.fcr31 = arg1;
2436 /* set rounding mode */
2437 RESTORE_ROUNDING_MODE;
2438 /* set flush-to-zero mode */
2440 set_float_exception_flags(0, &env->active_fpu.fp_status);
2441 if ((GET_FP_ENABLE(env->active_fpu.fcr31) | 0x20) & GET_FP_CAUSE(env->active_fpu.fcr31))
2442 helper_raise_exception(EXCP_FPE);
2445 static inline int ieee_ex_to_mips(int xcpt)
2449 if (xcpt & float_flag_invalid) {
2452 if (xcpt & float_flag_overflow) {
2455 if (xcpt & float_flag_underflow) {
2456 ret |= FP_UNDERFLOW;
2458 if (xcpt & float_flag_divbyzero) {
2461 if (xcpt & float_flag_inexact) {
2468 static inline void update_fcr31(void)
2470 int tmp = ieee_ex_to_mips(get_float_exception_flags(&env->active_fpu.fp_status));
2472 SET_FP_CAUSE(env->active_fpu.fcr31, tmp);
2473 if (GET_FP_ENABLE(env->active_fpu.fcr31) & tmp)
2474 helper_raise_exception(EXCP_FPE);
2476 UPDATE_FP_FLAGS(env->active_fpu.fcr31, tmp);
Single precision routines have a "s" suffix, double precision a
2481 "d" suffix, 32bit integer "w", 64bit integer "l", paired single "ps",
2482 paired single lower "pl", paired single upper "pu". */
2484 /* unary operations, modifying fp status */
2485 uint64_t helper_float_sqrt_d(uint64_t fdt0)
2487 return float64_sqrt(fdt0, &env->active_fpu.fp_status);
2490 uint32_t helper_float_sqrt_s(uint32_t fst0)
2492 return float32_sqrt(fst0, &env->active_fpu.fp_status);
2495 uint64_t helper_float_cvtd_s(uint32_t fst0)
2499 set_float_exception_flags(0, &env->active_fpu.fp_status);
2500 fdt2 = float32_to_float64(fst0, &env->active_fpu.fp_status);
2505 uint64_t helper_float_cvtd_w(uint32_t wt0)
2509 set_float_exception_flags(0, &env->active_fpu.fp_status);
2510 fdt2 = int32_to_float64(wt0, &env->active_fpu.fp_status);
2515 uint64_t helper_float_cvtd_l(uint64_t dt0)
2519 set_float_exception_flags(0, &env->active_fpu.fp_status);
2520 fdt2 = int64_to_float64(dt0, &env->active_fpu.fp_status);
2525 uint64_t helper_float_cvtl_d(uint64_t fdt0)
2529 set_float_exception_flags(0, &env->active_fpu.fp_status);
2530 dt2 = float64_to_int64(fdt0, &env->active_fpu.fp_status);
2532 if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID))
2537 uint64_t helper_float_cvtl_s(uint32_t fst0)
2541 set_float_exception_flags(0, &env->active_fpu.fp_status);
2542 dt2 = float32_to_int64(fst0, &env->active_fpu.fp_status);
2544 if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID))
2549 uint64_t helper_float_cvtps_pw(uint64_t dt0)
2554 set_float_exception_flags(0, &env->active_fpu.fp_status);
2555 fst2 = int32_to_float32(dt0 & 0XFFFFFFFF, &env->active_fpu.fp_status);
2556 fsth2 = int32_to_float32(dt0 >> 32, &env->active_fpu.fp_status);
2558 return ((uint64_t)fsth2 << 32) | fst2;
2561 uint64_t helper_float_cvtpw_ps(uint64_t fdt0)
2566 set_float_exception_flags(0, &env->active_fpu.fp_status);
2567 wt2 = float32_to_int32(fdt0 & 0XFFFFFFFF, &env->active_fpu.fp_status);
2568 wth2 = float32_to_int32(fdt0 >> 32, &env->active_fpu.fp_status);
2570 if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID)) {
2572 wth2 = FLOAT_SNAN32;
2574 return ((uint64_t)wth2 << 32) | wt2;
2577 uint32_t helper_float_cvts_d(uint64_t fdt0)
2581 set_float_exception_flags(0, &env->active_fpu.fp_status);
2582 fst2 = float64_to_float32(fdt0, &env->active_fpu.fp_status);
2587 uint32_t helper_float_cvts_w(uint32_t wt0)
2591 set_float_exception_flags(0, &env->active_fpu.fp_status);
2592 fst2 = int32_to_float32(wt0, &env->active_fpu.fp_status);
2597 uint32_t helper_float_cvts_l(uint64_t dt0)
2601 set_float_exception_flags(0, &env->active_fpu.fp_status);
2602 fst2 = int64_to_float32(dt0, &env->active_fpu.fp_status);
2607 uint32_t helper_float_cvts_pl(uint32_t wt0)
2611 set_float_exception_flags(0, &env->active_fpu.fp_status);
2617 uint32_t helper_float_cvts_pu(uint32_t wth0)
2621 set_float_exception_flags(0, &env->active_fpu.fp_status);
2627 uint32_t helper_float_cvtw_s(uint32_t fst0)
2631 set_float_exception_flags(0, &env->active_fpu.fp_status);
2632 wt2 = float32_to_int32(fst0, &env->active_fpu.fp_status);
2634 if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID))
2639 uint32_t helper_float_cvtw_d(uint64_t fdt0)
2643 set_float_exception_flags(0, &env->active_fpu.fp_status);
2644 wt2 = float64_to_int32(fdt0, &env->active_fpu.fp_status);
2646 if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID))
2651 uint64_t helper_float_roundl_d(uint64_t fdt0)
2655 set_float_exception_flags(0, &env->active_fpu.fp_status);
2656 set_float_rounding_mode(float_round_nearest_even, &env->active_fpu.fp_status);
2657 dt2 = float64_to_int64(fdt0, &env->active_fpu.fp_status);
2658 RESTORE_ROUNDING_MODE;
2660 if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID))
2665 uint64_t helper_float_roundl_s(uint32_t fst0)
2669 set_float_exception_flags(0, &env->active_fpu.fp_status);
2670 set_float_rounding_mode(float_round_nearest_even, &env->active_fpu.fp_status);
2671 dt2 = float32_to_int64(fst0, &env->active_fpu.fp_status);
2672 RESTORE_ROUNDING_MODE;
2674 if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID))
2679 uint32_t helper_float_roundw_d(uint64_t fdt0)
2683 set_float_exception_flags(0, &env->active_fpu.fp_status);
2684 set_float_rounding_mode(float_round_nearest_even, &env->active_fpu.fp_status);
2685 wt2 = float64_to_int32(fdt0, &env->active_fpu.fp_status);
2686 RESTORE_ROUNDING_MODE;
2688 if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID))
2693 uint32_t helper_float_roundw_s(uint32_t fst0)
2697 set_float_exception_flags(0, &env->active_fpu.fp_status);
2698 set_float_rounding_mode(float_round_nearest_even, &env->active_fpu.fp_status);
2699 wt2 = float32_to_int32(fst0, &env->active_fpu.fp_status);
2700 RESTORE_ROUNDING_MODE;
2702 if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID))
2707 uint64_t helper_float_truncl_d(uint64_t fdt0)
2711 set_float_exception_flags(0, &env->active_fpu.fp_status);
2712 dt2 = float64_to_int64_round_to_zero(fdt0, &env->active_fpu.fp_status);
2714 if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID))
2719 uint64_t helper_float_truncl_s(uint32_t fst0)
2723 set_float_exception_flags(0, &env->active_fpu.fp_status);
2724 dt2 = float32_to_int64_round_to_zero(fst0, &env->active_fpu.fp_status);
2726 if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID))
2731 uint32_t helper_float_truncw_d(uint64_t fdt0)
2735 set_float_exception_flags(0, &env->active_fpu.fp_status);
2736 wt2 = float64_to_int32_round_to_zero(fdt0, &env->active_fpu.fp_status);
2738 if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID))
2743 uint32_t helper_float_truncw_s(uint32_t fst0)
2747 set_float_exception_flags(0, &env->active_fpu.fp_status);
2748 wt2 = float32_to_int32_round_to_zero(fst0, &env->active_fpu.fp_status);
2750 if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID))
/* ceil.l.d: double -> int64 rounding up (toward +inf); the rounding mode
 * is temporarily forced to float_round_up and then restored. */
2755 uint64_t helper_float_ceill_d(uint64_t fdt0)
2759 set_float_exception_flags(0, &env->active_fpu.fp_status);
2760 set_float_rounding_mode(float_round_up, &env->active_fpu.fp_status);
2761 dt2 = float64_to_int64(fdt0, &env->active_fpu.fp_status);
2762 RESTORE_ROUNDING_MODE;
2764 if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID))
/* ceil.l.s: single -> int64 rounding up (same pattern as above). */
2769 uint64_t helper_float_ceill_s(uint32_t fst0)
2773 set_float_exception_flags(0, &env->active_fpu.fp_status);
2774 set_float_rounding_mode(float_round_up, &env->active_fpu.fp_status);
2775 dt2 = float32_to_int64(fst0, &env->active_fpu.fp_status);
2776 RESTORE_ROUNDING_MODE;
2778 if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID))
/* ceil.w.d: double -> int32 rounding up (toward +inf), with temporary
 * rounding-mode switch and restore. */
2783 uint32_t helper_float_ceilw_d(uint64_t fdt0)
2787 set_float_exception_flags(0, &env->active_fpu.fp_status);
2788 set_float_rounding_mode(float_round_up, &env->active_fpu.fp_status);
2789 wt2 = float64_to_int32(fdt0, &env->active_fpu.fp_status);
2790 RESTORE_ROUNDING_MODE;
2792 if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID))
/* ceil.w.s: single -> int32 rounding up (same pattern as above). */
2797 uint32_t helper_float_ceilw_s(uint32_t fst0)
2801 set_float_exception_flags(0, &env->active_fpu.fp_status);
2802 set_float_rounding_mode(float_round_up, &env->active_fpu.fp_status);
2803 wt2 = float32_to_int32(fst0, &env->active_fpu.fp_status);
2804 RESTORE_ROUNDING_MODE;
2806 if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID))
/* floor.l.d: double -> int64 rounding down (toward -inf); the rounding
 * mode is temporarily forced to float_round_down and then restored. */
2811 uint64_t helper_float_floorl_d(uint64_t fdt0)
2815 set_float_exception_flags(0, &env->active_fpu.fp_status);
2816 set_float_rounding_mode(float_round_down, &env->active_fpu.fp_status);
2817 dt2 = float64_to_int64(fdt0, &env->active_fpu.fp_status);
2818 RESTORE_ROUNDING_MODE;
2820 if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID))
/* floor.l.s: single -> int64 rounding down (same pattern as above). */
2825 uint64_t helper_float_floorl_s(uint32_t fst0)
2829 set_float_exception_flags(0, &env->active_fpu.fp_status);
2830 set_float_rounding_mode(float_round_down, &env->active_fpu.fp_status);
2831 dt2 = float32_to_int64(fst0, &env->active_fpu.fp_status);
2832 RESTORE_ROUNDING_MODE;
2834 if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID))
/* floor.w.d: double -> int32 rounding down (toward -inf), with temporary
 * rounding-mode switch and restore. */
2839 uint32_t helper_float_floorw_d(uint64_t fdt0)
2843 set_float_exception_flags(0, &env->active_fpu.fp_status);
2844 set_float_rounding_mode(float_round_down, &env->active_fpu.fp_status);
2845 wt2 = float64_to_int32(fdt0, &env->active_fpu.fp_status);
2846 RESTORE_ROUNDING_MODE;
2848 if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID))
/* floor.w.s: single -> int32 rounding down (same pattern as above). */
2853 uint32_t helper_float_floorw_s(uint32_t fst0)
2857 set_float_exception_flags(0, &env->active_fpu.fp_status);
2858 set_float_rounding_mode(float_round_down, &env->active_fpu.fp_status);
2859 wt2 = float32_to_int32(fst0, &env->active_fpu.fp_status);
2860 RESTORE_ROUNDING_MODE;
2862 if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID))
2867 /* unary operations, not modifying fp status */
/* FLOAT_UNOP(name) expands to three helpers — _d, _s and _ps — that
 * forward to float64_<name> / float32_<name>.  The _ps variant applies
 * <name> to the low and high 32-bit halves of the paired-single value
 * independently and repacks them.  No fp_status pointer is passed, so
 * these cannot raise FP exceptions. */
2868 #define FLOAT_UNOP(name) \
2869 uint64_t helper_float_ ## name ## _d(uint64_t fdt0) \
2871 return float64_ ## name(fdt0); \
2873 uint32_t helper_float_ ## name ## _s(uint32_t fst0) \
2875 return float32_ ## name(fst0); \
2877 uint64_t helper_float_ ## name ## _ps(uint64_t fdt0) \
2882 wt0 = float32_ ## name(fdt0 & 0XFFFFFFFF); \
2883 wth0 = float32_ ## name(fdt0 >> 32); \
2884 return ((uint64_t)wth0 << 32) | wt0; \
2890 /* MIPS specific unary operations */
/* recip.d: computes 1.0 / fdt0 (MIPS RECIP instruction, double). */
2891 uint64_t helper_float_recip_d(uint64_t fdt0)
2895 set_float_exception_flags(0, &env->active_fpu.fp_status);
2896 fdt2 = float64_div(FLOAT_ONE64, fdt0, &env->active_fpu.fp_status);
/* recip.s: computes 1.0 / fst0 (single). */
2901 uint32_t helper_float_recip_s(uint32_t fst0)
2905 set_float_exception_flags(0, &env->active_fpu.fp_status);
2906 fst2 = float32_div(FLOAT_ONE32, fst0, &env->active_fpu.fp_status);
/* rsqrt.d: computes 1.0 / sqrt(fdt0) as sqrt followed by a division. */
2911 uint64_t helper_float_rsqrt_d(uint64_t fdt0)
2915 set_float_exception_flags(0, &env->active_fpu.fp_status);
2916 fdt2 = float64_sqrt(fdt0, &env->active_fpu.fp_status);
2917 fdt2 = float64_div(FLOAT_ONE64, fdt2, &env->active_fpu.fp_status);
/* rsqrt.s: computes 1.0 / sqrt(fst0) (single). */
2922 uint32_t helper_float_rsqrt_s(uint32_t fst0)
2926 set_float_exception_flags(0, &env->active_fpu.fp_status);
2927 fst2 = float32_sqrt(fst0, &env->active_fpu.fp_status);
2928 fst2 = float32_div(FLOAT_ONE32, fst2, &env->active_fpu.fp_status);
/* recip1.d: reduced-precision reciprocal seed (MIPS-3D RECIP1) — here
 * implemented as a full-precision 1.0 / fdt0. */
2933 uint64_t helper_float_recip1_d(uint64_t fdt0)
2937 set_float_exception_flags(0, &env->active_fpu.fp_status);
2938 fdt2 = float64_div(FLOAT_ONE64, fdt0, &env->active_fpu.fp_status);
/* recip1.s: single-precision variant of the above. */
2943 uint32_t helper_float_recip1_s(uint32_t fst0)
2947 set_float_exception_flags(0, &env->active_fpu.fp_status);
2948 fst2 = float32_div(FLOAT_ONE32, fst0, &env->active_fpu.fp_status);
/* recip1.ps: 1.0 / x on each 32-bit half of the paired single,
 * repacked as (high << 32) | low. */
2953 uint64_t helper_float_recip1_ps(uint64_t fdt0)
2958 set_float_exception_flags(0, &env->active_fpu.fp_status);
2959 fst2 = float32_div(FLOAT_ONE32, fdt0 & 0XFFFFFFFF, &env->active_fpu.fp_status);
2960 fsth2 = float32_div(FLOAT_ONE32, fdt0 >> 32, &env->active_fpu.fp_status);
2962 return ((uint64_t)fsth2 << 32) | fst2;
/* rsqrt1.d: reduced-precision reciprocal-sqrt seed (MIPS-3D RSQRT1) —
 * here implemented as a full-precision 1.0 / sqrt(fdt0). */
2965 uint64_t helper_float_rsqrt1_d(uint64_t fdt0)
2969 set_float_exception_flags(0, &env->active_fpu.fp_status);
2970 fdt2 = float64_sqrt(fdt0, &env->active_fpu.fp_status);
2971 fdt2 = float64_div(FLOAT_ONE64, fdt2, &env->active_fpu.fp_status);
/* rsqrt1.s: single-precision variant of the above. */
2976 uint32_t helper_float_rsqrt1_s(uint32_t fst0)
2980 set_float_exception_flags(0, &env->active_fpu.fp_status);
2981 fst2 = float32_sqrt(fst0, &env->active_fpu.fp_status);
2982 fst2 = float32_div(FLOAT_ONE32, fst2, &env->active_fpu.fp_status);
/* rsqrt1.ps: 1.0 / sqrt(x) on each 32-bit half of the paired single. */
2987 uint64_t helper_float_rsqrt1_ps(uint64_t fdt0)
2992 set_float_exception_flags(0, &env->active_fpu.fp_status);
2993 fst2 = float32_sqrt(fdt0 & 0XFFFFFFFF, &env->active_fpu.fp_status);
2994 fsth2 = float32_sqrt(fdt0 >> 32, &env->active_fpu.fp_status);
2995 fst2 = float32_div(FLOAT_ONE32, fst2, &env->active_fpu.fp_status);
2996 fsth2 = float32_div(FLOAT_ONE32, fsth2, &env->active_fpu.fp_status);
2998 return ((uint64_t)fsth2 << 32) | fst2;
/* NOTE(review): FLOAT_OP does not appear to be used anywhere in this
 * chunk — looks like a leftover from an older helper scheme; candidate
 * for removal once confirmed unused in the rest of the file. */
3001 #define FLOAT_OP(name, p) void helper_float_##name##_##p(void)
3003 /* binary operations */
/* FLOAT_BINOP(name) expands to _d, _s and _ps helpers wrapping
 * float64_/float32_<name>.  Exception flags are cleared first; if the
 * operation raises FP_INVALID the result is replaced with the default
 * quiet NaN (both halves for _ps).  The _ps variant operates on the two
 * 32-bit halves independently and repacks them high:low. */
3004 #define FLOAT_BINOP(name) \
3005 uint64_t helper_float_ ## name ## _d(uint64_t fdt0, uint64_t fdt1) \
3009 set_float_exception_flags(0, &env->active_fpu.fp_status); \
3010 dt2 = float64_ ## name (fdt0, fdt1, &env->active_fpu.fp_status); \
3012 if (GET_FP_CAUSE(env->active_fpu.fcr31) & FP_INVALID) \
3013 dt2 = FLOAT_QNAN64; \
3017 uint32_t helper_float_ ## name ## _s(uint32_t fst0, uint32_t fst1) \
3021 set_float_exception_flags(0, &env->active_fpu.fp_status); \
3022 wt2 = float32_ ## name (fst0, fst1, &env->active_fpu.fp_status); \
3024 if (GET_FP_CAUSE(env->active_fpu.fcr31) & FP_INVALID) \
3025 wt2 = FLOAT_QNAN32; \
3029 uint64_t helper_float_ ## name ## _ps(uint64_t fdt0, uint64_t fdt1) \
3031 uint32_t fst0 = fdt0 & 0XFFFFFFFF; \
3032 uint32_t fsth0 = fdt0 >> 32; \
3033 uint32_t fst1 = fdt1 & 0XFFFFFFFF; \
3034 uint32_t fsth1 = fdt1 >> 32; \
3038 set_float_exception_flags(0, &env->active_fpu.fp_status); \
3039 wt2 = float32_ ## name (fst0, fst1, &env->active_fpu.fp_status); \
3040 wth2 = float32_ ## name (fsth0, fsth1, &env->active_fpu.fp_status); \
3042 if (GET_FP_CAUSE(env->active_fpu.fcr31) & FP_INVALID) { \
3043 wt2 = FLOAT_QNAN32; \
3044 wth2 = FLOAT_QNAN32; \
3046 return ((uint64_t)wth2 << 32) | wt2; \
3055 /* ternary operations */
/* FLOAT_TERNOP(name1, name2) builds fused two-step helpers:
 * result = <name2>(<name1>(op0, op1), op2), e.g. (mul, add) gives
 * madd-style (op0 * op1) + op2 for _d, _s and _ps.  Unlike FLOAT_BINOP
 * there is no exception-flag clearing or NaN override here. */
3056 #define FLOAT_TERNOP(name1, name2) \
3057 uint64_t helper_float_ ## name1 ## name2 ## _d(uint64_t fdt0, uint64_t fdt1, \
3060 fdt0 = float64_ ## name1 (fdt0, fdt1, &env->active_fpu.fp_status); \
3061 return float64_ ## name2 (fdt0, fdt2, &env->active_fpu.fp_status); \
3064 uint32_t helper_float_ ## name1 ## name2 ## _s(uint32_t fst0, uint32_t fst1, \
3067 fst0 = float32_ ## name1 (fst0, fst1, &env->active_fpu.fp_status); \
3068 return float32_ ## name2 (fst0, fst2, &env->active_fpu.fp_status); \
3071 uint64_t helper_float_ ## name1 ## name2 ## _ps(uint64_t fdt0, uint64_t fdt1, \
3074 uint32_t fst0 = fdt0 & 0XFFFFFFFF; \
3075 uint32_t fsth0 = fdt0 >> 32; \
3076 uint32_t fst1 = fdt1 & 0XFFFFFFFF; \
3077 uint32_t fsth1 = fdt1 >> 32; \
3078 uint32_t fst2 = fdt2 & 0XFFFFFFFF; \
3079 uint32_t fsth2 = fdt2 >> 32; \
3081 fst0 = float32_ ## name1 (fst0, fst1, &env->active_fpu.fp_status); \
3082 fsth0 = float32_ ## name1 (fsth0, fsth1, &env->active_fpu.fp_status); \
3083 fst2 = float32_ ## name2 (fst0, fst2, &env->active_fpu.fp_status); \
3084 fsth2 = float32_ ## name2 (fsth0, fsth2, &env->active_fpu.fp_status); \
3085 return ((uint64_t)fsth2 << 32) | fst2; \
/* Instantiate madd (mul then add) and msub (mul then sub) helpers. */
3088 FLOAT_TERNOP(mul, add)
3089 FLOAT_TERNOP(mul, sub)
3092 /* negated ternary operations */
/* FLOAT_NTERNOP(name1, name2): same two-step fused operation as
 * FLOAT_TERNOP, but the final result's sign is flipped with
 * float64_chs/float32_chs — i.e. -(<name2>(<name1>(op0, op1), op2)),
 * giving nmadd/nmsub-style helpers. */
3093 #define FLOAT_NTERNOP(name1, name2) \
3094 uint64_t helper_float_n ## name1 ## name2 ## _d(uint64_t fdt0, uint64_t fdt1, \
3097 fdt0 = float64_ ## name1 (fdt0, fdt1, &env->active_fpu.fp_status); \
3098 fdt2 = float64_ ## name2 (fdt0, fdt2, &env->active_fpu.fp_status); \
3099 return float64_chs(fdt2); \
3102 uint32_t helper_float_n ## name1 ## name2 ## _s(uint32_t fst0, uint32_t fst1, \
3105 fst0 = float32_ ## name1 (fst0, fst1, &env->active_fpu.fp_status); \
3106 fst2 = float32_ ## name2 (fst0, fst2, &env->active_fpu.fp_status); \
3107 return float32_chs(fst2); \
3110 uint64_t helper_float_n ## name1 ## name2 ## _ps(uint64_t fdt0, uint64_t fdt1,\
3113 uint32_t fst0 = fdt0 & 0XFFFFFFFF; \
3114 uint32_t fsth0 = fdt0 >> 32; \
3115 uint32_t fst1 = fdt1 & 0XFFFFFFFF; \
3116 uint32_t fsth1 = fdt1 >> 32; \
3117 uint32_t fst2 = fdt2 & 0XFFFFFFFF; \
3118 uint32_t fsth2 = fdt2 >> 32; \
3120 fst0 = float32_ ## name1 (fst0, fst1, &env->active_fpu.fp_status); \
3121 fsth0 = float32_ ## name1 (fsth0, fsth1, &env->active_fpu.fp_status); \
3122 fst2 = float32_ ## name2 (fst0, fst2, &env->active_fpu.fp_status); \
3123 fsth2 = float32_ ## name2 (fsth0, fsth2, &env->active_fpu.fp_status); \
3124 fst2 = float32_chs(fst2); \
3125 fsth2 = float32_chs(fsth2); \
3126 return ((uint64_t)fsth2 << 32) | fst2; \
/* Instantiate nmadd and nmsub helpers. */
3129 FLOAT_NTERNOP(mul, add)
3130 FLOAT_NTERNOP(mul, sub)
3131 #undef FLOAT_NTERNOP
3133 /* MIPS specific binary operations */
/* recip2.d (MIPS-3D Newton-Raphson refinement step): computes
 * -(fdt0 * fdt2 - 1.0), i.e. 1 - product, as chs(sub(mul, 1)). */
3134 uint64_t helper_float_recip2_d(uint64_t fdt0, uint64_t fdt2)
3136 set_float_exception_flags(0, &env->active_fpu.fp_status);
3137 fdt2 = float64_mul(fdt0, fdt2, &env->active_fpu.fp_status);
3138 fdt2 = float64_chs(float64_sub(fdt2, FLOAT_ONE64, &env->active_fpu.fp_status));
/* recip2.s: single-precision variant of the above. */
3143 uint32_t helper_float_recip2_s(uint32_t fst0, uint32_t fst2)
3145 set_float_exception_flags(0, &env->active_fpu.fp_status);
3146 fst2 = float32_mul(fst0, fst2, &env->active_fpu.fp_status);
3147 fst2 = float32_chs(float32_sub(fst2, FLOAT_ONE32, &env->active_fpu.fp_status));
/* recip2.ps: the recip2 step -(x * y - 1.0) applied independently to
 * the low and high 32-bit halves of the paired-single operands. */
3152 uint64_t helper_float_recip2_ps(uint64_t fdt0, uint64_t fdt2)
3154 uint32_t fst0 = fdt0 & 0XFFFFFFFF;
3155 uint32_t fsth0 = fdt0 >> 32;
3156 uint32_t fst2 = fdt2 & 0XFFFFFFFF;
3157 uint32_t fsth2 = fdt2 >> 32;
3159 set_float_exception_flags(0, &env->active_fpu.fp_status);
3160 fst2 = float32_mul(fst0, fst2, &env->active_fpu.fp_status);
3161 fsth2 = float32_mul(fsth0, fsth2, &env->active_fpu.fp_status);
3162 fst2 = float32_chs(float32_sub(fst2, FLOAT_ONE32, &env->active_fpu.fp_status));
3163 fsth2 = float32_chs(float32_sub(fsth2, FLOAT_ONE32, &env->active_fpu.fp_status));
3165 return ((uint64_t)fsth2 << 32) | fst2;
/* rsqrt2.d (MIPS-3D reciprocal-sqrt refinement step): computes
 * -((fdt0 * fdt2 - 1.0) / 2.0), i.e. (1 - product) / 2. */
3168 uint64_t helper_float_rsqrt2_d(uint64_t fdt0, uint64_t fdt2)
3170 set_float_exception_flags(0, &env->active_fpu.fp_status);
3171 fdt2 = float64_mul(fdt0, fdt2, &env->active_fpu.fp_status);
3172 fdt2 = float64_sub(fdt2, FLOAT_ONE64, &env->active_fpu.fp_status);
3173 fdt2 = float64_chs(float64_div(fdt2, FLOAT_TWO64, &env->active_fpu.fp_status));
/* rsqrt2.s: single-precision variant of the above. */
3178 uint32_t helper_float_rsqrt2_s(uint32_t fst0, uint32_t fst2)
3180 set_float_exception_flags(0, &env->active_fpu.fp_status);
3181 fst2 = float32_mul(fst0, fst2, &env->active_fpu.fp_status);
3182 fst2 = float32_sub(fst2, FLOAT_ONE32, &env->active_fpu.fp_status);
3183 fst2 = float32_chs(float32_div(fst2, FLOAT_TWO32, &env->active_fpu.fp_status));
/* rsqrt2.ps: the rsqrt2 step -((x * y - 1.0) / 2.0) applied
 * independently to the low and high halves of the paired singles. */
3188 uint64_t helper_float_rsqrt2_ps(uint64_t fdt0, uint64_t fdt2)
3190 uint32_t fst0 = fdt0 & 0XFFFFFFFF;
3191 uint32_t fsth0 = fdt0 >> 32;
3192 uint32_t fst2 = fdt2 & 0XFFFFFFFF;
3193 uint32_t fsth2 = fdt2 >> 32;
3195 set_float_exception_flags(0, &env->active_fpu.fp_status);
3196 fst2 = float32_mul(fst0, fst2, &env->active_fpu.fp_status);
3197 fsth2 = float32_mul(fsth0, fsth2, &env->active_fpu.fp_status);
3198 fst2 = float32_sub(fst2, FLOAT_ONE32, &env->active_fpu.fp_status);
3199 fsth2 = float32_sub(fsth2, FLOAT_ONE32, &env->active_fpu.fp_status);
3200 fst2 = float32_chs(float32_div(fst2, FLOAT_TWO32, &env->active_fpu.fp_status));
3201 fsth2 = float32_chs(float32_div(fsth2, FLOAT_TWO32, &env->active_fpu.fp_status));
3203 return ((uint64_t)fsth2 << 32) | fst2;
/* addr.ps (reduction add): low result = low(fdt0) + high(fdt0),
 * high result = low(fdt1) + high(fdt1) — note each output lane sums
 * the two halves of ONE input operand, unlike ordinary add.ps. */
3206 uint64_t helper_float_addr_ps(uint64_t fdt0, uint64_t fdt1)
3208 uint32_t fst0 = fdt0 & 0XFFFFFFFF;
3209 uint32_t fsth0 = fdt0 >> 32;
3210 uint32_t fst1 = fdt1 & 0XFFFFFFFF;
3211 uint32_t fsth1 = fdt1 >> 32;
3215 set_float_exception_flags(0, &env->active_fpu.fp_status);
3216 fst2 = float32_add (fst0, fsth0, &env->active_fpu.fp_status);
3217 fsth2 = float32_add (fst1, fsth1, &env->active_fpu.fp_status);
3219 return ((uint64_t)fsth2 << 32) | fst2;
/* mulr.ps (reduction multiply): low result = low(fdt0) * high(fdt0),
 * high result = low(fdt1) * high(fdt1) — same cross-half reduction
 * pattern as addr.ps above. */
3222 uint64_t helper_float_mulr_ps(uint64_t fdt0, uint64_t fdt1)
3224 uint32_t fst0 = fdt0 & 0XFFFFFFFF;
3225 uint32_t fsth0 = fdt0 >> 32;
3226 uint32_t fst1 = fdt1 & 0XFFFFFFFF;
3227 uint32_t fsth1 = fdt1 >> 32;
3231 set_float_exception_flags(0, &env->active_fpu.fp_status);
3232 fst2 = float32_mul (fst0, fsth0, &env->active_fpu.fp_status);
3233 fsth2 = float32_mul (fst1, fsth1, &env->active_fpu.fp_status);
3235 return ((uint64_t)fsth2 << 32) | fst2;
3238 /* compare operations */
/* FOP_COND_D(op, cond) defines helper_cmp_d_<op> and helper_cmpabs_d_<op>:
 * both evaluate the double-precision condition "cond" on (fdt0, fdt1) and
 * set or clear FP condition code cc accordingly; the cmpabs variant first
 * strips the sign bits with float64_abs (|x| compare, for CABS.cond.d).
 * The *_quiet predicates below are non-signaling; the sf/ngle/seq/ngl/
 * lt/nge/le/ngt rows use the signaling predicates. */
3239 #define FOP_COND_D(op, cond) \
3240 void helper_cmp_d_ ## op (uint64_t fdt0, uint64_t fdt1, int cc) \
3243 set_float_exception_flags(0, &env->active_fpu.fp_status); \
3247 SET_FP_COND(cc, env->active_fpu); \
3249 CLEAR_FP_COND(cc, env->active_fpu); \
3251 void helper_cmpabs_d_ ## op (uint64_t fdt0, uint64_t fdt1, int cc) \
3254 set_float_exception_flags(0, &env->active_fpu.fp_status); \
3255 fdt0 = float64_abs(fdt0); \
3256 fdt1 = float64_abs(fdt1); \
3260 SET_FP_COND(cc, env->active_fpu); \
3262 CLEAR_FP_COND(cc, env->active_fpu); \
3265 /* NOTE: the comma operator will make "cond" to eval to false,
3266 * but float64_unordered_quiet() is still called. */
3267 FOP_COND_D(f, (float64_unordered_quiet(fdt1, fdt0, &env->active_fpu.fp_status), 0))
3268 FOP_COND_D(un, float64_unordered_quiet(fdt1, fdt0, &env->active_fpu.fp_status))
3269 FOP_COND_D(eq, float64_eq_quiet(fdt0, fdt1, &env->active_fpu.fp_status))
3270 FOP_COND_D(ueq, float64_unordered_quiet(fdt1, fdt0, &env->active_fpu.fp_status) || float64_eq_quiet(fdt0, fdt1, &env->active_fpu.fp_status))
3271 FOP_COND_D(olt, float64_lt_quiet(fdt0, fdt1, &env->active_fpu.fp_status))
3272 FOP_COND_D(ult, float64_unordered_quiet(fdt1, fdt0, &env->active_fpu.fp_status) || float64_lt_quiet(fdt0, fdt1, &env->active_fpu.fp_status))
3273 FOP_COND_D(ole, float64_le_quiet(fdt0, fdt1, &env->active_fpu.fp_status))
3274 FOP_COND_D(ule, float64_unordered_quiet(fdt1, fdt0, &env->active_fpu.fp_status) || float64_le_quiet(fdt0, fdt1, &env->active_fpu.fp_status))
3275 /* NOTE: the comma operator will make "cond" to eval to false,
3276 * but float64_unordered() is still called. */
3277 FOP_COND_D(sf, (float64_unordered(fdt1, fdt0, &env->active_fpu.fp_status), 0))
3278 FOP_COND_D(ngle,float64_unordered(fdt1, fdt0, &env->active_fpu.fp_status))
3279 FOP_COND_D(seq, float64_eq(fdt0, fdt1, &env->active_fpu.fp_status))
3280 FOP_COND_D(ngl, float64_unordered(fdt1, fdt0, &env->active_fpu.fp_status) || float64_eq(fdt0, fdt1, &env->active_fpu.fp_status))
3281 FOP_COND_D(lt, float64_lt(fdt0, fdt1, &env->active_fpu.fp_status))
3282 FOP_COND_D(nge, float64_unordered(fdt1, fdt0, &env->active_fpu.fp_status) || float64_lt(fdt0, fdt1, &env->active_fpu.fp_status))
3283 FOP_COND_D(le, float64_le(fdt0, fdt1, &env->active_fpu.fp_status))
3284 FOP_COND_D(ngt, float64_unordered(fdt1, fdt0, &env->active_fpu.fp_status) || float64_le(fdt0, fdt1, &env->active_fpu.fp_status))
/* FOP_COND_S(op, cond): single-precision counterpart of FOP_COND_D —
 * defines helper_cmp_s_<op> and helper_cmpabs_s_<op>, setting/clearing
 * condition code cc; cmpabs compares absolute values (CABS.cond.s). */
3286 #define FOP_COND_S(op, cond) \
3287 void helper_cmp_s_ ## op (uint32_t fst0, uint32_t fst1, int cc) \
3290 set_float_exception_flags(0, &env->active_fpu.fp_status); \
3294 SET_FP_COND(cc, env->active_fpu); \
3296 CLEAR_FP_COND(cc, env->active_fpu); \
3298 void helper_cmpabs_s_ ## op (uint32_t fst0, uint32_t fst1, int cc) \
3301 set_float_exception_flags(0, &env->active_fpu.fp_status); \
3302 fst0 = float32_abs(fst0); \
3303 fst1 = float32_abs(fst1); \
3307 SET_FP_COND(cc, env->active_fpu); \
3309 CLEAR_FP_COND(cc, env->active_fpu); \
3312 /* NOTE: the comma operator will make "cond" to eval to false,
3313 * but float32_unordered_quiet() is still called. */
3314 FOP_COND_S(f, (float32_unordered_quiet(fst1, fst0, &env->active_fpu.fp_status), 0))
3315 FOP_COND_S(un, float32_unordered_quiet(fst1, fst0, &env->active_fpu.fp_status))
3316 FOP_COND_S(eq, float32_eq_quiet(fst0, fst1, &env->active_fpu.fp_status))
3317 FOP_COND_S(ueq, float32_unordered_quiet(fst1, fst0, &env->active_fpu.fp_status) || float32_eq_quiet(fst0, fst1, &env->active_fpu.fp_status))
3318 FOP_COND_S(olt, float32_lt_quiet(fst0, fst1, &env->active_fpu.fp_status))
3319 FOP_COND_S(ult, float32_unordered_quiet(fst1, fst0, &env->active_fpu.fp_status) || float32_lt_quiet(fst0, fst1, &env->active_fpu.fp_status))
3320 FOP_COND_S(ole, float32_le_quiet(fst0, fst1, &env->active_fpu.fp_status))
3321 FOP_COND_S(ule, float32_unordered_quiet(fst1, fst0, &env->active_fpu.fp_status) || float32_le_quiet(fst0, fst1, &env->active_fpu.fp_status))
3322 /* NOTE: the comma operator will make "cond" to eval to false,
3323 * but float32_unordered() is still called. */
3324 FOP_COND_S(sf, (float32_unordered(fst1, fst0, &env->active_fpu.fp_status), 0))
3325 FOP_COND_S(ngle,float32_unordered(fst1, fst0, &env->active_fpu.fp_status))
3326 FOP_COND_S(seq, float32_eq(fst0, fst1, &env->active_fpu.fp_status))
3327 FOP_COND_S(ngl, float32_unordered(fst1, fst0, &env->active_fpu.fp_status) || float32_eq(fst0, fst1, &env->active_fpu.fp_status))
3328 FOP_COND_S(lt, float32_lt(fst0, fst1, &env->active_fpu.fp_status))
3329 FOP_COND_S(nge, float32_unordered(fst1, fst0, &env->active_fpu.fp_status) || float32_lt(fst0, fst1, &env->active_fpu.fp_status))
3330 FOP_COND_S(le, float32_le(fst0, fst1, &env->active_fpu.fp_status))
3331 FOP_COND_S(ngt, float32_unordered(fst1, fst0, &env->active_fpu.fp_status) || float32_le(fst0, fst1, &env->active_fpu.fp_status))
/* FOP_COND_PS(op, condl, condh): paired-single compare — "condl" is
 * evaluated on the low 32-bit halves and sets/clears condition code cc;
 * "condh" on the high halves and sets/clears cc + 1.  The cmpabs
 * variant strips sign bits from all four halves first. */
3333 #define FOP_COND_PS(op, condl, condh) \
3334 void helper_cmp_ps_ ## op (uint64_t fdt0, uint64_t fdt1, int cc) \
3336 uint32_t fst0, fsth0, fst1, fsth1; \
3338 set_float_exception_flags(0, &env->active_fpu.fp_status); \
3339 fst0 = fdt0 & 0XFFFFFFFF; \
3340 fsth0 = fdt0 >> 32; \
3341 fst1 = fdt1 & 0XFFFFFFFF; \
3342 fsth1 = fdt1 >> 32; \
3347 SET_FP_COND(cc, env->active_fpu); \
3349 CLEAR_FP_COND(cc, env->active_fpu); \
3351 SET_FP_COND(cc + 1, env->active_fpu); \
3353 CLEAR_FP_COND(cc + 1, env->active_fpu); \
3355 void helper_cmpabs_ps_ ## op (uint64_t fdt0, uint64_t fdt1, int cc) \
3357 uint32_t fst0, fsth0, fst1, fsth1; \
3359 fst0 = float32_abs(fdt0 & 0XFFFFFFFF); \
3360 fsth0 = float32_abs(fdt0 >> 32); \
3361 fst1 = float32_abs(fdt1 & 0XFFFFFFFF); \
3362 fsth1 = float32_abs(fdt1 >> 32); \
3367 SET_FP_COND(cc, env->active_fpu); \
3369 CLEAR_FP_COND(cc, env->active_fpu); \
3371 SET_FP_COND(cc + 1, env->active_fpu); \
3373 CLEAR_FP_COND(cc + 1, env->active_fpu); \
3376 /* NOTE: the comma operator will make "cond" to eval to false,
3377 * but float32_unordered_quiet() is still called. */
3378 FOP_COND_PS(f, (float32_unordered_quiet(fst1, fst0, &env->active_fpu.fp_status), 0),
3379 (float32_unordered_quiet(fsth1, fsth0, &env->active_fpu.fp_status), 0))
3380 FOP_COND_PS(un, float32_unordered_quiet(fst1, fst0, &env->active_fpu.fp_status),
3381 float32_unordered_quiet(fsth1, fsth0, &env->active_fpu.fp_status))
3382 FOP_COND_PS(eq, float32_eq_quiet(fst0, fst1, &env->active_fpu.fp_status),
3383 float32_eq_quiet(fsth0, fsth1, &env->active_fpu.fp_status))
3384 FOP_COND_PS(ueq, float32_unordered_quiet(fst1, fst0, &env->active_fpu.fp_status) || float32_eq_quiet(fst0, fst1, &env->active_fpu.fp_status),
3385 float32_unordered_quiet(fsth1, fsth0, &env->active_fpu.fp_status) || float32_eq_quiet(fsth0, fsth1, &env->active_fpu.fp_status))
3386 FOP_COND_PS(olt, float32_lt_quiet(fst0, fst1, &env->active_fpu.fp_status),
3387 float32_lt_quiet(fsth0, fsth1, &env->active_fpu.fp_status))
3388 FOP_COND_PS(ult, float32_unordered_quiet(fst1, fst0, &env->active_fpu.fp_status) || float32_lt_quiet(fst0, fst1, &env->active_fpu.fp_status),
3389 float32_unordered_quiet(fsth1, fsth0, &env->active_fpu.fp_status) || float32_lt_quiet(fsth0, fsth1, &env->active_fpu.fp_status))
3390 FOP_COND_PS(ole, float32_le_quiet(fst0, fst1, &env->active_fpu.fp_status),
3391 float32_le_quiet(fsth0, fsth1, &env->active_fpu.fp_status))
3392 FOP_COND_PS(ule, float32_unordered_quiet(fst1, fst0, &env->active_fpu.fp_status) || float32_le_quiet(fst0, fst1, &env->active_fpu.fp_status),
3393 float32_unordered_quiet(fsth1, fsth0, &env->active_fpu.fp_status) || float32_le_quiet(fsth0, fsth1, &env->active_fpu.fp_status))
3394 /* NOTE: the comma operator will make "cond" to eval to false,
3395 * but float32_unordered() is still called. */
3396 FOP_COND_PS(sf, (float32_unordered(fst1, fst0, &env->active_fpu.fp_status), 0),
3397 (float32_unordered(fsth1, fsth0, &env->active_fpu.fp_status), 0))
3398 FOP_COND_PS(ngle,float32_unordered(fst1, fst0, &env->active_fpu.fp_status),
3399 float32_unordered(fsth1, fsth0, &env->active_fpu.fp_status))
3400 FOP_COND_PS(seq, float32_eq(fst0, fst1, &env->active_fpu.fp_status),
3401 float32_eq(fsth0, fsth1, &env->active_fpu.fp_status))
3402 FOP_COND_PS(ngl, float32_unordered(fst1, fst0, &env->active_fpu.fp_status) || float32_eq(fst0, fst1, &env->active_fpu.fp_status),
3403 float32_unordered(fsth1, fsth0, &env->active_fpu.fp_status) || float32_eq(fsth0, fsth1, &env->active_fpu.fp_status))
3404 FOP_COND_PS(lt, float32_lt(fst0, fst1, &env->active_fpu.fp_status),
3405 float32_lt(fsth0, fsth1, &env->active_fpu.fp_status))
3406 FOP_COND_PS(nge, float32_unordered(fst1, fst0, &env->active_fpu.fp_status) || float32_lt(fst0, fst1, &env->active_fpu.fp_status),
3407 float32_unordered(fsth1, fsth0, &env->active_fpu.fp_status) || float32_lt(fsth0, fsth1, &env->active_fpu.fp_status))
3408 FOP_COND_PS(le, float32_le(fst0, fst1, &env->active_fpu.fp_status),
3409 float32_le(fsth0, fsth1, &env->active_fpu.fp_status))
3410 FOP_COND_PS(ngt, float32_unordered(fst1, fst0, &env->active_fpu.fp_status) || float32_le(fst0, fst1, &env->active_fpu.fp_status),
3411 float32_unordered(fsth1, fsth0, &env->active_fpu.fp_status) || float32_le(fsth0, fsth1, &env->active_fpu.fp_status))