1 /* frv simulator support code
2 Copyright (C) 1998-2016 Free Software Foundation, Inc.
3 Contributed by Red Hat.
5 This file is part of the GNU simulators.
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3 of the License, or
10 (at your option) any later version.
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
17 You should have received a copy of the GNU General Public License
18 along with this program. If not, see <http://www.gnu.org/licenses/>. */
21 #define WANT_CPU_FRVBF
26 #include "cgen-engine.h"
29 #include "gdb/sim-frv.h"
32 /* Maintain a flag in order to know when to write the address of the next
33 VLIW instruction into the LR register.  Used by JMPL, JMPIL, and CALL
insns.  */
35 int frvbf_write_next_vliw_addr_to_LR;
37 /* The contents of BUF are in target byte order. */
39 frvbf_fetch_register (SIM_CPU *current_cpu, int rn, unsigned char *buf, int len)
41 if (SIM_FRV_GR0_REGNUM <= rn && rn <= SIM_FRV_GR63_REGNUM)
43 int hi_available, lo_available;
44 int grn = rn - SIM_FRV_GR0_REGNUM;
46 frv_gr_registers_available (current_cpu, &hi_available, &lo_available);
48 if ((grn < 32 && !lo_available) || (grn >= 32 && !hi_available))
51 SETTSI (buf, GET_H_GR (grn));
53 else if (SIM_FRV_FR0_REGNUM <= rn && rn <= SIM_FRV_FR63_REGNUM)
55 int hi_available, lo_available;
56 int frn = rn - SIM_FRV_FR0_REGNUM;
58 frv_fr_registers_available (current_cpu, &hi_available, &lo_available);
60 if ((frn < 32 && !lo_available) || (frn >= 32 && !hi_available))
63 SETTSI (buf, GET_H_FR (frn));
65 else if (rn == SIM_FRV_PC_REGNUM)
66 SETTSI (buf, GET_H_PC ());
67 else if (SIM_FRV_SPR0_REGNUM <= rn && rn <= SIM_FRV_SPR4095_REGNUM)
69 /* Make sure the register is implemented. */
70 FRV_REGISTER_CONTROL *control = CPU_REGISTER_CONTROL (current_cpu);
71 int spr = rn - SIM_FRV_SPR0_REGNUM;
72 if (! control->spr[spr].implemented)
74 SETTSI (buf, GET_H_SPR (spr));
78 SETTSI (buf, 0xdeadbeef);
85 /* The contents of BUF are in target byte order. */
88 frvbf_store_register (SIM_CPU *current_cpu, int rn, unsigned char *buf, int len)
90 if (SIM_FRV_GR0_REGNUM <= rn && rn <= SIM_FRV_GR63_REGNUM)
92 int hi_available, lo_available;
93 int grn = rn - SIM_FRV_GR0_REGNUM;
95 frv_gr_registers_available (current_cpu, &hi_available, &lo_available);
97 if ((grn < 32 && !lo_available) || (grn >= 32 && !hi_available))
100 SET_H_GR (grn, GETTSI (buf));
102 else if (SIM_FRV_FR0_REGNUM <= rn && rn <= SIM_FRV_FR63_REGNUM)
104 int hi_available, lo_available;
105 int frn = rn - SIM_FRV_FR0_REGNUM;
107 frv_fr_registers_available (current_cpu, &hi_available, &lo_available);
109 if ((frn < 32 && !lo_available) || (frn >= 32 && !hi_available))
112 SET_H_FR (frn, GETTSI (buf));
114 else if (rn == SIM_FRV_PC_REGNUM)
115 SET_H_PC (GETTSI (buf));
116 else if (SIM_FRV_SPR0_REGNUM <= rn && rn <= SIM_FRV_SPR4095_REGNUM)
118 /* Make sure the register is implemented. */
119 FRV_REGISTER_CONTROL *control = CPU_REGISTER_CONTROL (current_cpu);
120 int spr = rn - SIM_FRV_SPR0_REGNUM;
121 if (! control->spr[spr].implemented)
123 SET_H_SPR (spr, GETTSI (buf));
131 /* Cover fns to access the general registers. */
133 frvbf_h_gr_get_handler (SIM_CPU *current_cpu, UINT gr)
135 frv_check_gr_access (current_cpu, gr);
136 return CPU (h_gr[gr]);
140 frvbf_h_gr_set_handler (SIM_CPU *current_cpu, UINT gr, USI newval)
142 frv_check_gr_access (current_cpu, gr);
145 return; /* Storing into gr0 has no effect. */
147 CPU (h_gr[gr]) = newval;
150 /* Cover fns to access the floating point registers. */
152 frvbf_h_fr_get_handler (SIM_CPU *current_cpu, UINT fr)
154 frv_check_fr_access (current_cpu, fr);
155 return CPU (h_fr[fr]);
159 frvbf_h_fr_set_handler (SIM_CPU *current_cpu, UINT fr, SF newval)
161 frv_check_fr_access (current_cpu, fr);
162 CPU (h_fr[fr]) = newval;
165 /* Cover fns to access the general registers as double words. */
167 check_register_alignment (SIM_CPU *current_cpu, UINT reg, int align_mask)
169 if (reg & align_mask)
171 SIM_DESC sd = CPU_STATE (current_cpu);
172 switch (STATE_ARCHITECTURE (sd)->mach)
174 /* Note: there is a discrepancy between V2.2 of the FR400
175 instruction manual and the various FR4xx LSI specs.
176 The former claims that unaligned registers cause a
177 register_exception while the latter say it's an
178 illegal_instruction. The LSI specs appear to be
179 correct; in fact, the FR4xx series is not documented
180 as having a register_exception. */
184 frv_queue_program_interrupt (current_cpu, FRV_ILLEGAL_INSTRUCTION);
186 case bfd_mach_frvtomcat:
189 frv_queue_register_exception_interrupt (current_cpu,
203 check_fr_register_alignment (SIM_CPU *current_cpu, UINT reg, int align_mask)
205 if (reg & align_mask)
207 SIM_DESC sd = CPU_STATE (current_cpu);
208 switch (STATE_ARCHITECTURE (sd)->mach)
210 /* See comment in check_register_alignment(). */
214 frv_queue_program_interrupt (current_cpu, FRV_ILLEGAL_INSTRUCTION);
216 case bfd_mach_frvtomcat:
220 struct frv_fp_exception_info fp_info = {
221 FSR_NO_EXCEPTION, FTT_INVALID_FR
223 frv_queue_fp_exception_interrupt (current_cpu, & fp_info);
237 check_memory_alignment (SIM_CPU *current_cpu, SI address, int align_mask)
239 if (address & align_mask)
241 SIM_DESC sd = CPU_STATE (current_cpu);
242 switch (STATE_ARCHITECTURE (sd)->mach)
244 /* See comment in check_register_alignment(). */
247 frv_queue_data_access_error_interrupt (current_cpu, address);
249 case bfd_mach_frvtomcat:
252 frv_queue_mem_address_not_aligned_interrupt (current_cpu, address);
258 address &= ~align_mask;
265 frvbf_h_gr_double_get_handler (SIM_CPU *current_cpu, UINT gr)
270 return 0; /* gr0 is always 0. */
272 /* Check the register alignment. */
273 gr = check_register_alignment (current_cpu, gr, 1);
275 value = GET_H_GR (gr);
277 value |= (USI) GET_H_GR (gr + 1);
282 frvbf_h_gr_double_set_handler (SIM_CPU *current_cpu, UINT gr, DI newval)
285 return; /* Storing into gr0 has no effect. */
287 /* Check the register alignment. */
288 gr = check_register_alignment (current_cpu, gr, 1);
290 SET_H_GR (gr , (newval >> 32) & 0xffffffff);
291 SET_H_GR (gr + 1, (newval ) & 0xffffffff);
294 /* Cover fns to access the floating point register as double words. */
296 frvbf_h_fr_double_get_handler (SIM_CPU *current_cpu, UINT fr)
303 /* Check the register alignment. */
304 fr = check_fr_register_alignment (current_cpu, fr, 1);
306 if (HOST_BYTE_ORDER == BFD_ENDIAN_LITTLE)
308 value.as_sf[1] = GET_H_FR (fr);
309 value.as_sf[0] = GET_H_FR (fr + 1);
313 value.as_sf[0] = GET_H_FR (fr);
314 value.as_sf[1] = GET_H_FR (fr + 1);
321 frvbf_h_fr_double_set_handler (SIM_CPU *current_cpu, UINT fr, DF newval)
328 /* Check the register alignment. */
329 fr = check_fr_register_alignment (current_cpu, fr, 1);
331 value.as_df = newval;
332 if (HOST_BYTE_ORDER == BFD_ENDIAN_LITTLE)
334 SET_H_FR (fr , value.as_sf[1]);
335 SET_H_FR (fr + 1, value.as_sf[0]);
339 SET_H_FR (fr , value.as_sf[0]);
340 SET_H_FR (fr + 1, value.as_sf[1]);
344 /* Cover fns to access the floating point register as integer words. */
346 frvbf_h_fr_int_get_handler (SIM_CPU *current_cpu, UINT fr)
353 value.as_sf = GET_H_FR (fr);
358 frvbf_h_fr_int_set_handler (SIM_CPU *current_cpu, UINT fr, USI newval)
365 value.as_usi = newval;
366 SET_H_FR (fr, value.as_sf);
369 /* Cover fns to access the coprocessor registers as double words. */
371 frvbf_h_cpr_double_get_handler (SIM_CPU *current_cpu, UINT cpr)
375 /* Check the register alignment. */
376 cpr = check_register_alignment (current_cpu, cpr, 1);
378 value = GET_H_CPR (cpr);
380 value |= (USI) GET_H_CPR (cpr + 1);
385 frvbf_h_cpr_double_set_handler (SIM_CPU *current_cpu, UINT cpr, DI newval)
387 /* Check the register alignment. */
388 cpr = check_register_alignment (current_cpu, cpr, 1);
390 SET_H_CPR (cpr , (newval >> 32) & 0xffffffff);
391 SET_H_CPR (cpr + 1, (newval ) & 0xffffffff);
394 /* Cover fns to write registers as quad words. */
396 frvbf_h_gr_quad_set_handler (SIM_CPU *current_cpu, UINT gr, SI *newval)
399 return; /* Storing into gr0 has no effect. */
401 /* Check the register alignment. */
402 gr = check_register_alignment (current_cpu, gr, 3);
404 SET_H_GR (gr , newval[0]);
405 SET_H_GR (gr + 1, newval[1]);
406 SET_H_GR (gr + 2, newval[2]);
407 SET_H_GR (gr + 3, newval[3]);
411 frvbf_h_fr_quad_set_handler (SIM_CPU *current_cpu, UINT fr, SI *newval)
413 /* Check the register alignment. */
414 fr = check_fr_register_alignment (current_cpu, fr, 3);
416 SET_H_FR (fr , newval[0]);
417 SET_H_FR (fr + 1, newval[1]);
418 SET_H_FR (fr + 2, newval[2]);
419 SET_H_FR (fr + 3, newval[3]);
423 frvbf_h_cpr_quad_set_handler (SIM_CPU *current_cpu, UINT cpr, SI *newval)
425 /* Check the register alignment. */
426 cpr = check_register_alignment (current_cpu, cpr, 3);
428 SET_H_CPR (cpr , newval[0]);
429 SET_H_CPR (cpr + 1, newval[1]);
430 SET_H_CPR (cpr + 2, newval[2]);
431 SET_H_CPR (cpr + 3, newval[3]);
434 /* Cover fns to access the special purpose registers. */
436 frvbf_h_spr_get_handler (SIM_CPU *current_cpu, UINT spr)
438 /* Check access restrictions. */
439 frv_check_spr_read_access (current_cpu, spr);
444 return spr_psr_get_handler (current_cpu);
446 return spr_tbr_get_handler (current_cpu);
448 return spr_bpsr_get_handler (current_cpu);
450 return spr_ccr_get_handler (current_cpu);
452 return spr_cccr_get_handler (current_cpu);
457 return spr_sr_get_handler (current_cpu, spr);
460 return CPU (h_spr[spr]);
466 frvbf_h_spr_set_handler (SIM_CPU *current_cpu, UINT spr, USI newval)
468 FRV_REGISTER_CONTROL *control;
472 /* Check access restrictions. */
473 frv_check_spr_write_access (current_cpu, spr);
475 /* Only set those fields which are writeable. */
476 control = CPU_REGISTER_CONTROL (current_cpu);
477 mask = control->spr[spr].read_only_mask;
478 oldval = GET_H_SPR (spr);
480 newval = (newval & ~mask) | (oldval & mask);
482 /* Some registers are represented by individual components which are
483 referenced more often than the register itself. */
487 spr_psr_set_handler (current_cpu, newval);
490 spr_tbr_set_handler (current_cpu, newval);
493 spr_bpsr_set_handler (current_cpu, newval);
496 spr_ccr_set_handler (current_cpu, newval);
499 spr_cccr_set_handler (current_cpu, newval);
505 spr_sr_set_handler (current_cpu, spr, newval);
508 frv_cache_reconfigure (current_cpu, CPU_INSN_CACHE (current_cpu));
511 CPU (h_spr[spr]) = newval;
516 /* Cover fns to access the gr_hi and gr_lo registers. */
518 frvbf_h_gr_hi_get_handler (SIM_CPU *current_cpu, UINT gr)
520 return (GET_H_GR(gr) >> 16) & 0xffff;
524 frvbf_h_gr_hi_set_handler (SIM_CPU *current_cpu, UINT gr, UHI newval)
526 USI value = (GET_H_GR (gr) & 0xffff) | (newval << 16);
527 SET_H_GR (gr, value);
531 frvbf_h_gr_lo_get_handler (SIM_CPU *current_cpu, UINT gr)
533 return GET_H_GR(gr) & 0xffff;
537 frvbf_h_gr_lo_set_handler (SIM_CPU *current_cpu, UINT gr, UHI newval)
539 USI value = (GET_H_GR (gr) & 0xffff0000) | (newval & 0xffff);
540 SET_H_GR (gr, value);
543 /* Cover fns to access the tbr bits. */
545 spr_tbr_get_handler (SIM_CPU *current_cpu)
547 int tbr = ((GET_H_TBR_TBA () & 0xfffff) << 12) |
548 ((GET_H_TBR_TT () & 0xff) << 4);
554 spr_tbr_set_handler (SIM_CPU *current_cpu, USI newval)
558 SET_H_TBR_TBA ((tbr >> 12) & 0xfffff) ;
559 SET_H_TBR_TT ((tbr >> 4) & 0xff) ;
562 /* Cover fns to access the bpsr bits. */
564 spr_bpsr_get_handler (SIM_CPU *current_cpu)
566 int bpsr = ((GET_H_BPSR_BS () & 0x1) << 12) |
567 ((GET_H_BPSR_BET () & 0x1) );
573 spr_bpsr_set_handler (SIM_CPU *current_cpu, USI newval)
577 SET_H_BPSR_BS ((bpsr >> 12) & 1);
578 SET_H_BPSR_BET ((bpsr ) & 1);
581 /* Cover fns to access the psr bits. */
583 spr_psr_get_handler (SIM_CPU *current_cpu)
585 int psr = ((GET_H_PSR_IMPLE () & 0xf) << 28) |
586 ((GET_H_PSR_VER () & 0xf) << 24) |
587 ((GET_H_PSR_ICE () & 0x1) << 16) |
588 ((GET_H_PSR_NEM () & 0x1) << 14) |
589 ((GET_H_PSR_CM () & 0x1) << 13) |
590 ((GET_H_PSR_BE () & 0x1) << 12) |
591 ((GET_H_PSR_ESR () & 0x1) << 11) |
592 ((GET_H_PSR_EF () & 0x1) << 8) |
593 ((GET_H_PSR_EM () & 0x1) << 7) |
594 ((GET_H_PSR_PIL () & 0xf) << 3) |
595 ((GET_H_PSR_S () & 0x1) << 2) |
596 ((GET_H_PSR_PS () & 0x1) << 1) |
597 ((GET_H_PSR_ET () & 0x1) );
603 spr_psr_set_handler (SIM_CPU *current_cpu, USI newval)
605 /* The handler for PSR.S references the value of PSR.ESR, so set PSR.S
607 SET_H_PSR_S ((newval >> 2) & 1);
609 SET_H_PSR_IMPLE ((newval >> 28) & 0xf);
610 SET_H_PSR_VER ((newval >> 24) & 0xf);
611 SET_H_PSR_ICE ((newval >> 16) & 1);
612 SET_H_PSR_NEM ((newval >> 14) & 1);
613 SET_H_PSR_CM ((newval >> 13) & 1);
614 SET_H_PSR_BE ((newval >> 12) & 1);
615 SET_H_PSR_ESR ((newval >> 11) & 1);
616 SET_H_PSR_EF ((newval >> 8) & 1);
617 SET_H_PSR_EM ((newval >> 7) & 1);
618 SET_H_PSR_PIL ((newval >> 3) & 0xf);
619 SET_H_PSR_PS ((newval >> 1) & 1);
620 SET_H_PSR_ET ((newval ) & 1);
624 frvbf_h_psr_s_set_handler (SIM_CPU *current_cpu, BI newval)
626 /* If switching from user to supervisor mode, or vice-versa, then switch
627 the supervisor/user context. */
628 int psr_s = GET_H_PSR_S ();
629 if (psr_s != (newval & 1))
631 frvbf_switch_supervisor_user_context (current_cpu);
632 CPU (h_psr_s) = newval & 1;
636 /* Cover fns to access the ccr bits. */
638 spr_ccr_get_handler (SIM_CPU *current_cpu)
640 int ccr = ((GET_H_ICCR (H_ICCR_ICC3) & 0xf) << 28) |
641 ((GET_H_ICCR (H_ICCR_ICC2) & 0xf) << 24) |
642 ((GET_H_ICCR (H_ICCR_ICC1) & 0xf) << 20) |
643 ((GET_H_ICCR (H_ICCR_ICC0) & 0xf) << 16) |
644 ((GET_H_FCCR (H_FCCR_FCC3) & 0xf) << 12) |
645 ((GET_H_FCCR (H_FCCR_FCC2) & 0xf) << 8) |
646 ((GET_H_FCCR (H_FCCR_FCC1) & 0xf) << 4) |
647 ((GET_H_FCCR (H_FCCR_FCC0) & 0xf) );
653 spr_ccr_set_handler (SIM_CPU *current_cpu, USI newval)
657 SET_H_ICCR (H_ICCR_ICC3, (newval >> 28) & 0xf);
658 SET_H_ICCR (H_ICCR_ICC2, (newval >> 24) & 0xf);
659 SET_H_ICCR (H_ICCR_ICC1, (newval >> 20) & 0xf);
660 SET_H_ICCR (H_ICCR_ICC0, (newval >> 16) & 0xf);
661 SET_H_FCCR (H_FCCR_FCC3, (newval >> 12) & 0xf);
662 SET_H_FCCR (H_FCCR_FCC2, (newval >> 8) & 0xf);
663 SET_H_FCCR (H_FCCR_FCC1, (newval >> 4) & 0xf);
664 SET_H_FCCR (H_FCCR_FCC0, (newval ) & 0xf);
668 frvbf_set_icc_for_shift_right (
669 SIM_CPU *current_cpu, SI value, SI shift, QI icc
672 /* Set the C flag of the given icc to the logical OR of the bits shifted
674 int mask = (1 << shift) - 1;
675 if ((value & mask) != 0)
682 frvbf_set_icc_for_shift_left (
683 SIM_CPU *current_cpu, SI value, SI shift, QI icc
686 /* Set the V flag of the given icc to the logical OR of the bits shifted
688 int mask = ((1 << shift) - 1) << (32 - shift);
689 if ((value & mask) != 0)
695 /* Cover fns to access the cccr bits. */
697 spr_cccr_get_handler (SIM_CPU *current_cpu)
699 int cccr = ((GET_H_CCCR (H_CCCR_CC7) & 0x3) << 14) |
700 ((GET_H_CCCR (H_CCCR_CC6) & 0x3) << 12) |
701 ((GET_H_CCCR (H_CCCR_CC5) & 0x3) << 10) |
702 ((GET_H_CCCR (H_CCCR_CC4) & 0x3) << 8) |
703 ((GET_H_CCCR (H_CCCR_CC3) & 0x3) << 6) |
704 ((GET_H_CCCR (H_CCCR_CC2) & 0x3) << 4) |
705 ((GET_H_CCCR (H_CCCR_CC1) & 0x3) << 2) |
706 ((GET_H_CCCR (H_CCCR_CC0) & 0x3) );
712 spr_cccr_set_handler (SIM_CPU *current_cpu, USI newval)
716 SET_H_CCCR (H_CCCR_CC7, (newval >> 14) & 0x3);
717 SET_H_CCCR (H_CCCR_CC6, (newval >> 12) & 0x3);
718 SET_H_CCCR (H_CCCR_CC5, (newval >> 10) & 0x3);
719 SET_H_CCCR (H_CCCR_CC4, (newval >> 8) & 0x3);
720 SET_H_CCCR (H_CCCR_CC3, (newval >> 6) & 0x3);
721 SET_H_CCCR (H_CCCR_CC2, (newval >> 4) & 0x3);
722 SET_H_CCCR (H_CCCR_CC1, (newval >> 2) & 0x3);
723 SET_H_CCCR (H_CCCR_CC0, (newval ) & 0x3);
726 /* Cover fns to access the sr bits. */
728 spr_sr_get_handler (SIM_CPU *current_cpu, UINT spr)
730 /* If PSR.ESR is not set, then SR0-3 map onto SGR4-7 which will be GR4-7,
731 otherwise the correct mapping of USG4-7 or SGR4-7 will be in SR0-3. */
732 int psr_esr = GET_H_PSR_ESR ();
734 return GET_H_GR (4 + (spr - H_SPR_SR0));
736 return CPU (h_spr[spr]);
740 spr_sr_set_handler (SIM_CPU *current_cpu, UINT spr, USI newval)
742 /* If PSR.ESR is not set, then SR0-3 map onto SGR4-7 which will be GR4-7,
743 otherwise the correct mapping of USG4-7 or SGR4-7 will be in SR0-3. */
744 int psr_esr = GET_H_PSR_ESR ();
746 SET_H_GR (4 + (spr - H_SPR_SR0), newval);
748 CPU (h_spr[spr]) = newval;
751 /* Switch SR0-SR4 with GR4-GR7 if PSR.ESR is set. */
753 frvbf_switch_supervisor_user_context (SIM_CPU *current_cpu)
755 if (GET_H_PSR_ESR ())
757 /* We need to be in supervisor mode to swap the registers. Access the
758 PSR.S directly in order to avoid recursive context switches. */
760 int save_psr_s = CPU (h_psr_s);
762 for (i = 0; i < 4; ++i)
765 int spr = i + H_SPR_SR0;
766 SI tmp = GET_H_SPR (spr);
767 SET_H_SPR (spr, GET_H_GR (gr));
770 CPU (h_psr_s) = save_psr_s;
774 /* Handle load/store of quad registers. */
776 frvbf_load_quad_GR (SIM_CPU *current_cpu, PCADDR pc, SI address, SI targ_ix)
781 /* Check memory alignment */
782 address = check_memory_alignment (current_cpu, address, 0xf);
784 /* If we need to count cycles, then the cache operation will be
785 initiated from the model profiling functions.
786 See frvbf_model_.... */
789 CPU_LOAD_ADDRESS (current_cpu) = address;
790 CPU_LOAD_LENGTH (current_cpu) = 16;
794 for (i = 0; i < 4; ++i)
796 value[i] = frvbf_read_mem_SI (current_cpu, pc, address);
799 sim_queue_fn_xi_write (current_cpu, frvbf_h_gr_quad_set_handler, targ_ix,
805 frvbf_store_quad_GR (SIM_CPU *current_cpu, PCADDR pc, SI address, SI src_ix)
811 /* Check register and memory alignment. */
812 src_ix = check_register_alignment (current_cpu, src_ix, 3);
813 address = check_memory_alignment (current_cpu, address, 0xf);
815 for (i = 0; i < 4; ++i)
817 /* GR0 is always 0. */
821 value[i] = GET_H_GR (src_ix + i);
824 if (GET_HSR0_DCE (hsr0))
825 sim_queue_fn_mem_xi_write (current_cpu, frvbf_mem_set_XI, address, value);
827 sim_queue_mem_xi_write (current_cpu, address, value);
831 frvbf_load_quad_FRint (SIM_CPU *current_cpu, PCADDR pc, SI address, SI targ_ix)
836 /* Check memory alignment */
837 address = check_memory_alignment (current_cpu, address, 0xf);
839 /* If we need to count cycles, then the cache operation will be
840 initiated from the model profiling functions.
841 See frvbf_model_.... */
844 CPU_LOAD_ADDRESS (current_cpu) = address;
845 CPU_LOAD_LENGTH (current_cpu) = 16;
849 for (i = 0; i < 4; ++i)
851 value[i] = frvbf_read_mem_SI (current_cpu, pc, address);
854 sim_queue_fn_xi_write (current_cpu, frvbf_h_fr_quad_set_handler, targ_ix,
860 frvbf_store_quad_FRint (SIM_CPU *current_cpu, PCADDR pc, SI address, SI src_ix)
866 /* Check register and memory alignment. */
867 src_ix = check_fr_register_alignment (current_cpu, src_ix, 3);
868 address = check_memory_alignment (current_cpu, address, 0xf);
870 for (i = 0; i < 4; ++i)
871 value[i] = GET_H_FR (src_ix + i);
874 if (GET_HSR0_DCE (hsr0))
875 sim_queue_fn_mem_xi_write (current_cpu, frvbf_mem_set_XI, address, value);
877 sim_queue_mem_xi_write (current_cpu, address, value);
881 frvbf_load_quad_CPR (SIM_CPU *current_cpu, PCADDR pc, SI address, SI targ_ix)
886 /* Check memory alignment */
887 address = check_memory_alignment (current_cpu, address, 0xf);
889 /* If we need to count cycles, then the cache operation will be
890 initiated from the model profiling functions.
891 See frvbf_model_.... */
894 CPU_LOAD_ADDRESS (current_cpu) = address;
895 CPU_LOAD_LENGTH (current_cpu) = 16;
899 for (i = 0; i < 4; ++i)
901 value[i] = frvbf_read_mem_SI (current_cpu, pc, address);
904 sim_queue_fn_xi_write (current_cpu, frvbf_h_cpr_quad_set_handler, targ_ix,
910 frvbf_store_quad_CPR (SIM_CPU *current_cpu, PCADDR pc, SI address, SI src_ix)
916 /* Check register and memory alignment. */
917 src_ix = check_register_alignment (current_cpu, src_ix, 3);
918 address = check_memory_alignment (current_cpu, address, 0xf);
920 for (i = 0; i < 4; ++i)
921 value[i] = GET_H_CPR (src_ix + i);
924 if (GET_HSR0_DCE (hsr0))
925 sim_queue_fn_mem_xi_write (current_cpu, frvbf_mem_set_XI, address, value);
927 sim_queue_mem_xi_write (current_cpu, address, value);
931 frvbf_signed_integer_divide (
932 SIM_CPU *current_cpu, SI arg1, SI arg2, int target_index, int non_excepting
935 enum frv_dtt dtt = FRV_DTT_NO_EXCEPTION;
936 if (arg1 == 0x80000000 && arg2 == -1)
938 /* 0x80000000/(-1) must result in 0x7fffffff when ISR.EDE is set
939 otherwise it may result in 0x7fffffff (sparc compatibility) or
940 0x80000000 (C language compatibility). */
942 dtt = FRV_DTT_OVERFLOW;
945 if (GET_ISR_EDE (isr))
946 sim_queue_fn_si_write (current_cpu, frvbf_h_gr_set, target_index,
949 sim_queue_fn_si_write (current_cpu, frvbf_h_gr_set, target_index,
951 frvbf_force_update (current_cpu); /* Force update of target register. */
954 dtt = FRV_DTT_DIVISION_BY_ZERO;
956 sim_queue_fn_si_write (current_cpu, frvbf_h_gr_set, target_index,
959 /* Check for exceptions. */
960 if (dtt != FRV_DTT_NO_EXCEPTION)
961 dtt = frvbf_division_exception (current_cpu, dtt, target_index,
963 if (non_excepting && dtt == FRV_DTT_NO_EXCEPTION)
965 /* Non excepting instruction. Clear the NE flag for the target
968 GET_NE_FLAGS (NE_flags, H_SPR_GNER0);
969 CLEAR_NE_FLAG (NE_flags, target_index);
970 SET_NE_FLAGS (H_SPR_GNER0, NE_flags);
975 frvbf_unsigned_integer_divide (
976 SIM_CPU *current_cpu, USI arg1, USI arg2, int target_index, int non_excepting
980 frvbf_division_exception (current_cpu, FRV_DTT_DIVISION_BY_ZERO,
981 target_index, non_excepting);
984 sim_queue_fn_si_write (current_cpu, frvbf_h_gr_set, target_index,
988 /* Non excepting instruction. Clear the NE flag for the target
991 GET_NE_FLAGS (NE_flags, H_SPR_GNER0);
992 CLEAR_NE_FLAG (NE_flags, target_index);
993 SET_NE_FLAGS (H_SPR_GNER0, NE_flags);
998 /* Clear accumulators. */
1000 frvbf_clear_accumulators (SIM_CPU *current_cpu, SI acc_ix, int A)
1002 SIM_DESC sd = CPU_STATE (current_cpu);
1004 (STATE_ARCHITECTURE (sd)->mach == bfd_mach_fr500) ? 7 :
1005 (STATE_ARCHITECTURE (sd)->mach == bfd_mach_fr550) ? 7 :
1006 (STATE_ARCHITECTURE (sd)->mach == bfd_mach_fr450) ? 11 :
1007 (STATE_ARCHITECTURE (sd)->mach == bfd_mach_fr400) ? 3 :
1009 FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (current_cpu);
1011 ps->mclracc_acc = acc_ix;
1013 if (A == 0 || acc_ix != 0) /* Clear 1 accumuator? */
1015 /* This instruction is a nop if the referenced accumulator is not
1017 if ((acc_ix & acc_mask) == acc_ix)
1018 sim_queue_fn_di_write (current_cpu, frvbf_h_acc40S_set, acc_ix, 0);
1022 /* Clear all implemented accumulators. */
1024 for (i = 0; i <= acc_mask; ++i)
1025 if ((i & acc_mask) == i)
1026 sim_queue_fn_di_write (current_cpu, frvbf_h_acc40S_set, i, 0);
1030 /* Functions to aid insn semantics. */
1032 /* Compute the result of the SCAN and SCANI insns after the shift and xor. */
1034 frvbf_scan_result (SIM_CPU *current_cpu, SI value)
1042 /* Find the position of the first non-zero bit.
1043 The loop will terminate since there is guaranteed to be at least one
1045 mask = 1 << (sizeof (mask) * 8 - 1);
1046 for (i = 0; (value & mask) == 0; ++i)
1052 /* Compute the result of the cut insns. */
1054 frvbf_cut (SIM_CPU *current_cpu, SI reg1, SI reg2, SI cut_point)
1060 result = reg1 << cut_point;
1061 result |= (reg2 >> (32 - cut_point)) & ((1 << cut_point) - 1);
1064 result = reg2 << (cut_point - 32);
1069 /* Compute the result of the cut insns. */
1071 frvbf_media_cut (SIM_CPU *current_cpu, DI acc, SI cut_point)
1073 /* The cut point is the lower 6 bits (signed) of what we are passed. */
1074 cut_point = cut_point << 26 >> 26;
1076 /* The cut_point is relative to bit 40 of 64 bits. */
1078 return (acc << (cut_point + 24)) >> 32;
1080 /* Extend the sign bit (bit 40) for negative cuts. */
1081 if (cut_point == -32)
1082 return (acc << 24) >> 63; /* Special case for full shiftout. */
1084 return (acc << 24) >> (32 + -cut_point);
1087 /* Compute the result of the cut insns. */
1089 frvbf_media_cut_ss (SIM_CPU *current_cpu, DI acc, SI cut_point)
1091 /* The cut point is the lower 6 bits (signed) of what we are passed. */
1092 cut_point = cut_point << 26 >> 26;
1096 /* The cut_point is relative to bit 40 of 64 bits. */
1097 DI shifted = acc << (cut_point + 24);
1098 DI unshifted = shifted >> (cut_point + 24);
1100 /* The result will be saturated if significant bits are shifted out. */
1101 if (unshifted != acc)
1109 /* The result will not be saturated, so use the code for the normal cut. */
1110 return frvbf_media_cut (current_cpu, acc, cut_point);
1113 /* Compute the result of int accumulator cut (SCUTSS). */
1115 frvbf_iacc_cut (SIM_CPU *current_cpu, DI acc, SI cut_point)
1119 /* The cut point is the lower 7 bits (signed) of what we are passed. */
1120 cut_point = cut_point << 25 >> 25;
1122 /* Conceptually, the operation is on a 128-bit sign-extension of ACC.
1123 The top bit of the return value corresponds to bit (63 - CUT_POINT)
1124 of this 128-bit value.
1126 Since we can't deal with 128-bit values very easily, convert the
1127 operation into an equivalent 64-bit one. */
1130 /* Avoid an undefined shift operation. */
1131 if (cut_point == -64)
1138 /* Get the shifted but unsaturated result. Set LOWER to the lowest
1139 32 bits of the result and UPPER to the result >> 31. */
1142 /* The cut loses the (32 - CUT_POINT) least significant bits.
1143 Round the result up if the most significant of these lost bits
1145 lower = acc >> (32 - cut_point);
1146 if (lower < 0x7fffffff)
1147 if (acc & LSBIT64 (32 - cut_point - 1))
1149 upper = lower >> 31;
1153 lower = acc << (cut_point - 32);
1154 upper = acc >> (63 - cut_point);
1157 /* Saturate the result. */
1166 /* Compute the result of shift-left-arithmetic-with-saturation (SLASS). */
1168 frvbf_shift_left_arith_saturate (SIM_CPU *current_cpu, SI arg1, SI arg2)
1172 /* FIXME: what to do with negative shift amt? */
1179 /* Signed shift by 31 or greater saturates by definition. */
1182 return (SI) 0x7fffffff;
1184 return (SI) 0x80000000;
1186 /* OK, arg2 is between 1 and 31. */
1187 neg_arg1 = (arg1 < 0);
1190 /* Check for sign bit change (saturation). */
1191 if (neg_arg1 && (arg1 >= 0))
1192 return (SI) 0x80000000;
1193 else if (!neg_arg1 && (arg1 < 0))
1194 return (SI) 0x7fffffff;
1195 } while (--arg2 > 0);
1200 /* Simulate the media custom insns. */
1202 frvbf_media_cop (SIM_CPU *current_cpu, int cop_num)
1204 /* The semantics of the insn are a nop, since it is implementation defined.
1205 We do need to check whether it's implemented and set up for MTRAP
1207 USI msr0 = GET_MSR (0);
1208 if (GET_MSR_EMCI (msr0) == 0)
1210 /* no interrupt queued at this time. */
1211 frv_set_mp_exception_registers (current_cpu, MTT_UNIMPLEMENTED_MPOP, 0);
1215 /* Simulate the media average (MAVEH) insn. */
1217 do_media_average (SIM_CPU *current_cpu, HI arg1, HI arg2)
1219 SIM_DESC sd = CPU_STATE (current_cpu);
1220 SI sum = (arg1 + arg2);
1221 HI result = sum >> 1;
1224 /* On fr4xx and fr550, check the rounding mode. On other machines
1225 rounding is always toward negative infinity and the result is
1226 already correctly rounded. */
1227 switch (STATE_ARCHITECTURE (sd)->mach)
1229 /* Need to check rounding mode. */
1230 case bfd_mach_fr400:
1231 case bfd_mach_fr450:
1232 case bfd_mach_fr550:
1233 /* Check whether rounding will be required. Rounding will be required
1234 if the sum is an odd number. */
1235 rounding_value = sum & 1;
1238 USI msr0 = GET_MSR (0);
1239 /* Check MSR0.SRDAV to determine which bits control the rounding. */
1240 if (GET_MSR_SRDAV (msr0))
1242 /* MSR0.RD controls rounding. */
1243 switch (GET_MSR_RD (msr0))
1246 /* Round to nearest. */
1251 /* Round toward 0. */
1256 /* Round toward positive infinity. */
1260 /* Round toward negative infinity. The result is already
1261 correctly rounded. */
1270 /* MSR0.RDAV controls rounding. If set, round toward positive
1271 infinity. Otherwise the result is already rounded correctly
1272 toward negative infinity. */
1273 if (GET_MSR_RDAV (msr0))
1286 frvbf_media_average (SIM_CPU *current_cpu, SI reg1, SI reg2)
1289 result = do_media_average (current_cpu, reg1 & 0xffff, reg2 & 0xffff);
1291 result |= do_media_average (current_cpu, (reg1 >> 16) & 0xffff,
1292 (reg2 >> 16) & 0xffff) << 16;
1296 /* Maintain a flag in order to know when to write the address of the next
1297 VLIW instruction into the LR register. Used by JMPL. JMPIL, and CALL. */
1299 frvbf_set_write_next_vliw_addr_to_LR (SIM_CPU *current_cpu, int value)
1301 frvbf_write_next_vliw_addr_to_LR = value;
1305 frvbf_set_ne_index (SIM_CPU *current_cpu, int index)
1309 /* Save the target register so interrupt processing can set its NE flag
1310 in the event of an exception. */
1311 frv_interrupt_state.ne_index = index;
1313 /* Clear the NE flag of the target register. It will be reset if necessary
1314 in the event of an exception. */
1315 GET_NE_FLAGS (NE_flags, H_SPR_FNER0);
1316 CLEAR_NE_FLAG (NE_flags, index);
1317 SET_NE_FLAGS (H_SPR_FNER0, NE_flags);
1321 frvbf_force_update (SIM_CPU *current_cpu)
1323 CGEN_WRITE_QUEUE *q = CPU_WRITE_QUEUE (current_cpu);
1324 int ix = CGEN_WRITE_QUEUE_INDEX (q);
1327 CGEN_WRITE_QUEUE_ELEMENT *item = CGEN_WRITE_QUEUE_ELEMENT (q, ix - 1);
1328 item->flags |= FRV_WRITE_QUEUE_FORCE_WRITE;
1332 /* Condition code logic. */
1334 andcr, orcr, xorcr, nandcr, norcr, andncr, orncr, nandncr, norncr,
1338 enum cr_result {cr_undefined, cr_undefined1, cr_false, cr_true};
1340 static enum cr_result
1341 cr_logic[num_cr_ops][4][4] = {
1344 /* undefined undefined false true */
1345 /* undefined */ {cr_undefined, cr_undefined, cr_undefined, cr_undefined},
1346 /* undefined */ {cr_undefined, cr_undefined, cr_undefined, cr_undefined},
1347 /* false */ {cr_undefined, cr_undefined, cr_undefined, cr_undefined},
1348 /* true */ {cr_undefined, cr_undefined, cr_false, cr_true }
1352 /* undefined undefined false true */
1353 /* undefined */ {cr_undefined, cr_undefined, cr_false, cr_true },
1354 /* undefined */ {cr_undefined, cr_undefined, cr_false, cr_true },
1355 /* false */ {cr_false, cr_false, cr_false, cr_true },
1356 /* true */ {cr_true, cr_true, cr_true, cr_true }
1360 /* undefined undefined false true */
1361 /* undefined */ {cr_undefined, cr_undefined, cr_undefined, cr_undefined},
1362 /* undefined */ {cr_undefined, cr_undefined, cr_undefined, cr_undefined},
1363 /* false */ {cr_undefined, cr_undefined, cr_false, cr_true },
1364 /* true */ {cr_true, cr_true, cr_true, cr_false }
1368 /* undefined undefined false true */
1369 /* undefined */ {cr_undefined, cr_undefined, cr_undefined, cr_undefined},
1370 /* undefined */ {cr_undefined, cr_undefined, cr_undefined, cr_undefined},
1371 /* false */ {cr_undefined, cr_undefined, cr_undefined, cr_undefined},
1372 /* true */ {cr_undefined, cr_undefined, cr_true, cr_false }
1376 /* undefined undefined false true */
1377 /* undefined */ {cr_undefined, cr_undefined, cr_true, cr_false },
1378 /* undefined */ {cr_undefined, cr_undefined, cr_true, cr_false },
1379 /* false */ {cr_true, cr_true, cr_true, cr_false },
1380 /* true */ {cr_false, cr_false, cr_false, cr_false }
1384 /* undefined undefined false true */
1385 /* undefined */ {cr_undefined, cr_undefined, cr_undefined, cr_undefined},
1386 /* undefined */ {cr_undefined, cr_undefined, cr_undefined, cr_undefined},
1387 /* false */ {cr_undefined, cr_undefined, cr_false, cr_true },
1388 /* true */ {cr_undefined, cr_undefined, cr_undefined, cr_undefined}
1392 /* undefined undefined false true */
1393 /* undefined */ {cr_undefined, cr_undefined, cr_false, cr_true },
1394 /* undefined */ {cr_undefined, cr_undefined, cr_false, cr_true },
1395 /* false */ {cr_true, cr_true, cr_true, cr_true },
1396 /* true */ {cr_false, cr_false, cr_false, cr_true }
1400 /* undefined undefined false true */
1401 /* undefined */ {cr_undefined, cr_undefined, cr_undefined, cr_undefined},
1402 /* undefined */ {cr_undefined, cr_undefined, cr_undefined, cr_undefined},
1403 /* false */ {cr_undefined, cr_undefined, cr_true, cr_false },
1404 /* true */ {cr_undefined, cr_undefined, cr_undefined, cr_undefined}
1408 /* undefined undefined false true */
1409 /* undefined */ {cr_undefined, cr_undefined, cr_true, cr_false },
1410 /* undefined */ {cr_undefined, cr_undefined, cr_true, cr_false },
1411 /* false */ {cr_false, cr_false, cr_false, cr_false },
1412 /* true */ {cr_true, cr_true, cr_true, cr_false }
1417 frvbf_cr_logic (SIM_CPU *current_cpu, SI operation, UQI arg1, UQI arg2)
1419 return cr_logic[operation][arg1][arg2];
/* Cache Manipulation.  */
1424 frvbf_insn_cache_preload (SIM_CPU *current_cpu, SI address, USI length, int lock)
1426 /* If we need to count cycles, then the cache operation will be
1427 initiated from the model profiling functions.
1428 See frvbf_model_.... */
1429 int hsr0 = GET_HSR0 ();
1430 if (GET_HSR0_ICE (hsr0))
1434 CPU_LOAD_ADDRESS (current_cpu) = address;
1435 CPU_LOAD_LENGTH (current_cpu) = length;
1436 CPU_LOAD_LOCK (current_cpu) = lock;
1440 FRV_CACHE *cache = CPU_INSN_CACHE (current_cpu);
1441 frv_cache_preload (cache, address, length, lock);
1447 frvbf_data_cache_preload (SIM_CPU *current_cpu, SI address, USI length, int lock)
1449 /* If we need to count cycles, then the cache operation will be
1450 initiated from the model profiling functions.
1451 See frvbf_model_.... */
1452 int hsr0 = GET_HSR0 ();
1453 if (GET_HSR0_DCE (hsr0))
1457 CPU_LOAD_ADDRESS (current_cpu) = address;
1458 CPU_LOAD_LENGTH (current_cpu) = length;
1459 CPU_LOAD_LOCK (current_cpu) = lock;
1463 FRV_CACHE *cache = CPU_DATA_CACHE (current_cpu);
1464 frv_cache_preload (cache, address, length, lock);
1470 frvbf_insn_cache_unlock (SIM_CPU *current_cpu, SI address)
1472 /* If we need to count cycles, then the cache operation will be
1473 initiated from the model profiling functions.
1474 See frvbf_model_.... */
1475 int hsr0 = GET_HSR0 ();
1476 if (GET_HSR0_ICE (hsr0))
1479 CPU_LOAD_ADDRESS (current_cpu) = address;
1482 FRV_CACHE *cache = CPU_INSN_CACHE (current_cpu);
1483 frv_cache_unlock (cache, address);
1489 frvbf_data_cache_unlock (SIM_CPU *current_cpu, SI address)
1491 /* If we need to count cycles, then the cache operation will be
1492 initiated from the model profiling functions.
1493 See frvbf_model_.... */
1494 int hsr0 = GET_HSR0 ();
1495 if (GET_HSR0_DCE (hsr0))
1498 CPU_LOAD_ADDRESS (current_cpu) = address;
1501 FRV_CACHE *cache = CPU_DATA_CACHE (current_cpu);
1502 frv_cache_unlock (cache, address);
1508 frvbf_insn_cache_invalidate (SIM_CPU *current_cpu, SI address, int all)
1510 /* Make sure the insn was specified properly. -1 will be passed for ALL
1511 for a icei with A=0. */
1514 frv_queue_program_interrupt (current_cpu, FRV_ILLEGAL_INSTRUCTION);
1518 /* If we need to count cycles, then the cache operation will be
1519 initiated from the model profiling functions.
1520 See frvbf_model_.... */
1523 /* Record the all-entries flag for use in profiling. */
1524 FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (current_cpu);
1525 ps->all_cache_entries = all;
1526 CPU_LOAD_ADDRESS (current_cpu) = address;
1530 FRV_CACHE *cache = CPU_INSN_CACHE (current_cpu);
1532 frv_cache_invalidate_all (cache, 0/* flush? */);
1534 frv_cache_invalidate (cache, address, 0/* flush? */);
1539 frvbf_data_cache_invalidate (SIM_CPU *current_cpu, SI address, int all)
1541 /* Make sure the insn was specified properly. -1 will be passed for ALL
1542 for a dcei with A=0. */
1545 frv_queue_program_interrupt (current_cpu, FRV_ILLEGAL_INSTRUCTION);
1549 /* If we need to count cycles, then the cache operation will be
1550 initiated from the model profiling functions.
1551 See frvbf_model_.... */
1554 /* Record the all-entries flag for use in profiling. */
1555 FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (current_cpu);
1556 ps->all_cache_entries = all;
1557 CPU_LOAD_ADDRESS (current_cpu) = address;
1561 FRV_CACHE *cache = CPU_DATA_CACHE (current_cpu);
1563 frv_cache_invalidate_all (cache, 0/* flush? */);
1565 frv_cache_invalidate (cache, address, 0/* flush? */);
1570 frvbf_data_cache_flush (SIM_CPU *current_cpu, SI address, int all)
1572 /* Make sure the insn was specified properly. -1 will be passed for ALL
1573 for a dcef with A=0. */
1576 frv_queue_program_interrupt (current_cpu, FRV_ILLEGAL_INSTRUCTION);
1580 /* If we need to count cycles, then the cache operation will be
1581 initiated from the model profiling functions.
1582 See frvbf_model_.... */
1585 /* Record the all-entries flag for use in profiling. */
1586 FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (current_cpu);
1587 ps->all_cache_entries = all;
1588 CPU_LOAD_ADDRESS (current_cpu) = address;
1592 FRV_CACHE *cache = CPU_DATA_CACHE (current_cpu);
1594 frv_cache_invalidate_all (cache, 1/* flush? */);
1596 frv_cache_invalidate (cache, address, 1/* flush? */);