1 /* frv simulator machine independent profiling code.
3 Copyright (C) 1998, 1999, 2000, 2001, 2003 Free Software Foundation, Inc.
6 This file is part of the GNU simulators.
8 This program is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License as published by
10 the Free Software Foundation; either version 2, or (at your option)
13 This program is distributed in the hope that it will be useful,
14 but WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 GNU General Public License for more details.
18 You should have received a copy of the GNU General Public License along
19 with this program; if not, write to the Free Software Foundation, Inc.,
20 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
24 #define WANT_CPU_FRVBF
29 #if WITH_PROFILE_MODEL_P
32 #include "profile-fr400.h"
33 #include "profile-fr500.h"
36 reset_gr_flags (SIM_CPU *cpu, INT gr)
38 SIM_DESC sd = CPU_STATE (cpu);
39 if (STATE_ARCHITECTURE (sd)->mach == bfd_mach_fr400)
40 fr400_reset_gr_flags (cpu, gr);
41 /* Other machines have no gr flags right now. */
45 reset_fr_flags (SIM_CPU *cpu, INT fr)
47 SIM_DESC sd = CPU_STATE (cpu);
48 if (STATE_ARCHITECTURE (sd)->mach == bfd_mach_fr400)
49 fr400_reset_fr_flags (cpu, fr);
50 else if (STATE_ARCHITECTURE (sd)->mach == bfd_mach_fr500)
51 fr500_reset_fr_flags (cpu, fr);
55 reset_acc_flags (SIM_CPU *cpu, INT acc)
57 SIM_DESC sd = CPU_STATE (cpu);
58 if (STATE_ARCHITECTURE (sd)->mach == bfd_mach_fr400)
59 fr400_reset_acc_flags (cpu, acc);
60 /* Other machines have no acc flags right now. */
64 reset_cc_flags (SIM_CPU *cpu, INT cc)
66 SIM_DESC sd = CPU_STATE (cpu);
67 if (STATE_ARCHITECTURE (sd)->mach == bfd_mach_fr500)
68 fr500_reset_cc_flags (cpu, cc);
69 /* Other machines have no cc flags. */
73 set_use_is_gr_complex (SIM_CPU *cpu, INT gr)
77 FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
78 reset_gr_flags (cpu, gr);
79 ps->cur_gr_complex |= (((DI)1) << gr);
84 set_use_not_gr_complex (SIM_CPU *cpu, INT gr)
88 FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
89 ps->cur_gr_complex &= ~(((DI)1) << gr);
94 use_is_gr_complex (SIM_CPU *cpu, INT gr)
98 FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
99 return ps->cur_gr_complex & (((DI)1) << gr);
104 /* Globals flag indicates whether this insn is being modeled. */
105 enum FRV_INSN_MODELING model_insn = FRV_INSN_NO_MODELING;
107 /* static buffer for the name of the currently most restrictive hazard. */
108 static char hazard_name[100] = "";
110 /* Print information about the wait applied to an entire VLIW insn. */
111 FRV_INSN_FETCH_BUFFER frv_insn_fetch_buffer[]
113 {1, NO_REQNO}, {1, NO_REQNO} /* init with impossible address. */
125 /* A queue of load requests from the data cache. Use to keep track of loads
126 which are still pending. */
127 /* TODO -- some of these are mutually exclusive and can use a union. */
142 enum cache_request request;
143 } CACHE_QUEUE_ELEMENT;
145 #define CACHE_QUEUE_SIZE 64 /* TODO -- make queue dynamic */
150 CACHE_QUEUE_ELEMENT q[CACHE_QUEUE_SIZE];
151 } cache_queue = {0, 0};
153 /* Queue a request for a load from the cache. The load will be queued as
154 'inactive' and will be requested after the given number
155 of cycles have passed from the point the load is activated. */
157 request_cache_load (SIM_CPU *cpu, INT regnum, int regtype, int cycles)
159 CACHE_QUEUE_ELEMENT *q;
163 /* For a conditional load which was not executed, CPU_LOAD_LENGTH will be
165 if (CPU_LOAD_LENGTH (cpu) == 0)
168 if (cache_queue.ix >= CACHE_QUEUE_SIZE)
169 abort (); /* TODO: Make the queue dynamic */
171 q = & cache_queue.q[cache_queue.ix];
174 q->reqno = cache_queue.reqno++;
175 q->request = cache_load;
176 q->cache = CPU_DATA_CACHE (cpu);
177 q->address = CPU_LOAD_ADDRESS (cpu);
178 q->length = CPU_LOAD_LENGTH (cpu);
179 q->is_signed = CPU_LOAD_SIGNED (cpu);
181 q->regtype = regtype;
185 vliw = CPU_VLIW (cpu);
186 slot = vliw->next_slot - 1;
187 q->slot = (*vliw->current_vliw)[slot];
189 CPU_LOAD_LENGTH (cpu) = 0;
192 /* Queue a request to flush the cache. The request will be queued as
193 'inactive' and will be requested after the given number
194 of cycles have passed from the point the request is activated. */
196 request_cache_flush (SIM_CPU *cpu, FRV_CACHE *cache, int cycles)
198 CACHE_QUEUE_ELEMENT *q;
202 if (cache_queue.ix >= CACHE_QUEUE_SIZE)
203 abort (); /* TODO: Make the queue dynamic */
205 q = & cache_queue.q[cache_queue.ix];
208 q->reqno = cache_queue.reqno++;
209 q->request = cache_flush;
211 q->address = CPU_LOAD_ADDRESS (cpu);
212 q->all = CPU_PROFILE_STATE (cpu)->all_cache_entries;
216 vliw = CPU_VLIW (cpu);
217 slot = vliw->next_slot - 1;
218 q->slot = (*vliw->current_vliw)[slot];
221 /* Queue a request to invalidate the cache. The request will be queued as
222 'inactive' and will be requested after the given number
223 of cycles have passed from the point the request is activated. */
225 request_cache_invalidate (SIM_CPU *cpu, FRV_CACHE *cache, int cycles)
227 CACHE_QUEUE_ELEMENT *q;
231 if (cache_queue.ix >= CACHE_QUEUE_SIZE)
232 abort (); /* TODO: Make the queue dynamic */
234 q = & cache_queue.q[cache_queue.ix];
237 q->reqno = cache_queue.reqno++;
238 q->request = cache_invalidate;
240 q->address = CPU_LOAD_ADDRESS (cpu);
241 q->all = CPU_PROFILE_STATE (cpu)->all_cache_entries;
245 vliw = CPU_VLIW (cpu);
246 slot = vliw->next_slot - 1;
247 q->slot = (*vliw->current_vliw)[slot];
250 /* Queue a request to preload the cache. The request will be queued as
251 'inactive' and will be requested after the given number
252 of cycles have passed from the point the request is activated. */
254 request_cache_preload (SIM_CPU *cpu, FRV_CACHE *cache, int cycles)
256 CACHE_QUEUE_ELEMENT *q;
260 if (cache_queue.ix >= CACHE_QUEUE_SIZE)
261 abort (); /* TODO: Make the queue dynamic */
263 q = & cache_queue.q[cache_queue.ix];
266 q->reqno = cache_queue.reqno++;
267 q->request = cache_preload;
269 q->address = CPU_LOAD_ADDRESS (cpu);
270 q->length = CPU_LOAD_LENGTH (cpu);
271 q->lock = CPU_LOAD_LOCK (cpu);
275 vliw = CPU_VLIW (cpu);
276 slot = vliw->next_slot - 1;
277 q->slot = (*vliw->current_vliw)[slot];
279 CPU_LOAD_LENGTH (cpu) = 0;
282 /* Queue a request to unlock the cache. The request will be queued as
283 'inactive' and will be requested after the given number
284 of cycles have passed from the point the request is activated. */
286 request_cache_unlock (SIM_CPU *cpu, FRV_CACHE *cache, int cycles)
288 CACHE_QUEUE_ELEMENT *q;
292 if (cache_queue.ix >= CACHE_QUEUE_SIZE)
293 abort (); /* TODO: Make the queue dynamic */
295 q = & cache_queue.q[cache_queue.ix];
298 q->reqno = cache_queue.reqno++;
299 q->request = cache_unlock;
301 q->address = CPU_LOAD_ADDRESS (cpu);
305 vliw = CPU_VLIW (cpu);
306 slot = vliw->next_slot - 1;
307 q->slot = (*vliw->current_vliw)[slot];
311 submit_cache_request (CACHE_QUEUE_ELEMENT *q)
316 frv_cache_request_load (q->cache, q->reqno, q->address, q->slot);
319 frv_cache_request_invalidate (q->cache, q->reqno, q->address, q->slot,
322 case cache_invalidate:
323 frv_cache_request_invalidate (q->cache, q->reqno, q->address, q->slot,
327 frv_cache_request_preload (q->cache, q->address, q->slot,
331 frv_cache_request_unlock (q->cache, q->address, q->slot);
338 /* Activate all inactive load requests. */
340 activate_cache_requests (SIM_CPU *cpu)
343 for (i = 0; i < cache_queue.ix; ++i)
345 CACHE_QUEUE_ELEMENT *q = & cache_queue.q[i];
349 /* Submit the request now if the cycle count is zero. */
351 submit_cache_request (q);
356 /* Check to see if a load is pending which affects the given register(s).
359 load_pending_for_register (SIM_CPU *cpu, int regnum, int words, int regtype)
362 for (i = 0; i < cache_queue.ix; ++i)
364 CACHE_QUEUE_ELEMENT *q = & cache_queue.q[i];
366 /* Must be the same kind of register. */
367 if (! q->active || q->request != cache_load || q->regtype != regtype)
370 /* If the registers numbers are equal, then we have a match. */
371 if (q->regnum == regnum)
372 return 1; /* load pending */
374 /* Check for overlap of a load with a multi-word register. */
375 if (regnum < q->regnum)
377 if (regnum + words > q->regnum)
380 /* Check for overlap of a multi-word load with the register. */
383 int data_words = (q->length + sizeof (SI) - 1) / sizeof (SI);
384 if (q->regnum + data_words > regnum)
389 return 0; /* no load pending */
392 /* Check to see if a cache flush pending which affects the given address. */
394 flush_pending_for_address (SIM_CPU *cpu, SI address)
396 int line_mask = ~(CPU_DATA_CACHE (cpu)->line_size - 1);
398 for (i = 0; i < cache_queue.ix; ++i)
400 CACHE_QUEUE_ELEMENT *q = & cache_queue.q[i];
402 /* Must be the same kind of request and active. */
403 if (! q->active || q->request != cache_flush)
406 /* If the addresses are equal, then we have a match. */
407 if ((q->address & line_mask) == (address & line_mask))
408 return 1; /* flush pending */
411 return 0; /* no flush pending */
415 remove_cache_queue_element (SIM_CPU *cpu, int i)
417 /* If we are removing the load of a FR register, then remember which one(s).
419 CACHE_QUEUE_ELEMENT q = cache_queue.q[i];
421 for (--cache_queue.ix; i < cache_queue.ix; ++i)
422 cache_queue.q[i] = cache_queue.q[i + 1];
424 /* If we removed a load of a FR register, check to see if any other loads
425 of that register is still queued. If not, then apply the queued post
426 processing time of that register to its latency. Also apply
427 1 extra cycle of latency to the register since it was a floating point
429 if (q.request == cache_load && q.regtype != REGTYPE_NONE)
431 FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
432 int data_words = (q.length + sizeof (SI) - 1) / sizeof (SI);
434 for (j = 0; j < data_words; ++j)
436 int regnum = q.regnum + j;
437 if (! load_pending_for_register (cpu, regnum, 1, q.regtype))
439 if (q.regtype == REGTYPE_FR)
441 int *fr = ps->fr_busy;
442 fr[regnum] += 1 + ps->fr_ptime[regnum];
443 ps->fr_ptime[regnum] = 0;
450 /* Copy data from the cache buffer to the target register(s). */
452 copy_load_data (SIM_CPU *current_cpu, FRV_CACHE *cache, int slot,
453 CACHE_QUEUE_ELEMENT *q)
458 if (q->regtype == REGTYPE_FR)
462 QI value = CACHE_RETURN_DATA (cache, slot, q->address, QI, 1);
463 SET_H_FR (q->regnum, value);
467 UQI value = CACHE_RETURN_DATA (cache, slot, q->address, UQI, 1);
468 SET_H_FR (q->regnum, value);
475 QI value = CACHE_RETURN_DATA (cache, slot, q->address, QI, 1);
476 SET_H_GR (q->regnum, value);
480 UQI value = CACHE_RETURN_DATA (cache, slot, q->address, UQI, 1);
481 SET_H_GR (q->regnum, value);
486 if (q->regtype == REGTYPE_FR)
490 HI value = CACHE_RETURN_DATA (cache, slot, q->address, HI, 2);
491 SET_H_FR (q->regnum, value);
495 UHI value = CACHE_RETURN_DATA (cache, slot, q->address, UHI, 2);
496 SET_H_FR (q->regnum, value);
503 HI value = CACHE_RETURN_DATA (cache, slot, q->address, HI, 2);
504 SET_H_GR (q->regnum, value);
508 UHI value = CACHE_RETURN_DATA (cache, slot, q->address, UHI, 2);
509 SET_H_GR (q->regnum, value);
514 if (q->regtype == REGTYPE_FR)
517 CACHE_RETURN_DATA (cache, slot, q->address, SF, 4));
522 CACHE_RETURN_DATA (cache, slot, q->address, SI, 4));
526 if (q->regtype == REGTYPE_FR)
528 SET_H_FR_DOUBLE (q->regnum,
529 CACHE_RETURN_DATA (cache, slot, q->address, DF, 8));
533 SET_H_GR_DOUBLE (q->regnum,
534 CACHE_RETURN_DATA (cache, slot, q->address, DI, 8));
538 if (q->regtype == REGTYPE_FR)
539 frvbf_h_fr_quad_set_handler (current_cpu, q->regnum,
540 CACHE_RETURN_DATA_ADDRESS (cache, slot,
544 frvbf_h_gr_quad_set_handler (current_cpu, q->regnum,
545 CACHE_RETURN_DATA_ADDRESS (cache, slot,
555 request_complete (SIM_CPU *cpu, CACHE_QUEUE_ELEMENT *q)
558 if (! q->active || q->cycles > 0)
561 cache = CPU_DATA_CACHE (cpu);
565 /* For loads, we must wait until the data is returned from the cache. */
566 if (frv_cache_data_in_buffer (cache, 0, q->address, q->reqno))
568 copy_load_data (cpu, cache, 0, q);
571 if (frv_cache_data_in_buffer (cache, 1, q->address, q->reqno))
573 copy_load_data (cpu, cache, 1, q);
579 /* We must wait until the data is flushed. */
580 if (frv_cache_data_flushed (cache, 0, q->address, q->reqno))
582 if (frv_cache_data_flushed (cache, 1, q->address, q->reqno))
587 /* All other requests are complete once they've been made. */
594 /* Run the insn and data caches through the given number of cycles, taking
595 note of load requests which are fullfilled as a result. */
597 run_caches (SIM_CPU *cpu, int cycles)
599 FRV_CACHE* data_cache = CPU_DATA_CACHE (cpu);
600 FRV_CACHE* insn_cache = CPU_INSN_CACHE (cpu);
602 /* For each cycle, run the caches, noting which requests have been fullfilled
603 and submitting new requests on their designated cycles. */
604 for (i = 0; i < cycles; ++i)
607 /* Run the caches through 1 cycle. */
608 frv_cache_run (data_cache, 1);
609 frv_cache_run (insn_cache, 1);
611 /* Note whether prefetched insn data has been loaded yet. */
612 for (j = LS; j < FRV_CACHE_PIPELINES; ++j)
614 if (frv_insn_fetch_buffer[j].reqno != NO_REQNO
615 && frv_cache_data_in_buffer (insn_cache, j,
616 frv_insn_fetch_buffer[j].address,
617 frv_insn_fetch_buffer[j].reqno))
618 frv_insn_fetch_buffer[j].reqno = NO_REQNO;
621 /* Check to see which requests have been satisfied and which should
623 for (j = 0; j < cache_queue.ix; ++j)
625 CACHE_QUEUE_ELEMENT *q = & cache_queue.q[j];
629 /* If a load has been satisfied, complete the operation and remove it
631 if (request_complete (cpu, q))
633 remove_cache_queue_element (cpu, j);
638 /* Decrease the cycle count of each queued request.
639 Submit a request for each queued request whose cycle count has
643 submit_cache_request (q);
649 apply_latency_adjustments (SIM_CPU *cpu)
651 FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
653 /* update the latencies of the registers. */
654 int *fr = ps->fr_busy;
655 int *acc = ps->acc_busy;
656 for (i = 0; i < 64; ++i)
658 if (ps->fr_busy_adjust[i] > 0)
659 *fr -= ps->fr_busy_adjust[i]; /* OK if it goes negative. */
660 if (ps->acc_busy_adjust[i] > 0)
661 *acc -= ps->acc_busy_adjust[i]; /* OK if it goes negative. */
667 /* Account for the number of cycles which have just passed in the latency of
668 various system elements. Works for negative cycles too so that latency
669 can be extended in the case of insn fetch latency.
670 If negative or zero, then no adjustment is necessary. */
672 update_latencies (SIM_CPU *cpu, int cycles)
674 FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
676 /* update the latencies of the registers. */
681 int *gr = ps->gr_busy;
682 int *fr = ps->fr_busy;
683 int *acc = ps->acc_busy;
685 /* This loop handles GR, FR and ACC registers. */
686 for (i = 0; i < 64; ++i)
691 reset_gr_flags (cpu, i);
695 /* If the busy drops to 0, then mark the register as
699 int *fr_lat = ps->fr_latency + i;
701 ps->fr_busy_adjust[i] = 0;
702 /* Only clear flags if this register has no target latency. */
704 reset_fr_flags (cpu, i);
708 /* If the busy drops to 0, then mark the register as
712 int *acc_lat = ps->acc_latency + i;
714 ps->acc_busy_adjust[i] = 0;
715 /* Only clear flags if this register has no target latency. */
717 reset_acc_flags (cpu, i);
725 /* This loop handles CCR registers. */
727 for (i = 0; i < 8; ++i)
732 reset_cc_flags (cpu, i);
738 /* This loop handles SPR registers. */
740 for (i = 0; i < 4096; ++i)
748 /* This loop handles resources. */
749 idiv = ps->idiv_busy;
750 fdiv = ps->fdiv_busy;
751 fsqrt = ps->fsqrt_busy;
752 for (i = 0; i < 2; ++i)
754 *idiv = (*idiv <= cycles) ? 0 : (*idiv - cycles);
755 *fdiv = (*fdiv <= cycles) ? 0 : (*fdiv - cycles);
756 *fsqrt = (*fsqrt <= cycles) ? 0 : (*fsqrt - cycles);
763 /* Print information about the wait for the given number of cycles. */
765 frv_model_trace_wait_cycles (SIM_CPU *cpu, int cycles, const char *hazard_name)
767 if (TRACE_INSN_P (cpu) && cycles > 0)
769 SIM_DESC sd = CPU_STATE (cpu);
770 trace_printf (sd, cpu, "**** %s wait %d cycles ***\n",
771 hazard_name, cycles);
776 trace_vliw_wait_cycles (SIM_CPU *cpu)
778 if (TRACE_INSN_P (cpu))
780 FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
781 frv_model_trace_wait_cycles (cpu, ps->vliw_wait, hazard_name);
785 /* Wait for the given number of cycles. */
787 frv_model_advance_cycles (SIM_CPU *cpu, int cycles)
789 PROFILE_DATA *p = CPU_PROFILE_DATA (cpu);
790 update_latencies (cpu, cycles);
791 run_caches (cpu, cycles);
792 PROFILE_MODEL_TOTAL_CYCLES (p) += cycles;
796 handle_resource_wait (SIM_CPU *cpu)
798 FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
799 if (ps->vliw_wait != 0)
800 frv_model_advance_cycles (cpu, ps->vliw_wait);
801 if (ps->vliw_load_stall > ps->vliw_wait)
802 ps->vliw_load_stall -= ps->vliw_wait;
804 ps->vliw_load_stall = 0;
807 /* Account for the number of cycles until these resources will be available
810 update_target_latencies (SIM_CPU *cpu)
812 FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
814 /* update the latencies of the registers. */
816 int *gr_lat = ps->gr_latency;
817 int *fr_lat = ps->fr_latency;
818 int *acc_lat = ps->acc_latency;
821 int *gr = ps->gr_busy;
822 int *fr = ps->fr_busy;
823 int *acc = ps->acc_busy;
825 /* This loop handles GR, FR and ACC registers. */
826 for (i = 0; i < 64; ++i)
847 /* This loop handles CCR registers. */
849 ccr_lat = ps->ccr_latency;
850 for (i = 0; i < 8; ++i)
859 /* This loop handles SPR registers. */
861 spr_lat = ps->spr_latency;
862 for (i = 0; i < 4096; ++i)
873 /* Run the caches until all pending cache flushes are complete. */
875 wait_for_flush (SIM_CPU *cpu)
877 SI address = CPU_LOAD_ADDRESS (cpu);
879 while (flush_pending_for_address (cpu, address))
881 frv_model_advance_cycles (cpu, 1);
884 if (TRACE_INSN_P (cpu) && wait)
886 sprintf (hazard_name, "Data cache flush address %p:", address);
887 frv_model_trace_wait_cycles (cpu, wait, hazard_name);
891 /* Initialize cycle counting for an insn.
892 FIRST_P is non-zero if this is the first insn in a set of parallel
895 frvbf_model_insn_before (SIM_CPU *cpu, int first_p)
897 SIM_DESC sd = CPU_STATE (cpu);
898 FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
902 memset (ps->fr_busy_adjust, 0, sizeof (ps->fr_busy_adjust));
903 memset (ps->acc_busy_adjust, 0, sizeof (ps->acc_busy_adjust));
909 ps->vliw_branch_taken = 0;
910 ps->vliw_load_stall = 0;
913 switch (STATE_ARCHITECTURE (sd)->mach)
916 fr400_model_insn_before (cpu, first_p);
919 fr500_model_insn_before (cpu, first_p);
926 wait_for_flush (cpu);
929 /* Record the cycles computed for an insn.
930 LAST_P is non-zero if this is the last insn in a set of parallel insns,
931 and we update the total cycle count.
932 CYCLES is the cycle count of the insn. */
935 frvbf_model_insn_after (SIM_CPU *cpu, int last_p, int cycles)
937 PROFILE_DATA *p = CPU_PROFILE_DATA (cpu);
938 FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
939 SIM_DESC sd = CPU_STATE (cpu);
941 PROFILE_MODEL_CUR_INSN_CYCLES (p) = cycles;
943 /* The number of cycles for a VLIW insn is the maximum number of cycles
944 used by any individual insn within it. */
945 if (cycles > ps->vliw_cycles)
946 ps->vliw_cycles = cycles;
950 /* This is the last insn in a VLIW insn. */
951 struct frv_interrupt_timer *timer = & frv_interrupt_state.timer;
953 activate_cache_requests (cpu); /* before advancing cycles. */
954 apply_latency_adjustments (cpu); /* must go first. */
955 update_target_latencies (cpu); /* must go next. */
956 frv_model_advance_cycles (cpu, ps->vliw_cycles);
958 PROFILE_MODEL_LOAD_STALL_CYCLES (p) += ps->vliw_load_stall;
960 /* Check the interrupt timer. cycles contains the total cycle count. */
963 cycles = PROFILE_MODEL_TOTAL_CYCLES (p);
964 if (timer->current % timer->value
965 + (cycles - timer->current) >= timer->value)
966 frv_queue_external_interrupt (cpu, timer->interrupt);
967 timer->current = cycles;
970 ps->past_first_p = 0; /* Next one will be the first in a new VLIW. */
971 ps->branch_address = -1;
974 ps->past_first_p = 1;
976 switch (STATE_ARCHITECTURE (sd)->mach)
979 fr400_model_insn_after (cpu, last_p, cycles);
982 fr500_model_insn_after (cpu, last_p, cycles);
990 frvbf_model_branch (SIM_CPU *current_cpu, PCADDR target, int hint)
992 /* Record the hint and branch address for use in profiling. */
993 FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (current_cpu);
994 ps->branch_hint = hint;
995 ps->branch_address = target;
998 /* Top up the latency of the given GR by the given number of cycles. */
1000 update_GR_latency (SIM_CPU *cpu, INT out_GR, int cycles)
1004 FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
1005 int *gr = ps->gr_latency;
1006 if (gr[out_GR] < cycles)
1007 gr[out_GR] = cycles;
1012 decrease_GR_busy (SIM_CPU *cpu, INT in_GR, int cycles)
1016 FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
1017 int *gr = ps->gr_busy;
1018 gr[in_GR] -= cycles;
1022 /* Top up the latency of the given double GR by the number of cycles. */
1024 update_GRdouble_latency (SIM_CPU *cpu, INT out_GR, int cycles)
1028 FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
1029 int *gr = ps->gr_latency;
1030 if (gr[out_GR] < cycles)
1031 gr[out_GR] = cycles;
1032 if (out_GR < 63 && gr[out_GR + 1] < cycles)
1033 gr[out_GR + 1] = cycles;
1038 update_GR_latency_for_load (SIM_CPU *cpu, INT out_GR, int cycles)
1042 FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
1043 int *gr = ps->gr_latency;
1045 /* The latency of the GR will be at least the number of cycles used
1047 if (gr[out_GR] < cycles)
1048 gr[out_GR] = cycles;
1050 /* The latency will also depend on how long it takes to retrieve the
1051 data from the cache or memory. Assume that the load is issued
1052 after the last cycle of the insn. */
1053 request_cache_load (cpu, out_GR, REGTYPE_NONE, cycles);
1058 update_GRdouble_latency_for_load (SIM_CPU *cpu, INT out_GR, int cycles)
1062 FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
1063 int *gr = ps->gr_latency;
1065 /* The latency of the GR will be at least the number of cycles used
1067 if (gr[out_GR] < cycles)
1068 gr[out_GR] = cycles;
1069 if (out_GR < 63 && gr[out_GR + 1] < cycles)
1070 gr[out_GR + 1] = cycles;
1072 /* The latency will also depend on how long it takes to retrieve the
1073 data from the cache or memory. Assume that the load is issued
1074 after the last cycle of the insn. */
1075 request_cache_load (cpu, out_GR, REGTYPE_NONE, cycles);
1080 update_GR_latency_for_swap (SIM_CPU *cpu, INT out_GR, int cycles)
1082 update_GR_latency_for_load (cpu, out_GR, cycles);
1085 /* Top up the latency of the given FR by the given number of cycles. */
1087 update_FR_latency (SIM_CPU *cpu, INT out_FR, int cycles)
1091 FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
1092 int *fr = ps->fr_latency;
1093 if (fr[out_FR] < cycles)
1094 fr[out_FR] = cycles;
1098 /* Top up the latency of the given double FR by the number of cycles. */
1100 update_FRdouble_latency (SIM_CPU *cpu, INT out_FR, int cycles)
1104 FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
1105 int *fr = ps->fr_latency;
1106 if (fr[out_FR] < cycles)
1107 fr[out_FR] = cycles;
1108 if (out_FR < 63 && fr[out_FR + 1] < cycles)
1109 fr[out_FR + 1] = cycles;
1114 update_FR_latency_for_load (SIM_CPU *cpu, INT out_FR, int cycles)
1118 FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
1119 int *fr = ps->fr_latency;
1121 /* The latency of the FR will be at least the number of cycles used
1123 if (fr[out_FR] < cycles)
1124 fr[out_FR] = cycles;
1126 /* The latency will also depend on how long it takes to retrieve the
1127 data from the cache or memory. Assume that the load is issued
1128 after the last cycle of the insn. */
1129 request_cache_load (cpu, out_FR, REGTYPE_FR, cycles);
1134 update_FRdouble_latency_for_load (SIM_CPU *cpu, INT out_FR, int cycles)
1138 FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
1139 int *fr = ps->fr_latency;
1141 /* The latency of the FR will be at least the number of cycles used
1143 if (fr[out_FR] < cycles)
1144 fr[out_FR] = cycles;
1145 if (out_FR < 63 && fr[out_FR + 1] < cycles)
1146 fr[out_FR + 1] = cycles;
1148 /* The latency will also depend on how long it takes to retrieve the
1149 data from the cache or memory. Assume that the load is issued
1150 after the last cycle of the insn. */
1151 request_cache_load (cpu, out_FR, REGTYPE_FR, cycles);
1155 /* Top up the post-processing time of the given FR by the given number of
1158 update_FR_ptime (SIM_CPU *cpu, INT out_FR, int cycles)
1162 FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
1163 /* If a load is pending on this register, then add the cycles to
1164 the post processing time for this register. Otherwise apply it
1165 directly to the latency of the register. */
1166 if (! load_pending_for_register (cpu, out_FR, 1, REGTYPE_FR))
1168 int *fr = ps->fr_latency;
1169 fr[out_FR] += cycles;
1172 ps->fr_ptime[out_FR] += cycles;
1177 update_FRdouble_ptime (SIM_CPU *cpu, INT out_FR, int cycles)
1181 FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
1182 /* If a load is pending on this register, then add the cycles to
1183 the post processing time for this register. Otherwise apply it
1184 directly to the latency of the register. */
1185 if (! load_pending_for_register (cpu, out_FR, 2, REGTYPE_FR))
1187 int *fr = ps->fr_latency;
1188 fr[out_FR] += cycles;
1190 fr[out_FR + 1] += cycles;
1194 ps->fr_ptime[out_FR] += cycles;
1196 ps->fr_ptime[out_FR + 1] += cycles;
1201 /* Top up the post-processing time of the given ACC by the given number of
1204 update_ACC_ptime (SIM_CPU *cpu, INT out_ACC, int cycles)
1208 FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
1209 /* No load can be pending on this register. Apply the cycles
1210 directly to the latency of the register. */
1211 int *acc = ps->acc_latency;
1212 acc[out_ACC] += cycles;
1216 /* Top up the post-processing time of the given SPR by the given number of
1219 update_SPR_ptime (SIM_CPU *cpu, INT out_SPR, int cycles)
1223 FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
1224 /* No load can be pending on this register. Apply the cycles
1225 directly to the latency of the register. */
1226 int *spr = ps->spr_latency;
1227 spr[out_SPR] += cycles;
1232 decrease_ACC_busy (SIM_CPU *cpu, INT out_ACC, int cycles)
1236 FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
1237 int *acc = ps->acc_busy;
1238 acc[out_ACC] -= cycles;
1239 if (ps->acc_busy_adjust[out_ACC] >= 0
1240 && cycles > ps->acc_busy_adjust[out_ACC])
1241 ps->acc_busy_adjust[out_ACC] = cycles;
1245 /* start-sanitize-frv */
1247 increase_ACC_busy (SIM_CPU *cpu, INT out_ACC, int cycles)
1251 FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
1252 int *acc = ps->acc_busy;
1253 acc[out_ACC] += cycles;
1258 enforce_full_acc_latency (SIM_CPU *cpu, INT in_ACC)
1260 FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
1261 ps->acc_busy_adjust [in_ACC] = -1;
1264 /* end-sanitize-frv */
1266 decrease_FR_busy (SIM_CPU *cpu, INT out_FR, int cycles)
1270 FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
1271 int *fr = ps->fr_busy;
1272 fr[out_FR] -= cycles;
1273 if (ps->fr_busy_adjust[out_FR] >= 0
1274 && cycles > ps->fr_busy_adjust[out_FR])
1275 ps->fr_busy_adjust[out_FR] = cycles;
1280 increase_FR_busy (SIM_CPU *cpu, INT out_FR, int cycles)
1284 FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
1285 int *fr = ps->fr_busy;
1286 fr[out_FR] += cycles;
1290 /* Top up the latency of the given ACC by the given number of cycles. */
1292 update_ACC_latency (SIM_CPU *cpu, INT out_ACC, int cycles)
1296 FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
1297 int *acc = ps->acc_latency;
1298 if (acc[out_ACC] < cycles)
1299 acc[out_ACC] = cycles;
1303 /* Top up the latency of the given CCR by the given number of cycles. */
1305 update_CCR_latency (SIM_CPU *cpu, INT out_CCR, int cycles)
1309 FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
1310 int *ccr = ps->ccr_latency;
1311 if (ccr[out_CCR] < cycles)
1312 ccr[out_CCR] = cycles;
1316 /* Top up the latency of the given SPR by the given number of cycles. */
1318 update_SPR_latency (SIM_CPU *cpu, INT out_SPR, int cycles)
1322 FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
1323 int *spr = ps->spr_latency;
1324 if (spr[out_SPR] < cycles)
1325 spr[out_SPR] = cycles;
1329 /* Top up the latency of the given integer division resource by the given
1330 number of cycles. */
1332 update_idiv_resource_latency (SIM_CPU *cpu, INT in_resource, int cycles)
1334 /* operate directly on the busy cycles since each resource can only
1335 be used once in a VLIW insn. */
1336 FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
1337 int *r = ps->idiv_busy;
1338 r[in_resource] = cycles;
1341 /* Set the latency of the given resource to the given number of cycles. */
1343 update_fdiv_resource_latency (SIM_CPU *cpu, INT in_resource, int cycles)
1345 /* operate directly on the busy cycles since each resource can only
1346 be used once in a VLIW insn. */
1347 FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
1348 int *r = ps->fdiv_busy;
1349 r[in_resource] = cycles;
1352 /* Set the latency of the given resource to the given number of cycles. */
1354 update_fsqrt_resource_latency (SIM_CPU *cpu, INT in_resource, int cycles)
1356 /* operate directly on the busy cycles since each resource can only
1357 be used once in a VLIW insn. */
1358 FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
1359 int *r = ps->fsqrt_busy;
1360 r[in_resource] = cycles;
1363 /* Set the branch penalty to the given number of cycles. */
1365 update_branch_penalty (SIM_CPU *cpu, int cycles)
1367 /* operate directly on the busy cycles since only one branch can occur
1369 FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
1370 ps->branch_penalty = cycles;
1373 /* Check the availability of the given GR register and update the number
1374 of cycles the current VLIW insn must wait until it is available. */
1376 vliw_wait_for_GR (SIM_CPU *cpu, INT in_GR)
1378 FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
1379 int *gr = ps->gr_busy;
1380 /* If the latency of the register is greater than the current wait
1381 then update the current wait. */
1382 if (in_GR >= 0 && gr[in_GR] > ps->vliw_wait)
1384 if (TRACE_INSN_P (cpu))
1385 sprintf (hazard_name, "Data hazard for gr%d:", in_GR);
1386 ps->vliw_wait = gr[in_GR];
1390 /* Check the availability of the given GR register and update the number
1391 of cycles the current VLIW insn must wait until it is available. */
1393 vliw_wait_for_GRdouble (SIM_CPU *cpu, INT in_GR)
1395 FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
1396 int *gr = ps->gr_busy;
1397 /* If the latency of the register is greater than the current wait
1398 then update the current wait. */
1401 if (gr[in_GR] > ps->vliw_wait)
1403 if (TRACE_INSN_P (cpu))
1404 sprintf (hazard_name, "Data hazard for gr%d:", in_GR);
1405 ps->vliw_wait = gr[in_GR];
1407 if (in_GR < 63 && gr[in_GR + 1] > ps->vliw_wait)
1409 if (TRACE_INSN_P (cpu))
1410 sprintf (hazard_name, "Data hazard for gr%d:", in_GR + 1);
1411 ps->vliw_wait = gr[in_GR + 1];
1416 /* Check the availability of the given FR register and update the number
1417 of cycles the current VLIW insn must wait until it is available. */
1419 vliw_wait_for_FR (SIM_CPU *cpu, INT in_FR)
1421 FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
1422 int *fr = ps->fr_busy;
1423 /* If the latency of the register is greater than the current wait
1424 then update the current wait. */
1425 if (in_FR >= 0 && fr[in_FR] > ps->vliw_wait)
1427 if (TRACE_INSN_P (cpu))
1428 sprintf (hazard_name, "Data hazard for fr%d:", in_FR);
1429 ps->vliw_wait = fr[in_FR];
1433 /* Check the availability of the given GR register and update the number
1434 of cycles the current VLIW insn must wait until it is available. */
1436 vliw_wait_for_FRdouble (SIM_CPU *cpu, INT in_FR)
1438 FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
1439 int *fr = ps->fr_busy;
1440 /* If the latency of the register is greater than the current wait
1441 then update the current wait. */
1444 if (fr[in_FR] > ps->vliw_wait)
1446 if (TRACE_INSN_P (cpu))
1447 sprintf (hazard_name, "Data hazard for fr%d:", in_FR);
1448 ps->vliw_wait = fr[in_FR];
1450 if (in_FR < 63 && fr[in_FR + 1] > ps->vliw_wait)
1452 if (TRACE_INSN_P (cpu))
1453 sprintf (hazard_name, "Data hazard for fr%d:", in_FR + 1);
1454 ps->vliw_wait = fr[in_FR + 1];
1459 /* Check the availability of the given CCR register and update the number
1460 of cycles the current VLIW insn must wait until it is available. */
1462 vliw_wait_for_CCR (SIM_CPU *cpu, INT in_CCR)
1464 FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
1465 int *ccr = ps->ccr_busy;
1466 /* If the latency of the register is greater than the current wait
1467 then update the current wait. */
1468 if (in_CCR >= 0 && ccr[in_CCR] > ps->vliw_wait)
1470 if (TRACE_INSN_P (cpu))
1473 sprintf (hazard_name, "Data hazard for icc%d:", in_CCR-4);
1475 sprintf (hazard_name, "Data hazard for fcc%d:", in_CCR);
1477 ps->vliw_wait = ccr[in_CCR];
1481 /* Check the availability of the given ACC register and update the number
1482 of cycles the current VLIW insn must wait until it is available. */
1484 vliw_wait_for_ACC (SIM_CPU *cpu, INT in_ACC)
1486 FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
1487 int *acc = ps->acc_busy;
1488 /* If the latency of the register is greater than the current wait
1489 then update the current wait. */
1490 if (in_ACC >= 0 && acc[in_ACC] > ps->vliw_wait)
1492 if (TRACE_INSN_P (cpu))
1493 sprintf (hazard_name, "Data hazard for acc%d:", in_ACC);
1494 ps->vliw_wait = acc[in_ACC];
1498 /* Check the availability of the given SPR register and update the number
1499 of cycles the current VLIW insn must wait until it is available. */
1501 vliw_wait_for_SPR (SIM_CPU *cpu, INT in_SPR)
1503 FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
1504 int *spr = ps->spr_busy;
1505 /* If the latency of the register is greater than the current wait
1506 then update the current wait. */
1507 if (in_SPR >= 0 && spr[in_SPR] > ps->vliw_wait)
1509 if (TRACE_INSN_P (cpu))
1510 sprintf (hazard_name, "Data hazard for spr %d:", in_SPR);
1511 ps->vliw_wait = spr[in_SPR];
1515 /* Check the availability of the given integer division resource and update
1516 the number of cycles the current VLIW insn must wait until it is available.
1519 vliw_wait_for_idiv_resource (SIM_CPU *cpu, INT in_resource)
1521 FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
1522 int *r = ps->idiv_busy;
1523 /* If the latency of the resource is greater than the current wait
1524 then update the current wait. */
1525 if (r[in_resource] > ps->vliw_wait)
1527 if (TRACE_INSN_P (cpu))
1529 sprintf (hazard_name, "Resource hazard for integer division in slot I%d:", in_resource);
1531 ps->vliw_wait = r[in_resource];
1535 /* Check the availability of the given float division resource and update
1536 the number of cycles the current VLIW insn must wait until it is available.
1539 vliw_wait_for_fdiv_resource (SIM_CPU *cpu, INT in_resource)
1541 FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
1542 int *r = ps->fdiv_busy;
1543 /* If the latency of the resource is greater than the current wait
1544 then update the current wait. */
1545 if (r[in_resource] > ps->vliw_wait)
1547 if (TRACE_INSN_P (cpu))
1549 sprintf (hazard_name, "Resource hazard for floating point division in slot F%d:", in_resource);
1551 ps->vliw_wait = r[in_resource];
1555 /* Check the availability of the given float square root resource and update
1556 the number of cycles the current VLIW insn must wait until it is available.
1559 vliw_wait_for_fsqrt_resource (SIM_CPU *cpu, INT in_resource)
1561 FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
1562 int *r = ps->fsqrt_busy;
1563 /* If the latency of the resource is greater than the current wait
1564 then update the current wait. */
1565 if (r[in_resource] > ps->vliw_wait)
1567 if (TRACE_INSN_P (cpu))
1569 sprintf (hazard_name, "Resource hazard for square root in slot F%d:", in_resource);
1571 ps->vliw_wait = r[in_resource];
1575 /* Run the caches until all requests for the given register(s) are satisfied. */
1577 load_wait_for_GR (SIM_CPU *cpu, INT in_GR)
1582 while (load_pending_for_register (cpu, in_GR, 1/*words*/, REGTYPE_NONE))
1584 frv_model_advance_cycles (cpu, 1);
1589 FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
1590 ps->vliw_wait += wait;
1591 ps->vliw_load_stall += wait;
1592 if (TRACE_INSN_P (cpu))
1593 sprintf (hazard_name, "Data hazard for gr%d:", in_GR);
1599 load_wait_for_FR (SIM_CPU *cpu, INT in_FR)
1603 FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
1606 while (load_pending_for_register (cpu, in_FR, 1/*words*/, REGTYPE_FR))
1608 frv_model_advance_cycles (cpu, 1);
1611 /* Post processing time may have been added to the register's
1612 latency after the loads were processed. Account for that too.
1618 frv_model_advance_cycles (cpu, fr[in_FR]);
1620 /* Update the vliw_wait with the number of cycles we waited for the
1621 load and any post-processing. */
1624 ps->vliw_wait += wait;
1625 ps->vliw_load_stall += wait;
1626 if (TRACE_INSN_P (cpu))
1627 sprintf (hazard_name, "Data hazard for fr%d:", in_FR);
1633 load_wait_for_GRdouble (SIM_CPU *cpu, INT in_GR)
1638 while (load_pending_for_register (cpu, in_GR, 2/*words*/, REGTYPE_NONE))
1640 frv_model_advance_cycles (cpu, 1);
1645 FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
1646 ps->vliw_wait += wait;
1647 ps->vliw_load_stall += wait;
1648 if (TRACE_INSN_P (cpu))
1649 sprintf (hazard_name, "Data hazard for gr%d:", in_GR);
1655 load_wait_for_FRdouble (SIM_CPU *cpu, INT in_FR)
1659 FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
1662 while (load_pending_for_register (cpu, in_FR, 2/*words*/, REGTYPE_FR))
1664 frv_model_advance_cycles (cpu, 1);
1667 /* Post processing time may have been added to the registers'
1668 latencies after the loads were processed. Account for that too.
1674 frv_model_advance_cycles (cpu, fr[in_FR]);
1680 wait += fr[in_FR + 1];
1681 frv_model_advance_cycles (cpu, fr[in_FR + 1]);
1684 /* Update the vliw_wait with the number of cycles we waited for the
1685 load and any post-processing. */
1688 ps->vliw_wait += wait;
1689 ps->vliw_load_stall += wait;
1690 if (TRACE_INSN_P (cpu))
1691 sprintf (hazard_name, "Data hazard for fr%d:", in_FR);
1697 enforce_full_fr_latency (SIM_CPU *cpu, INT in_FR)
1699 FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
1700 ps->fr_busy_adjust [in_FR] = -1;
1703 /* Calculate how long the post processing for a floating point insn must
1704 wait for resources to become available. */
1706 post_wait_for_FR (SIM_CPU *cpu, INT in_FR)
1708 FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
1709 int *fr = ps->fr_busy;
1711 if (in_FR >= 0 && fr[in_FR] > ps->post_wait)
1713 ps->post_wait = fr[in_FR];
1714 if (TRACE_INSN_P (cpu))
1715 sprintf (hazard_name, "Data hazard for fr%d:", in_FR);
1719 /* Calculate how long the post processing for a floating point insn must
1720 wait for resources to become available. */
1722 post_wait_for_FRdouble (SIM_CPU *cpu, INT in_FR)
1724 FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
1725 int *fr = ps->fr_busy;
1729 if (fr[in_FR] > ps->post_wait)
1731 ps->post_wait = fr[in_FR];
1732 if (TRACE_INSN_P (cpu))
1733 sprintf (hazard_name, "Data hazard for fr%d:", in_FR);
1735 if (in_FR < 63 && fr[in_FR + 1] > ps->post_wait)
1737 ps->post_wait = fr[in_FR + 1];
1738 if (TRACE_INSN_P (cpu))
1739 sprintf (hazard_name, "Data hazard for fr%d:", in_FR + 1);
1745 post_wait_for_ACC (SIM_CPU *cpu, INT in_ACC)
1747 FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
1748 int *acc = ps->acc_busy;
1750 if (in_ACC >= 0 && acc[in_ACC] > ps->post_wait)
1752 ps->post_wait = acc[in_ACC];
1753 if (TRACE_INSN_P (cpu))
1754 sprintf (hazard_name, "Data hazard for acc%d:", in_ACC);
1759 post_wait_for_CCR (SIM_CPU *cpu, INT in_CCR)
1761 FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
1762 int *ccr = ps->ccr_busy;
1764 if (in_CCR >= 0 && ccr[in_CCR] > ps->post_wait)
1766 ps->post_wait = ccr[in_CCR];
1767 if (TRACE_INSN_P (cpu))
1770 sprintf (hazard_name, "Data hazard for icc%d:", in_CCR - 4);
1772 sprintf (hazard_name, "Data hazard for fcc%d:", in_CCR);
1778 post_wait_for_SPR (SIM_CPU *cpu, INT in_SPR)
1780 FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
1781 int *spr = ps->spr_busy;
1783 if (in_SPR >= 0 && spr[in_SPR] > ps->post_wait)
1785 ps->post_wait = spr[in_SPR];
1786 if (TRACE_INSN_P (cpu))
1787 sprintf (hazard_name, "Data hazard for spr[%d]:", in_SPR);
1792 post_wait_for_fdiv (SIM_CPU *cpu, INT slot)
1794 FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
1795 int *fdiv = ps->fdiv_busy;
1797 /* Multiple floating point divisions in the same slot need only wait 1
1799 if (fdiv[slot] > 0 && 1 > ps->post_wait)
1802 if (TRACE_INSN_P (cpu))
1804 sprintf (hazard_name, "Resource hazard for floating point division in slot F%d:", slot);
1810 post_wait_for_fsqrt (SIM_CPU *cpu, INT slot)
1812 FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
1813 int *fsqrt = ps->fsqrt_busy;
1815 /* Multiple floating point square roots in the same slot need only wait 1
1817 if (fsqrt[slot] > 0 && 1 > ps->post_wait)
1820 if (TRACE_INSN_P (cpu))
1822 sprintf (hazard_name, "Resource hazard for square root in slot F%d:", slot);
1827 /* Print cpu-specific profile information. */
/* COMMAS formats the integer N with comma separators via sim_add_commas;
   it relies on a local comma_buf[] being declared at each use site.
   NOTE(review): confirm sim_add_commas semantics in sim/common.  */
1828 #define COMMAS(n) sim_add_commas (comma_buf, sizeof (comma_buf), (n))
1831 print_cache (SIM_CPU *cpu, FRV_CACHE *cache, const char *cache_name)
1833 SIM_DESC sd = CPU_STATE (cpu);
1840 sim_io_printf (sd, " %s Cache\n\n", cache_name);
1841 accesses = cache->statistics.accesses;
1842 sim_io_printf (sd, " Total accesses: %s\n", COMMAS (accesses));
1846 unsigned hits = cache->statistics.hits;
1847 sim_io_printf (sd, " Hits: %s\n", COMMAS (hits));
1848 rate = (float)hits / accesses;
1849 sim_io_printf (sd, " Hit rate: %.2f%%\n", rate * 100);
1853 sim_io_printf (sd, " Model %s has no %s cache\n",
1854 MODEL_NAME (CPU_MODEL (cpu)), cache_name);
1856 sim_io_printf (sd, "\n");
1859 /* This table must correspond to the UNIT_ATTR table in
1860 opcodes/frv-desc.h. Only the units up to UNIT_C need be
1861 listed since the others cannot occur after mapping. */
/* NOTE(review): the enclosing array declaration (slot_names[]) and its
   remaining rows are missing from this extraction; restore them from the
   original source before compiling.  */
1866 "I0", "I1", "I01", "IALL",
1867 "FM0", "FM1", "FM01", "FMALL", "FMLOW",
1873 print_parallel (SIM_CPU *cpu, int verbose)
1875 SIM_DESC sd = CPU_STATE (cpu);
1876 PROFILE_DATA *p = CPU_PROFILE_DATA (cpu);
1877 FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
1878 unsigned total, vliw;
1882 sim_io_printf (sd, "Model %s Parallelization\n\n",
1883 MODEL_NAME (CPU_MODEL (cpu)));
1885 total = PROFILE_TOTAL_INSN_COUNT (p);
1886 sim_io_printf (sd, " Total instructions: %s\n", COMMAS (total));
1887 vliw = ps->vliw_insns;
1888 sim_io_printf (sd, " VLIW instructions: %s\n", COMMAS (vliw));
1889 average = (float)total / vliw;
1890 sim_io_printf (sd, " Average VLIW length: %.2f\n", average);
1891 average = (float)PROFILE_MODEL_TOTAL_CYCLES (p) / vliw;
1892 sim_io_printf (sd, " Cycles per VLIW instruction: %.2f\n", average);
1893 average = (float)total / PROFILE_MODEL_TOTAL_CYCLES (p);
1894 sim_io_printf (sd, " Instructions per cycle: %.2f\n", average);
1900 int max_name_len = 0;
1901 for (i = UNIT_NIL + 1; i < UNIT_NUM_UNITS; ++i)
1903 if (INSNS_IN_SLOT (i))
1906 if (INSNS_IN_SLOT (i) > max_val)
1907 max_val = INSNS_IN_SLOT (i);
1908 len = strlen (slot_names[i]);
1909 if (len > max_name_len)
1915 sim_io_printf (sd, "\n");
1916 sim_io_printf (sd, " Instructions per slot:\n");
1917 sim_io_printf (sd, "\n");
1918 for (i = UNIT_NIL + 1; i < UNIT_NUM_UNITS; ++i)
1920 if (INSNS_IN_SLOT (i) != 0)
1922 sim_io_printf (sd, " %*s: %*s: ",
1923 max_name_len, slot_names[i],
1924 max_val < 10000 ? 5 : 10,
1925 COMMAS (INSNS_IN_SLOT (i)));
1926 sim_profile_print_bar (sd, PROFILE_HISTOGRAM_WIDTH,
1929 sim_io_printf (sd, "\n");
1932 } /* details to print */
1935 sim_io_printf (sd, "\n");
1939 frv_profile_info (SIM_CPU *cpu, int verbose)
1941 /* FIXME: Need to add smp support. */
1942 PROFILE_DATA *p = CPU_PROFILE_DATA (cpu);
1944 #if WITH_PROFILE_PARALLEL_P
1945 if (PROFILE_FLAGS (p) [PROFILE_PARALLEL_IDX])
1946 print_parallel (cpu, verbose);
1949 #if WITH_PROFILE_CACHE_P
1950 if (PROFILE_FLAGS (p) [PROFILE_CACHE_IDX])
1952 SIM_DESC sd = CPU_STATE (cpu);
1953 sim_io_printf (sd, "Model %s Cache Statistics\n\n",
1954 MODEL_NAME (CPU_MODEL (cpu)));
1955 print_cache (cpu, CPU_INSN_CACHE (cpu), "Instruction");
1956 print_cache (cpu, CPU_DATA_CACHE (cpu), "Data");
1958 #endif /* WITH_PROFILE_CACHE_P */
1961 /* A hack to get registers referenced for profiling. */
1962 SI frv_ref_SI (SI ref) {return ref;}
1963 #endif /* WITH_PROFILE_MODEL_P */