1 /* frv simulator machine independent profiling code.
3 Copyright (C) 1998, 1999, 2000, 2001, 2003 Free Software Foundation, Inc.
6 This file is part of the GNU simulators.
8 This program is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License as published by
10 the Free Software Foundation; either version 2, or (at your option)
13 This program is distributed in the hope that it will be useful,
14 but WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 GNU General Public License for more details.
18 You should have received a copy of the GNU General Public License along
19 with this program; if not, write to the Free Software Foundation, Inc.,
20 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
24 #define WANT_CPU_FRVBF
29 #if WITH_PROFILE_MODEL_P
32 #include "profile-fr400.h"
33 #include "profile-fr500.h"
34 #include "profile-fr550.h"
37 reset_gr_flags (SIM_CPU *cpu, INT gr)
39 SIM_DESC sd = CPU_STATE (cpu);
40 if (STATE_ARCHITECTURE (sd)->mach == bfd_mach_fr400)
41 fr400_reset_gr_flags (cpu, gr);
42 /* Other machines have no gr flags right now. */
46 reset_fr_flags (SIM_CPU *cpu, INT fr)
48 SIM_DESC sd = CPU_STATE (cpu);
49 if (STATE_ARCHITECTURE (sd)->mach == bfd_mach_fr400)
50 fr400_reset_fr_flags (cpu, fr);
51 else if (STATE_ARCHITECTURE (sd)->mach == bfd_mach_fr500)
52 fr500_reset_fr_flags (cpu, fr);
56 reset_acc_flags (SIM_CPU *cpu, INT acc)
58 SIM_DESC sd = CPU_STATE (cpu);
59 if (STATE_ARCHITECTURE (sd)->mach == bfd_mach_fr400)
60 fr400_reset_acc_flags (cpu, acc);
61 /* Other machines have no acc flags right now. */
65 reset_cc_flags (SIM_CPU *cpu, INT cc)
67 SIM_DESC sd = CPU_STATE (cpu);
68 if (STATE_ARCHITECTURE (sd)->mach == bfd_mach_fr500)
69 fr500_reset_cc_flags (cpu, cc);
70 /* Other machines have no cc flags. */
74 set_use_is_gr_complex (SIM_CPU *cpu, INT gr)
78 FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
79 reset_gr_flags (cpu, gr);
80 ps->cur_gr_complex |= (((DI)1) << gr);
85 set_use_not_gr_complex (SIM_CPU *cpu, INT gr)
89 FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
90 ps->cur_gr_complex &= ~(((DI)1) << gr);
/* Query whether GR is currently marked as a 'complex' operand (tests the
   GR's bit in the cur_gr_complex mask).
   NOTE(review): this copy is an elided extraction -- the return type and
   braces (original lines 94, 96-98, 101+) are missing; presumably the
   return type is wide enough for the DI mask -- confirm in full source.  */
95 use_is_gr_complex (SIM_CPU *cpu, INT gr)
99 FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
100 return ps->cur_gr_complex & (((DI)1) << gr);
/* File-scope modeling state.
   NOTE(review): elided extraction -- intervening original lines (107, 110,
   113, 115+) are missing, including the '=' initializer line for
   frv_insn_fetch_buffer.  */
105 /* Globals flag indicates whether this insn is being modeled. */
106 enum FRV_INSN_MODELING model_insn = FRV_INSN_NO_MODELING;
108 /* static buffer for the name of the currently most restrictive hazard. */
109 static char hazard_name[100] = "";
111 /* Print information about the wait applied to an entire VLIW insn. */
112 FRV_INSN_FETCH_BUFFER frv_insn_fetch_buffer[]
/* Two entries, presumably one per cache pipeline (run_caches iterates
   LS..FRV_CACHE_PIPELINES over this array) -- confirm.  */
114 {1, NO_REQNO}, {1, NO_REQNO} /* init with impossible address. */
/* NOTE(review): elided extraction -- the CACHE_QUEUE_ELEMENT member list
   (original lines 129-142) and the head of the cache_queue struct
   (lines 147-150) are missing from this copy.  */
126 /* A queue of load requests from the data cache. Use to keep track of loads
127 which are still pending. */
128 /* TODO -- some of these are mutually exclusive and can use a union. */
143 enum cache_request request;
144 } CACHE_QUEUE_ELEMENT;
146 #define CACHE_QUEUE_SIZE 64 /* TODO -- make queue dynamic */
/* Fixed-capacity queue of pending cache requests; per the uses below,
   'ix' counts queued elements and 'reqno' numbers requests.  */
151 CACHE_QUEUE_ELEMENT q[CACHE_QUEUE_SIZE];
152 } cache_queue = {0, 0};
/* NOTE(review): elided extraction -- braces, the return type, local
   declarations (vliw, slot) and scattered body lines are missing below.
   Comments only added; code text untouched.  */
154 /* Queue a request for a load from the cache. The load will be queued as
155 'inactive' and will be requested after the given number
156 of cycles have passed from the point the load is activated. */
158 request_cache_load (SIM_CPU *cpu, INT regnum, int regtype, int cycles)
160 CACHE_QUEUE_ELEMENT *q;
/* Early-out: a conditional load that did not execute leaves
   CPU_LOAD_LENGTH at 0, so there is nothing to queue.  */
164 /* For a conditional load which was not executed, CPU_LOAD_LENGTH will be
166 if (CPU_LOAD_LENGTH (cpu) == 0)
169 if (cache_queue.ix >= CACHE_QUEUE_SIZE)
170 abort (); /* TODO: Make the queue dynamic */
172 q = & cache_queue.q[cache_queue.ix];
/* Fill in the queue element from the CPU's pending-load state.  */
175 q->reqno = cache_queue.reqno++;
176 q->request = cache_load;
177 q->cache = CPU_DATA_CACHE (cpu);
178 q->address = CPU_LOAD_ADDRESS (cpu);
179 q->length = CPU_LOAD_LENGTH (cpu);
180 q->is_signed = CPU_LOAD_SIGNED (cpu);
182 q->regtype = regtype;
/* Record which VLIW slot issued the load (the most recent slot).  */
186 vliw = CPU_VLIW (cpu);
187 slot = vliw->next_slot - 1;
188 q->slot = (*vliw->current_vliw)[slot];
/* Consume the pending-load state so it is not queued twice.  */
190 CPU_LOAD_LENGTH (cpu) = 0;
/* NOTE(review): elided extraction -- braces, return type and local
   declarations are missing below.  Comments only added.  */
193 /* Queue a request to flush the cache. The request will be queued as
194 'inactive' and will be requested after the given number
195 of cycles have passed from the point the request is activated. */
197 request_cache_flush (SIM_CPU *cpu, FRV_CACHE *cache, int cycles)
199 CACHE_QUEUE_ELEMENT *q;
203 if (cache_queue.ix >= CACHE_QUEUE_SIZE)
204 abort (); /* TODO: Make the queue dynamic */
206 q = & cache_queue.q[cache_queue.ix];
209 q->reqno = cache_queue.reqno++;
210 q->request = cache_flush;
212 q->address = CPU_LOAD_ADDRESS (cpu);
/* 'all' selects a whole-cache flush vs a single-line flush.  */
213 q->all = CPU_PROFILE_STATE (cpu)->all_cache_entries;
/* Record which VLIW slot issued the flush.  */
217 vliw = CPU_VLIW (cpu);
218 slot = vliw->next_slot - 1;
219 q->slot = (*vliw->current_vliw)[slot];
/* NOTE(review): elided extraction -- braces, return type and local
   declarations are missing below.  Comments only added.  */
222 /* Queue a request to invalidate the cache. The request will be queued as
223 'inactive' and will be requested after the given number
224 of cycles have passed from the point the request is activated. */
226 request_cache_invalidate (SIM_CPU *cpu, FRV_CACHE *cache, int cycles)
228 CACHE_QUEUE_ELEMENT *q;
232 if (cache_queue.ix >= CACHE_QUEUE_SIZE)
233 abort (); /* TODO: Make the queue dynamic */
235 q = & cache_queue.q[cache_queue.ix];
238 q->reqno = cache_queue.reqno++;
239 q->request = cache_invalidate;
241 q->address = CPU_LOAD_ADDRESS (cpu);
242 q->all = CPU_PROFILE_STATE (cpu)->all_cache_entries;
/* Record which VLIW slot issued the invalidate.  */
246 vliw = CPU_VLIW (cpu);
247 slot = vliw->next_slot - 1;
248 q->slot = (*vliw->current_vliw)[slot];
/* NOTE(review): elided extraction -- braces, return type and local
   declarations are missing below.  Comments only added.  */
251 /* Queue a request to preload the cache. The request will be queued as
252 'inactive' and will be requested after the given number
253 of cycles have passed from the point the request is activated. */
255 request_cache_preload (SIM_CPU *cpu, FRV_CACHE *cache, int cycles)
257 CACHE_QUEUE_ELEMENT *q;
261 if (cache_queue.ix >= CACHE_QUEUE_SIZE)
262 abort (); /* TODO: Make the queue dynamic */
264 q = & cache_queue.q[cache_queue.ix];
267 q->reqno = cache_queue.reqno++;
268 q->request = cache_preload;
270 q->address = CPU_LOAD_ADDRESS (cpu);
271 q->length = CPU_LOAD_LENGTH (cpu);
272 q->lock = CPU_LOAD_LOCK (cpu);
/* Record which VLIW slot issued the preload.  */
276 vliw = CPU_VLIW (cpu);
277 slot = vliw->next_slot - 1;
278 q->slot = (*vliw->current_vliw)[slot];
/* Consume the pending-load state, as request_cache_load does.  */
280 CPU_LOAD_LENGTH (cpu) = 0;
/* NOTE(review): elided extraction -- braces, return type and local
   declarations are missing below.  Comments only added.  */
283 /* Queue a request to unlock the cache. The request will be queued as
284 'inactive' and will be requested after the given number
285 of cycles have passed from the point the request is activated. */
287 request_cache_unlock (SIM_CPU *cpu, FRV_CACHE *cache, int cycles)
289 CACHE_QUEUE_ELEMENT *q;
293 if (cache_queue.ix >= CACHE_QUEUE_SIZE)
294 abort (); /* TODO: Make the queue dynamic */
296 q = & cache_queue.q[cache_queue.ix];
299 q->reqno = cache_queue.reqno++;
300 q->request = cache_unlock;
302 q->address = CPU_LOAD_ADDRESS (cpu);
/* Record which VLIW slot issued the unlock.  */
306 vliw = CPU_VLIW (cpu);
307 slot = vliw->next_slot - 1;
308 q->slot = (*vliw->current_vliw)[slot];
/* Dispatch a queued request to the cache engine, switching on q->request.
   NOTE(review): elided extraction -- the switch skeleton (case labels for
   cache_load/cache_flush/etc., 'break's, braces) is partly missing.  Both
   flush and invalidate go through frv_cache_request_invalidate; the elided
   trailing argument presumably distinguishes flush vs invalidate --
   confirm in full source.  */
312 submit_cache_request (CACHE_QUEUE_ELEMENT *q)
317 frv_cache_request_load (q->cache, q->reqno, q->address, q->slot);
320 frv_cache_request_invalidate (q->cache, q->reqno, q->address, q->slot,
323 case cache_invalidate:
324 frv_cache_request_invalidate (q->cache, q->reqno, q->address, q->slot,
328 frv_cache_request_preload (q->cache, q->address, q->slot,
332 frv_cache_request_unlock (q->cache, q->address, q->slot);
/* NOTE(review): elided extraction -- braces, return type, and the lines
   that mark each element active / test its cycle count are missing.  */
339 /* Activate all inactive load requests. */
341 activate_cache_requests (SIM_CPU *cpu)
344 for (i = 0; i < cache_queue.ix; ++i)
346 CACHE_QUEUE_ELEMENT *q = & cache_queue.q[i];
350 /* Submit the request now if the cycle count is zero. */
352 submit_cache_request (q);
/* Return non-zero if an active queued load targets any of the WORDS
   registers starting at REGNUM with register class REGTYPE.
   NOTE(review): elided extraction -- braces, return type, 'continue'
   statements and some 'return 1' lines are missing below.  */
357 /* Check to see if a load is pending which affects the given register(s).
360 load_pending_for_register (SIM_CPU *cpu, int regnum, int words, int regtype)
363 for (i = 0; i < cache_queue.ix; ++i)
365 CACHE_QUEUE_ELEMENT *q = & cache_queue.q[i];
367 /* Must be the same kind of register. */
368 if (! q->active || q->request != cache_load || q->regtype != regtype)
371 /* If the registers numbers are equal, then we have a match. */
372 if (q->regnum == regnum)
373 return 1; /* load pending */
375 /* Check for overlap of a load with a multi-word register. */
376 if (regnum < q->regnum)
378 if (regnum + words > q->regnum)
381 /* Check for overlap of a multi-word load with the register. */
/* data_words: number of SI words covered by the queued load's length.  */
384 int data_words = (q->length + sizeof (SI) - 1) / sizeof (SI);
385 if (q->regnum + data_words > regnum)
390 return 0; /* no load pending */
/* Return non-zero if an active queued flush covers the cache line
   containing ADDRESS (compared with the line-size mask).
   NOTE(review): elided extraction -- braces, return type and the
   'continue' for non-matching requests are missing below.  */
393 /* Check to see if a cache flush pending which affects the given address. */
395 flush_pending_for_address (SIM_CPU *cpu, SI address)
/* line_mask clears the offset-within-line bits; line_size is assumed to
   be a power of two.  */
397 int line_mask = ~(CPU_DATA_CACHE (cpu)->line_size - 1);
399 for (i = 0; i < cache_queue.ix; ++i)
401 CACHE_QUEUE_ELEMENT *q = & cache_queue.q[i];
403 /* Must be the same kind of request and active. */
404 if (! q->active || q->request != cache_flush)
407 /* If the addresses are equal, then we have a match. */
408 if ((q->address & line_mask) == (address & line_mask))
409 return 1; /* flush pending */
412 return 0; /* no flush pending */
/* Remove element I from the cache queue, compacting the array, then apply
   any queued FR post-processing latency if no other load of the same FR
   register remains queued.
   NOTE(review): elided extraction -- braces, return type, and interior
   lines (including the inner-loop close) are missing below.  */
416 remove_cache_queue_element (SIM_CPU *cpu, int i)
418 /* If we are removing the load of a FR register, then remember which one(s).
/* Copy the element before compaction overwrites it.  */
420 CACHE_QUEUE_ELEMENT q = cache_queue.q[i];
422 for (--cache_queue.ix; i < cache_queue.ix; ++i)
423 cache_queue.q[i] = cache_queue.q[i + 1];
425 /* If we removed a load of a FR register, check to see if any other loads
426 of that register is still queued. If not, then apply the queued post
427 processing time of that register to its latency. Also apply
428 1 extra cycle of latency to the register since it was a floating point
430 if (q.request == cache_load && q.regtype != REGTYPE_NONE)
432 FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
433 int data_words = (q.length + sizeof (SI) - 1) / sizeof (SI);
435 for (j = 0; j < data_words; ++j)
437 int regnum = q.regnum + j;
438 if (! load_pending_for_register (cpu, regnum, 1, q.regtype))
440 if (q.regtype == REGTYPE_FR)
442 int *fr = ps->fr_busy;
/* +1 extra cycle plus the queued post-processing time.  */
443 fr[regnum] += 1 + ps->fr_ptime[regnum];
444 ps->fr_ptime[regnum] = 0;
/* NOTE(review): elided extraction -- this is a switch on the load length
   (1/2/4/8/16 bytes per the CACHE_RETURN_DATA widths below), nested with
   signed/unsigned and FR-vs-GR cases; the switch skeleton, braces and
   'break's are missing.  Comments only added.  */
451 /* Copy data from the cache buffer to the target register(s). */
453 copy_load_data (SIM_CPU *current_cpu, FRV_CACHE *cache, int slot,
454 CACHE_QUEUE_ELEMENT *q)
/* 1-byte loads: sign- or zero-extended into an FR or GR register.  */
459 if (q->regtype == REGTYPE_FR)
463 QI value = CACHE_RETURN_DATA (cache, slot, q->address, QI, 1);
464 SET_H_FR (q->regnum, value);
468 UQI value = CACHE_RETURN_DATA (cache, slot, q->address, UQI, 1);
469 SET_H_FR (q->regnum, value);
476 QI value = CACHE_RETURN_DATA (cache, slot, q->address, QI, 1);
477 SET_H_GR (q->regnum, value);
481 UQI value = CACHE_RETURN_DATA (cache, slot, q->address, UQI, 1);
482 SET_H_GR (q->regnum, value);
/* 2-byte loads.  */
487 if (q->regtype == REGTYPE_FR)
491 HI value = CACHE_RETURN_DATA (cache, slot, q->address, HI, 2);
492 SET_H_FR (q->regnum, value);
496 UHI value = CACHE_RETURN_DATA (cache, slot, q->address, UHI, 2);
497 SET_H_FR (q->regnum, value);
504 HI value = CACHE_RETURN_DATA (cache, slot, q->address, HI, 2);
505 SET_H_GR (q->regnum, value);
509 UHI value = CACHE_RETURN_DATA (cache, slot, q->address, UHI, 2);
510 SET_H_GR (q->regnum, value);
/* 4-byte loads: SF into FR, SI into GR.  */
515 if (q->regtype == REGTYPE_FR)
518 CACHE_RETURN_DATA (cache, slot, q->address, SF, 4));
523 CACHE_RETURN_DATA (cache, slot, q->address, SI, 4));
/* 8-byte loads: double registers.  */
527 if (q->regtype == REGTYPE_FR)
529 SET_H_FR_DOUBLE (q->regnum,
530 CACHE_RETURN_DATA (cache, slot, q->address, DF, 8));
534 SET_H_GR_DOUBLE (q->regnum,
535 CACHE_RETURN_DATA (cache, slot, q->address, DI, 8));
/* 16-byte loads: quad registers, set via the quad handlers.  */
539 if (q->regtype == REGTYPE_FR)
540 frvbf_h_fr_quad_set_handler (current_cpu, q->regnum,
541 CACHE_RETURN_DATA_ADDRESS (cache, slot,
545 frvbf_h_gr_quad_set_handler (current_cpu, q->regnum,
546 CACHE_RETURN_DATA_ADDRESS (cache, slot,
/* Return non-zero if the queued request Q has completed; for loads this
   also copies the returned data into the target register(s).
   NOTE(review): elided extraction -- the return type, switch skeleton,
   'return 1'/'return 0' lines and braces are missing below.  */
556 request_complete (SIM_CPU *cpu, CACHE_QUEUE_ELEMENT *q)
/* Not yet active, or still counting down: not complete.  */
559 if (! q->active || q->cycles > 0)
562 cache = CPU_DATA_CACHE (cpu);
566 /* For loads, we must wait until the data is returned from the cache. */
/* Either cache pipeline (0 or 1) may return the data.  */
567 if (frv_cache_data_in_buffer (cache, 0, q->address, q->reqno))
569 copy_load_data (cpu, cache, 0, q);
572 if (frv_cache_data_in_buffer (cache, 1, q->address, q->reqno))
574 copy_load_data (cpu, cache, 1, q);
580 /* We must wait until the data is flushed. */
581 if (frv_cache_data_flushed (cache, 0, q->address, q->reqno))
583 if (frv_cache_data_flushed (cache, 1, q->address, q->reqno))
588 /* All other requests are complete once they've been made. */
/* NOTE(review): elided extraction -- braces, return type, local loop
   variable declarations and the cycle-decrement lines near the end are
   missing below.  Comments only added.  */
595 /* Run the insn and data caches through the given number of cycles, taking
596 note of load requests which are fullfilled as a result. */
598 run_caches (SIM_CPU *cpu, int cycles)
600 FRV_CACHE* data_cache = CPU_DATA_CACHE (cpu);
601 FRV_CACHE* insn_cache = CPU_INSN_CACHE (cpu);
603 /* For each cycle, run the caches, noting which requests have been fullfilled
604 and submitting new requests on their designated cycles. */
605 for (i = 0; i < cycles; ++i)
608 /* Run the caches through 1 cycle. */
609 frv_cache_run (data_cache, 1);
610 frv_cache_run (insn_cache, 1);
612 /* Note whether prefetched insn data has been loaded yet. */
613 for (j = LS; j < FRV_CACHE_PIPELINES; ++j)
615 if (frv_insn_fetch_buffer[j].reqno != NO_REQNO
616 && frv_cache_data_in_buffer (insn_cache, j,
617 frv_insn_fetch_buffer[j].address,
618 frv_insn_fetch_buffer[j].reqno))
/* Clearing reqno marks the fetch as satisfied.  */
619 frv_insn_fetch_buffer[j].reqno = NO_REQNO;
622 /* Check to see which requests have been satisfied and which should
624 for (j = 0; j < cache_queue.ix; ++j)
626 CACHE_QUEUE_ELEMENT *q = & cache_queue.q[j];
630 /* If a load has been satisfied, complete the operation and remove it
632 if (request_complete (cpu, q))
634 remove_cache_queue_element (cpu, j);
/* After removal, the same index now holds the next element --
   the elided line presumably decrements j before continuing.  */
639 /* Decrease the cycle count of each queued request.
640 Submit a request for each queued request whose cycle count has
644 submit_cache_request (q);
/* Apply the queued fr/acc busy adjustments to the corresponding busy
   counters.  Called once per VLIW insn, before update_target_latencies.
   NOTE(review): elided extraction -- braces, return type, and the loop's
   pointer increments (++fr/++acc, presumably) are missing below.  */
650 apply_latency_adjustments (SIM_CPU *cpu)
652 FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
654 /* update the latencies of the registers. */
655 int *fr = ps->fr_busy;
656 int *acc = ps->acc_busy;
657 for (i = 0; i < 64; ++i)
659 if (ps->fr_busy_adjust[i] > 0)
660 *fr -= ps->fr_busy_adjust[i]; /* OK if it goes negative. */
661 if (ps->acc_busy_adjust[i] > 0)
662 *acc -= ps->acc_busy_adjust[i]; /* OK if it goes negative. */
/* NOTE(review): elided extraction -- large portions of this function
   (declarations, braces, the GR/CCR/SPR decrement lines and loop pointer
   increments) are missing below.  Comments only added.  */
668 /* Account for the number of cycles which have just passed in the latency of
669 various system elements. Works for negative cycles too so that latency
670 can be extended in the case of insn fetch latency.
671 If negative or zero, then no adjustment is necessary. */
673 update_latencies (SIM_CPU *cpu, int cycles)
675 FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
677 /* update the latencies of the registers. */
684 int *gr = ps->gr_busy;
685 int *fr = ps->fr_busy;
686 int *acc = ps->acc_busy;
688 /* This loop handles GR, FR and ACC registers. */
689 for (i = 0; i < 64; ++i)
/* When a GR's busy count reaches 0, clear its machine flags.  */
694 reset_gr_flags (cpu, i);
698 /* If the busy drops to 0, then mark the register as
702 int *fr_lat = ps->fr_latency + i;
704 ps->fr_busy_adjust[i] = 0;
705 /* Only clear flags if this register has no target latency. */
707 reset_fr_flags (cpu, i);
711 /* If the busy drops to 0, then mark the register as
715 int *acc_lat = ps->acc_latency + i;
717 ps->acc_busy_adjust[i] = 0;
718 /* Only clear flags if this register has no target latency. */
720 reset_acc_flags (cpu, i);
728 /* This loop handles CCR registers. */
730 for (i = 0; i < 8; ++i)
735 reset_cc_flags (cpu, i);
741 /* This loop handles SPR registers. */
743 for (i = 0; i < 4096; ++i)
751 /* This loop handles resources. */
752 idiv = ps->idiv_busy;
753 fdiv = ps->fdiv_busy;
754 fsqrt = ps->fsqrt_busy;
/* Two of each division/sqrt resource; clamp at zero.  */
755 for (i = 0; i < 2; ++i)
757 *idiv = (*idiv <= cycles) ? 0 : (*idiv - cycles);
758 *fdiv = (*fdiv <= cycles) ? 0 : (*fdiv - cycles);
759 *fsqrt = (*fsqrt <= cycles) ? 0 : (*fsqrt - cycles);
764 /* Float and media units can occur in 4 slots on some machines. */
765 flt = ps->float_busy;
766 media = ps->media_busy;
767 for (i = 0; i < 4; ++i)
769 *flt = (*flt <= cycles) ? 0 : (*flt - cycles);
770 *media = (*media <= cycles) ? 0 : (*media - cycles);
776 /* Print information about the wait for the given number of cycles. */
778 frv_model_trace_wait_cycles (SIM_CPU *cpu, int cycles, const char *hazard_name)
780 if (TRACE_INSN_P (cpu) && cycles > 0)
782 SIM_DESC sd = CPU_STATE (cpu);
783 trace_printf (sd, cpu, "**** %s wait %d cycles ***\n",
784 hazard_name, cycles);
789 trace_vliw_wait_cycles (SIM_CPU *cpu)
791 if (TRACE_INSN_P (cpu))
793 FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
794 frv_model_trace_wait_cycles (cpu, ps->vliw_wait, hazard_name);
798 /* Wait for the given number of cycles. */
800 frv_model_advance_cycles (SIM_CPU *cpu, int cycles)
802 PROFILE_DATA *p = CPU_PROFILE_DATA (cpu);
803 update_latencies (cpu, cycles);
804 run_caches (cpu, cycles);
805 PROFILE_MODEL_TOTAL_CYCLES (p) += cycles;
809 handle_resource_wait (SIM_CPU *cpu)
811 FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
812 if (ps->vliw_wait != 0)
813 frv_model_advance_cycles (cpu, ps->vliw_wait);
814 if (ps->vliw_load_stall > ps->vliw_wait)
815 ps->vliw_load_stall -= ps->vliw_wait;
817 ps->vliw_load_stall = 0;
/* NOTE(review): elided extraction -- the bodies of all three loops
   (original lines 840-859, 864-871, 876-885), which presumably transfer
   each register's target latency into its busy count and clear the
   latency, are missing from this copy.  Comments only added.  */
820 /* Account for the number of cycles until these resources will be available
823 update_target_latencies (SIM_CPU *cpu)
825 FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
827 /* update the latencies of the registers. */
829 int *gr_lat = ps->gr_latency;
830 int *fr_lat = ps->fr_latency;
831 int *acc_lat = ps->acc_latency;
834 int *gr = ps->gr_busy;
835 int *fr = ps->fr_busy;
836 int *acc = ps->acc_busy;
838 /* This loop handles GR, FR and ACC registers. */
839 for (i = 0; i < 64; ++i)
860 /* This loop handles CCR registers. */
862 ccr_lat = ps->ccr_latency;
863 for (i = 0; i < 8; ++i)
872 /* This loop handles SPR registers. */
874 spr_lat = ps->spr_latency;
875 for (i = 0; i < 4096; ++i)
886 /* Run the caches until all pending cache flushes are complete. */
888 wait_for_flush (SIM_CPU *cpu)
890 SI address = CPU_LOAD_ADDRESS (cpu);
892 while (flush_pending_for_address (cpu, address))
894 frv_model_advance_cycles (cpu, 1);
897 if (TRACE_INSN_P (cpu) && wait)
899 sprintf (hazard_name, "Data cache flush address %p:", address);
900 frv_model_trace_wait_cycles (cpu, wait, hazard_name);
/* NOTE(review): elided extraction -- braces, return type, several
   first_p-conditional resets and the switch's case labels are missing
   below.  Comments only added.  */
904 /* Initialize cycle counting for an insn.
905 FIRST_P is non-zero if this is the first insn in a set of parallel
908 frvbf_model_insn_before (SIM_CPU *cpu, int first_p)
910 SIM_DESC sd = CPU_STATE (cpu);
911 FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
/* Clear the per-VLIW busy adjustments.  */
915 memset (ps->fr_busy_adjust, 0, sizeof (ps->fr_busy_adjust));
916 memset (ps->acc_busy_adjust, 0, sizeof (ps->acc_busy_adjust));
922 ps->vliw_branch_taken = 0;
923 ps->vliw_load_stall = 0;
/* Machine-specific per-insn setup (case labels elided).  */
926 switch (STATE_ARCHITECTURE (sd)->mach)
929 fr400_model_insn_before (cpu, first_p);
932 fr500_model_insn_before (cpu, first_p);
935 fr550_model_insn_before (cpu, first_p);
/* Stall until any pending flush of the load address completes.  */
942 wait_for_flush (cpu);
/* NOTE(review): elided extraction -- braces, return type, the last_p
   conditional structure and the switch's case labels for fr400/fr500 are
   missing below.  Comments only added.  */
945 /* Record the cycles computed for an insn.
946 LAST_P is non-zero if this is the last insn in a set of parallel insns,
947 and we update the total cycle count.
948 CYCLES is the cycle count of the insn. */
951 frvbf_model_insn_after (SIM_CPU *cpu, int last_p, int cycles)
953 PROFILE_DATA *p = CPU_PROFILE_DATA (cpu);
954 FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
955 SIM_DESC sd = CPU_STATE (cpu);
957 PROFILE_MODEL_CUR_INSN_CYCLES (p) = cycles;
959 /* The number of cycles for a VLIW insn is the maximum number of cycles
960 used by any individual insn within it. */
961 if (cycles > ps->vliw_cycles)
962 ps->vliw_cycles = cycles;
966 /* This is the last insn in a VLIW insn. */
967 struct frv_interrupt_timer *timer = & frv_interrupt_state.timer;
/* Ordering matters here, per the comments on each call.  */
969 activate_cache_requests (cpu); /* before advancing cycles. */
970 apply_latency_adjustments (cpu); /* must go first. */
971 update_target_latencies (cpu); /* must go next. */
972 frv_model_advance_cycles (cpu, ps->vliw_cycles);
974 PROFILE_MODEL_LOAD_STALL_CYCLES (p) += ps->vliw_load_stall;
976 /* Check the interrupt timer. cycles contains the total cycle count. */
979 cycles = PROFILE_MODEL_TOTAL_CYCLES (p);
/* Fire the timer interrupt if the timer period elapsed within the
   cycles just consumed.  */
980 if (timer->current % timer->value
981 + (cycles - timer->current) >= timer->value)
982 frv_queue_external_interrupt (cpu, timer->interrupt);
983 timer->current = cycles;
986 ps->past_first_p = 0; /* Next one will be the first in a new VLIW. */
987 ps->branch_address = -1;
990 ps->past_first_p = 1;
/* Machine-specific per-insn wrap-up (fr400/fr500 case labels elided).  */
992 switch (STATE_ARCHITECTURE (sd)->mach)
995 fr400_model_insn_after (cpu, last_p, cycles);
998 fr500_model_insn_after (cpu, last_p, cycles);
1000 case bfd_mach_fr550:
1001 fr550_model_insn_after (cpu, last_p, cycles);
1009 frvbf_model_branch (SIM_CPU *current_cpu, PCADDR target, int hint)
1011 /* Record the hint and branch address for use in profiling. */
1012 FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (current_cpu);
1013 ps->branch_hint = hint;
1014 ps->branch_address = target;
1017 /* Top up the latency of the given GR by the given number of cycles. */
1019 update_GR_latency (SIM_CPU *cpu, INT out_GR, int cycles)
1023 FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
1024 int *gr = ps->gr_latency;
1025 if (gr[out_GR] < cycles)
1026 gr[out_GR] = cycles;
1031 decrease_GR_busy (SIM_CPU *cpu, INT in_GR, int cycles)
1035 FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
1036 int *gr = ps->gr_busy;
1037 gr[in_GR] -= cycles;
1041 /* Top up the latency of the given double GR by the number of cycles. */
1043 update_GRdouble_latency (SIM_CPU *cpu, INT out_GR, int cycles)
1047 FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
1048 int *gr = ps->gr_latency;
1049 if (gr[out_GR] < cycles)
1050 gr[out_GR] = cycles;
1051 if (out_GR < 63 && gr[out_GR + 1] < cycles)
1052 gr[out_GR + 1] = cycles;
1057 update_GR_latency_for_load (SIM_CPU *cpu, INT out_GR, int cycles)
1061 FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
1062 int *gr = ps->gr_latency;
1064 /* The latency of the GR will be at least the number of cycles used
1066 if (gr[out_GR] < cycles)
1067 gr[out_GR] = cycles;
1069 /* The latency will also depend on how long it takes to retrieve the
1070 data from the cache or memory. Assume that the load is issued
1071 after the last cycle of the insn. */
1072 request_cache_load (cpu, out_GR, REGTYPE_NONE, cycles);
1077 update_GRdouble_latency_for_load (SIM_CPU *cpu, INT out_GR, int cycles)
1081 FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
1082 int *gr = ps->gr_latency;
1084 /* The latency of the GR will be at least the number of cycles used
1086 if (gr[out_GR] < cycles)
1087 gr[out_GR] = cycles;
1088 if (out_GR < 63 && gr[out_GR + 1] < cycles)
1089 gr[out_GR + 1] = cycles;
1091 /* The latency will also depend on how long it takes to retrieve the
1092 data from the cache or memory. Assume that the load is issued
1093 after the last cycle of the insn. */
1094 request_cache_load (cpu, out_GR, REGTYPE_NONE, cycles);
1099 update_GR_latency_for_swap (SIM_CPU *cpu, INT out_GR, int cycles)
1101 update_GR_latency_for_load (cpu, out_GR, cycles);
1104 /* Top up the latency of the given FR by the given number of cycles. */
1106 update_FR_latency (SIM_CPU *cpu, INT out_FR, int cycles)
1110 FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
1111 int *fr = ps->fr_latency;
1112 if (fr[out_FR] < cycles)
1113 fr[out_FR] = cycles;
1117 /* Top up the latency of the given double FR by the number of cycles. */
1119 update_FRdouble_latency (SIM_CPU *cpu, INT out_FR, int cycles)
1123 FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
1124 int *fr = ps->fr_latency;
1125 if (fr[out_FR] < cycles)
1126 fr[out_FR] = cycles;
1127 if (out_FR < 63 && fr[out_FR + 1] < cycles)
1128 fr[out_FR + 1] = cycles;
1133 update_FR_latency_for_load (SIM_CPU *cpu, INT out_FR, int cycles)
1137 FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
1138 int *fr = ps->fr_latency;
1140 /* The latency of the FR will be at least the number of cycles used
1142 if (fr[out_FR] < cycles)
1143 fr[out_FR] = cycles;
1145 /* The latency will also depend on how long it takes to retrieve the
1146 data from the cache or memory. Assume that the load is issued
1147 after the last cycle of the insn. */
1148 request_cache_load (cpu, out_FR, REGTYPE_FR, cycles);
1153 update_FRdouble_latency_for_load (SIM_CPU *cpu, INT out_FR, int cycles)
1157 FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
1158 int *fr = ps->fr_latency;
1160 /* The latency of the FR will be at least the number of cycles used
1162 if (fr[out_FR] < cycles)
1163 fr[out_FR] = cycles;
1164 if (out_FR < 63 && fr[out_FR + 1] < cycles)
1165 fr[out_FR + 1] = cycles;
1167 /* The latency will also depend on how long it takes to retrieve the
1168 data from the cache or memory. Assume that the load is issued
1169 after the last cycle of the insn. */
1170 request_cache_load (cpu, out_FR, REGTYPE_FR, cycles);
1174 /* Top up the post-processing time of the given FR by the given number of
1177 update_FR_ptime (SIM_CPU *cpu, INT out_FR, int cycles)
1181 FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
1182 /* If a load is pending on this register, then add the cycles to
1183 the post processing time for this register. Otherwise apply it
1184 directly to the latency of the register. */
1185 if (! load_pending_for_register (cpu, out_FR, 1, REGTYPE_FR))
1187 int *fr = ps->fr_latency;
1188 fr[out_FR] += cycles;
1191 ps->fr_ptime[out_FR] += cycles;
1196 update_FRdouble_ptime (SIM_CPU *cpu, INT out_FR, int cycles)
1200 FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
1201 /* If a load is pending on this register, then add the cycles to
1202 the post processing time for this register. Otherwise apply it
1203 directly to the latency of the register. */
1204 if (! load_pending_for_register (cpu, out_FR, 2, REGTYPE_FR))
1206 int *fr = ps->fr_latency;
1207 fr[out_FR] += cycles;
1209 fr[out_FR + 1] += cycles;
1213 ps->fr_ptime[out_FR] += cycles;
1215 ps->fr_ptime[out_FR + 1] += cycles;
1220 /* Top up the post-processing time of the given ACC by the given number of
1223 update_ACC_ptime (SIM_CPU *cpu, INT out_ACC, int cycles)
1227 FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
1228 /* No load can be pending on this register. Apply the cycles
1229 directly to the latency of the register. */
1230 int *acc = ps->acc_latency;
1231 acc[out_ACC] += cycles;
1235 /* Top up the post-processing time of the given SPR by the given number of
1238 update_SPR_ptime (SIM_CPU *cpu, INT out_SPR, int cycles)
1242 FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
1243 /* No load can be pending on this register. Apply the cycles
1244 directly to the latency of the register. */
1245 int *spr = ps->spr_latency;
1246 spr[out_SPR] += cycles;
1251 decrease_ACC_busy (SIM_CPU *cpu, INT out_ACC, int cycles)
1255 FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
1256 int *acc = ps->acc_busy;
1257 acc[out_ACC] -= cycles;
1258 if (ps->acc_busy_adjust[out_ACC] >= 0
1259 && cycles > ps->acc_busy_adjust[out_ACC])
1260 ps->acc_busy_adjust[out_ACC] = cycles;
1265 increase_ACC_busy (SIM_CPU *cpu, INT out_ACC, int cycles)
1269 FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
1270 int *acc = ps->acc_busy;
1271 acc[out_ACC] += cycles;
1276 enforce_full_acc_latency (SIM_CPU *cpu, INT in_ACC)
1278 FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
1279 ps->acc_busy_adjust [in_ACC] = -1;
1283 decrease_FR_busy (SIM_CPU *cpu, INT out_FR, int cycles)
1287 FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
1288 int *fr = ps->fr_busy;
1289 fr[out_FR] -= cycles;
1290 if (ps->fr_busy_adjust[out_FR] >= 0
1291 && cycles > ps->fr_busy_adjust[out_FR])
1292 ps->fr_busy_adjust[out_FR] = cycles;
1297 increase_FR_busy (SIM_CPU *cpu, INT out_FR, int cycles)
1301 FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
1302 int *fr = ps->fr_busy;
1303 fr[out_FR] += cycles;
1307 /* Top up the latency of the given ACC by the given number of cycles. */
1309 update_ACC_latency (SIM_CPU *cpu, INT out_ACC, int cycles)
1313 FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
1314 int *acc = ps->acc_latency;
1315 if (acc[out_ACC] < cycles)
1316 acc[out_ACC] = cycles;
1320 /* Top up the latency of the given CCR by the given number of cycles. */
1322 update_CCR_latency (SIM_CPU *cpu, INT out_CCR, int cycles)
1326 FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
1327 int *ccr = ps->ccr_latency;
1328 if (ccr[out_CCR] < cycles)
1329 ccr[out_CCR] = cycles;
1333 /* Top up the latency of the given SPR by the given number of cycles. */
1335 update_SPR_latency (SIM_CPU *cpu, INT out_SPR, int cycles)
1339 FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
1340 int *spr = ps->spr_latency;
1341 if (spr[out_SPR] < cycles)
1342 spr[out_SPR] = cycles;
1346 /* Top up the latency of the given integer division resource by the given
1347 number of cycles. */
1349 update_idiv_resource_latency (SIM_CPU *cpu, INT in_resource, int cycles)
1351 /* operate directly on the busy cycles since each resource can only
1352 be used once in a VLIW insn. */
1353 FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
1354 int *r = ps->idiv_busy;
1355 r[in_resource] = cycles;
1358 /* Set the latency of the given resource to the given number of cycles. */
1360 update_fdiv_resource_latency (SIM_CPU *cpu, INT in_resource, int cycles)
1362 /* operate directly on the busy cycles since each resource can only
1363 be used once in a VLIW insn. */
1364 FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
1365 int *r = ps->fdiv_busy;
1366 r[in_resource] = cycles;
1369 /* Set the latency of the given resource to the given number of cycles. */
1371 update_fsqrt_resource_latency (SIM_CPU *cpu, INT in_resource, int cycles)
1373 /* operate directly on the busy cycles since each resource can only
1374 be used once in a VLIW insn. */
1375 FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
1376 int *r = ps->fsqrt_busy;
1377 r[in_resource] = cycles;
1380 /* Set the latency of the given resource to the given number of cycles. */
1382 update_float_resource_latency (SIM_CPU *cpu, INT in_resource, int cycles)
1384 /* operate directly on the busy cycles since each resource can only
1385 be used once in a VLIW insn. */
1386 FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
1387 int *r = ps->float_busy;
1388 r[in_resource] = cycles;
1392 update_media_resource_latency (SIM_CPU *cpu, INT in_resource, int cycles)
1394 /* operate directly on the busy cycles since each resource can only
1395 be used once in a VLIW insn. */
1396 FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
1397 int *r = ps->media_busy;
1398 r[in_resource] = cycles;
1401 /* Set the branch penalty to the given number of cycles. */
1403 update_branch_penalty (SIM_CPU *cpu, int cycles)
1405 /* operate directly on the busy cycles since only one branch can occur
1407 FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
1408 ps->branch_penalty = cycles;
1411 /* Check the availability of the given GR register and update the number
1412 of cycles the current VLIW insn must wait until it is available. */
1414 vliw_wait_for_GR (SIM_CPU *cpu, INT in_GR)
1416 FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
1417 int *gr = ps->gr_busy;
1418 /* If the latency of the register is greater than the current wait
1419 then update the current wait. */
1420 if (in_GR >= 0 && gr[in_GR] > ps->vliw_wait)
1422 if (TRACE_INSN_P (cpu))
1423 sprintf (hazard_name, "Data hazard for gr%d:", in_GR);
1424 ps->vliw_wait = gr[in_GR];
1428 /* Check the availability of the given GR register and update the number
1429 of cycles the current VLIW insn must wait until it is available. */
1431 vliw_wait_for_GRdouble (SIM_CPU *cpu, INT in_GR)
1433 FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
1434 int *gr = ps->gr_busy;
1435 /* If the latency of the register is greater than the current wait
1436 then update the current wait. */
1439 if (gr[in_GR] > ps->vliw_wait)
1441 if (TRACE_INSN_P (cpu))
1442 sprintf (hazard_name, "Data hazard for gr%d:", in_GR);
1443 ps->vliw_wait = gr[in_GR];
1445 if (in_GR < 63 && gr[in_GR + 1] > ps->vliw_wait)
1447 if (TRACE_INSN_P (cpu))
1448 sprintf (hazard_name, "Data hazard for gr%d:", in_GR + 1);
1449 ps->vliw_wait = gr[in_GR + 1];
1454 /* Check the availability of the given FR register and update the number
1455 of cycles the current VLIW insn must wait until it is available. */
1457 vliw_wait_for_FR (SIM_CPU *cpu, INT in_FR)
1459 FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
1460 int *fr = ps->fr_busy;
1461 /* If the latency of the register is greater than the current wait
1462 then update the current wait. */
1463 if (in_FR >= 0 && fr[in_FR] > ps->vliw_wait)
1465 if (TRACE_INSN_P (cpu))
1466 sprintf (hazard_name, "Data hazard for fr%d:", in_FR);
1467 ps->vliw_wait = fr[in_FR];
1471 /* Check the availability of the given GR register and update the number
1472 of cycles the current VLIW insn must wait until it is available. */
1474 vliw_wait_for_FRdouble (SIM_CPU *cpu, INT in_FR)
1476 FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
1477 int *fr = ps->fr_busy;
1478 /* If the latency of the register is greater than the current wait
1479 then update the current wait. */
1482 if (fr[in_FR] > ps->vliw_wait)
1484 if (TRACE_INSN_P (cpu))
1485 sprintf (hazard_name, "Data hazard for fr%d:", in_FR);
1486 ps->vliw_wait = fr[in_FR];
1488 if (in_FR < 63 && fr[in_FR + 1] > ps->vliw_wait)
1490 if (TRACE_INSN_P (cpu))
1491 sprintf (hazard_name, "Data hazard for fr%d:", in_FR + 1);
1492 ps->vliw_wait = fr[in_FR + 1];
1497 /* Check the availability of the given CCR register and update the number
1498 of cycles the current VLIW insn must wait until it is available. */
1500 vliw_wait_for_CCR (SIM_CPU *cpu, INT in_CCR)
1502 FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
1503 int *ccr = ps->ccr_busy;
1504 /* If the latency of the register is greater than the current wait
1505 then update the current wait. */
1506 if (in_CCR >= 0 && ccr[in_CCR] > ps->vliw_wait)
1508 if (TRACE_INSN_P (cpu))
1511 sprintf (hazard_name, "Data hazard for icc%d:", in_CCR-4);
1513 sprintf (hazard_name, "Data hazard for fcc%d:", in_CCR);
1515 ps->vliw_wait = ccr[in_CCR];
1519 /* Check the availability of the given ACC register and update the number
1520 of cycles the current VLIW insn must wait until it is available. */
1522 vliw_wait_for_ACC (SIM_CPU *cpu, INT in_ACC)
1524 FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
1525 int *acc = ps->acc_busy;
1526 /* If the latency of the register is greater than the current wait
1527 then update the current wait. */
1528 if (in_ACC >= 0 && acc[in_ACC] > ps->vliw_wait)
1530 if (TRACE_INSN_P (cpu))
1531 sprintf (hazard_name, "Data hazard for acc%d:", in_ACC);
1532 ps->vliw_wait = acc[in_ACC];
1536 /* Check the availability of the given SPR register and update the number
1537 of cycles the current VLIW insn must wait until it is available. */
1539 vliw_wait_for_SPR (SIM_CPU *cpu, INT in_SPR)
1541 FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
1542 int *spr = ps->spr_busy;
1543 /* If the latency of the register is greater than the current wait
1544 then update the current wait. */
1545 if (in_SPR >= 0 && spr[in_SPR] > ps->vliw_wait)
1547 if (TRACE_INSN_P (cpu))
1548 sprintf (hazard_name, "Data hazard for spr %d:", in_SPR);
1549 ps->vliw_wait = spr[in_SPR];
1553 /* Check the availability of the given integer division resource and update
1554 the number of cycles the current VLIW insn must wait until it is available.
1557 vliw_wait_for_idiv_resource (SIM_CPU *cpu, INT in_resource)
1559 FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
1560 int *r = ps->idiv_busy;
1561 /* If the latency of the resource is greater than the current wait
1562 then update the current wait. */
1563 if (r[in_resource] > ps->vliw_wait)
1565 if (TRACE_INSN_P (cpu))
1567 sprintf (hazard_name, "Resource hazard for integer division in slot I%d:", in_resource);
1569 ps->vliw_wait = r[in_resource];
1573 /* Check the availability of the given float division resource and update
1574 the number of cycles the current VLIW insn must wait until it is available.
1577 vliw_wait_for_fdiv_resource (SIM_CPU *cpu, INT in_resource)
1579 FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
1580 int *r = ps->fdiv_busy;
1581 /* If the latency of the resource is greater than the current wait
1582 then update the current wait. */
1583 if (r[in_resource] > ps->vliw_wait)
1585 if (TRACE_INSN_P (cpu))
1587 sprintf (hazard_name, "Resource hazard for floating point division in slot F%d:", in_resource);
1589 ps->vliw_wait = r[in_resource];
1593 /* Check the availability of the given float square root resource and update
1594 the number of cycles the current VLIW insn must wait until it is available.
1597 vliw_wait_for_fsqrt_resource (SIM_CPU *cpu, INT in_resource)
1599 FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
1600 int *r = ps->fsqrt_busy;
1601 /* If the latency of the resource is greater than the current wait
1602 then update the current wait. */
1603 if (r[in_resource] > ps->vliw_wait)
1605 if (TRACE_INSN_P (cpu))
1607 sprintf (hazard_name, "Resource hazard for square root in slot F%d:", in_resource);
1609 ps->vliw_wait = r[in_resource];
1613 /* Check the availability of the given float unit resource and update
1614 the number of cycles the current VLIW insn must wait until it is available.
1617 vliw_wait_for_float_resource (SIM_CPU *cpu, INT in_resource)
1619 FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
1620 int *r = ps->float_busy;
1621 /* If the latency of the resource is greater than the current wait
1622 then update the current wait. */
1623 if (r[in_resource] > ps->vliw_wait)
1625 if (TRACE_INSN_P (cpu))
1627 sprintf (hazard_name, "Resource hazard for floating point unit in slot F%d:", in_resource);
1629 ps->vliw_wait = r[in_resource];
1633 /* Check the availability of the given media unit resource and update
1634 the number of cycles the current VLIW insn must wait until it is available.
1637 vliw_wait_for_media_resource (SIM_CPU *cpu, INT in_resource)
1639 FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
1640 int *r = ps->media_busy;
1641 /* If the latency of the resource is greater than the current wait
1642 then update the current wait. */
1643 if (r[in_resource] > ps->vliw_wait)
1645 if (TRACE_INSN_P (cpu))
1647 sprintf (hazard_name, "Resource hazard for media unit in slot M%d:", in_resource);
1649 ps->vliw_wait = r[in_resource];
1653 /* Run the caches until all requests for the given register(s) are satisfied. */
1655 load_wait_for_GR (SIM_CPU *cpu, INT in_GR)
1660 while (load_pending_for_register (cpu, in_GR, 1/*words*/, REGTYPE_NONE))
1662 frv_model_advance_cycles (cpu, 1);
1667 FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
1668 ps->vliw_wait += wait;
1669 ps->vliw_load_stall += wait;
1670 if (TRACE_INSN_P (cpu))
1671 sprintf (hazard_name, "Data hazard for gr%d:", in_GR);
1677 load_wait_for_FR (SIM_CPU *cpu, INT in_FR)
1681 FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
1684 while (load_pending_for_register (cpu, in_FR, 1/*words*/, REGTYPE_FR))
1686 frv_model_advance_cycles (cpu, 1);
1689 /* Post processing time may have been added to the register's
1690 latency after the loads were processed. Account for that too.
1696 frv_model_advance_cycles (cpu, fr[in_FR]);
1698 /* Update the vliw_wait with the number of cycles we waited for the
1699 load and any post-processing. */
1702 ps->vliw_wait += wait;
1703 ps->vliw_load_stall += wait;
1704 if (TRACE_INSN_P (cpu))
1705 sprintf (hazard_name, "Data hazard for fr%d:", in_FR);
1711 load_wait_for_GRdouble (SIM_CPU *cpu, INT in_GR)
1716 while (load_pending_for_register (cpu, in_GR, 2/*words*/, REGTYPE_NONE))
1718 frv_model_advance_cycles (cpu, 1);
1723 FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
1724 ps->vliw_wait += wait;
1725 ps->vliw_load_stall += wait;
1726 if (TRACE_INSN_P (cpu))
1727 sprintf (hazard_name, "Data hazard for gr%d:", in_GR);
1733 load_wait_for_FRdouble (SIM_CPU *cpu, INT in_FR)
1737 FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
1740 while (load_pending_for_register (cpu, in_FR, 2/*words*/, REGTYPE_FR))
1742 frv_model_advance_cycles (cpu, 1);
1745 /* Post processing time may have been added to the registers'
1746 latencies after the loads were processed. Account for that too.
1752 frv_model_advance_cycles (cpu, fr[in_FR]);
1758 wait += fr[in_FR + 1];
1759 frv_model_advance_cycles (cpu, fr[in_FR + 1]);
1762 /* Update the vliw_wait with the number of cycles we waited for the
1763 load and any post-processing. */
1766 ps->vliw_wait += wait;
1767 ps->vliw_load_stall += wait;
1768 if (TRACE_INSN_P (cpu))
1769 sprintf (hazard_name, "Data hazard for fr%d:", in_FR);
1775 enforce_full_fr_latency (SIM_CPU *cpu, INT in_FR)
1777 FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
1778 ps->fr_busy_adjust [in_FR] = -1;
1781 /* Calculate how long the post processing for a floating point insn must
1782 wait for resources to become available. */
1784 post_wait_for_FR (SIM_CPU *cpu, INT in_FR)
1786 FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
1787 int *fr = ps->fr_busy;
1789 if (in_FR >= 0 && fr[in_FR] > ps->post_wait)
1791 ps->post_wait = fr[in_FR];
1792 if (TRACE_INSN_P (cpu))
1793 sprintf (hazard_name, "Data hazard for fr%d:", in_FR);
1797 /* Calculate how long the post processing for a floating point insn must
1798 wait for resources to become available. */
1800 post_wait_for_FRdouble (SIM_CPU *cpu, INT in_FR)
1802 FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
1803 int *fr = ps->fr_busy;
1807 if (fr[in_FR] > ps->post_wait)
1809 ps->post_wait = fr[in_FR];
1810 if (TRACE_INSN_P (cpu))
1811 sprintf (hazard_name, "Data hazard for fr%d:", in_FR);
1813 if (in_FR < 63 && fr[in_FR + 1] > ps->post_wait)
1815 ps->post_wait = fr[in_FR + 1];
1816 if (TRACE_INSN_P (cpu))
1817 sprintf (hazard_name, "Data hazard for fr%d:", in_FR + 1);
1823 post_wait_for_ACC (SIM_CPU *cpu, INT in_ACC)
1825 FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
1826 int *acc = ps->acc_busy;
1828 if (in_ACC >= 0 && acc[in_ACC] > ps->post_wait)
1830 ps->post_wait = acc[in_ACC];
1831 if (TRACE_INSN_P (cpu))
1832 sprintf (hazard_name, "Data hazard for acc%d:", in_ACC);
1837 post_wait_for_CCR (SIM_CPU *cpu, INT in_CCR)
1839 FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
1840 int *ccr = ps->ccr_busy;
1842 if (in_CCR >= 0 && ccr[in_CCR] > ps->post_wait)
1844 ps->post_wait = ccr[in_CCR];
1845 if (TRACE_INSN_P (cpu))
1848 sprintf (hazard_name, "Data hazard for icc%d:", in_CCR - 4);
1850 sprintf (hazard_name, "Data hazard for fcc%d:", in_CCR);
1856 post_wait_for_SPR (SIM_CPU *cpu, INT in_SPR)
1858 FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
1859 int *spr = ps->spr_busy;
1861 if (in_SPR >= 0 && spr[in_SPR] > ps->post_wait)
1863 ps->post_wait = spr[in_SPR];
1864 if (TRACE_INSN_P (cpu))
1865 sprintf (hazard_name, "Data hazard for spr[%d]:", in_SPR);
1870 post_wait_for_fdiv (SIM_CPU *cpu, INT slot)
1872 FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
1873 int *fdiv = ps->fdiv_busy;
1875 /* Multiple floating point divisions in the same slot need only wait 1
1877 if (fdiv[slot] > 0 && 1 > ps->post_wait)
1880 if (TRACE_INSN_P (cpu))
1882 sprintf (hazard_name, "Resource hazard for floating point division in slot F%d:", slot);
1888 post_wait_for_fsqrt (SIM_CPU *cpu, INT slot)
1890 FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
1891 int *fsqrt = ps->fsqrt_busy;
1893 /* Multiple floating point square roots in the same slot need only wait 1
1895 if (fsqrt[slot] > 0 && 1 > ps->post_wait)
1898 if (TRACE_INSN_P (cpu))
1900 sprintf (hazard_name, "Resource hazard for square root in slot F%d:", slot);
1906 post_wait_for_float (SIM_CPU *cpu, INT slot)
1908 FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
1909 int *flt = ps->float_busy;
1911 /* Multiple floating point square roots in the same slot need only wait 1
1913 if (flt[slot] > ps->post_wait)
1915 ps->post_wait = flt[slot];
1916 if (TRACE_INSN_P (cpu))
1918 sprintf (hazard_name, "Resource hazard for floating point unit in slot F%d:", slot);
1924 post_wait_for_media (SIM_CPU *cpu, INT slot)
1926 FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
1927 int *media = ps->media_busy;
1929 /* Multiple floating point square roots in the same slot need only wait 1
1931 if (media[slot] > ps->post_wait)
1933 ps->post_wait = media[slot];
1934 if (TRACE_INSN_P (cpu))
1936 sprintf (hazard_name, "Resource hazard for media unit in slot M%d:", slot);
1941 /* Print cpu-specific profile information. */
1942 #define COMMAS(n) sim_add_commas (comma_buf, sizeof (comma_buf), (n))
1945 print_cache (SIM_CPU *cpu, FRV_CACHE *cache, const char *cache_name)
1947 SIM_DESC sd = CPU_STATE (cpu);
1954 sim_io_printf (sd, " %s Cache\n\n", cache_name);
1955 accesses = cache->statistics.accesses;
1956 sim_io_printf (sd, " Total accesses: %s\n", COMMAS (accesses));
1960 unsigned hits = cache->statistics.hits;
1961 sim_io_printf (sd, " Hits: %s\n", COMMAS (hits));
1962 rate = (float)hits / accesses;
1963 sim_io_printf (sd, " Hit rate: %.2f%%\n", rate * 100);
1967 sim_io_printf (sd, " Model %s has no %s cache\n",
1968 MODEL_NAME (CPU_MODEL (cpu)), cache_name);
1970 sim_io_printf (sd, "\n");
1973 /* This table must correspond to the UNIT_ATTR table in
1974 opcodes/frv-desc.h. Only the units up to UNIT_C need be
1975 listed since the others cannot occur after mapping. */
1980 "I0", "I1", "I01", "I2", "I3", "IALL",
1981 "FM0", "FM1", "FM01", "FM2", "FM3", "FMALL", "FMLOW",
1987 print_parallel (SIM_CPU *cpu, int verbose)
1989 SIM_DESC sd = CPU_STATE (cpu);
1990 PROFILE_DATA *p = CPU_PROFILE_DATA (cpu);
1991 FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
1992 unsigned total, vliw;
1996 sim_io_printf (sd, "Model %s Parallelization\n\n",
1997 MODEL_NAME (CPU_MODEL (cpu)));
1999 total = PROFILE_TOTAL_INSN_COUNT (p);
2000 sim_io_printf (sd, " Total instructions: %s\n", COMMAS (total));
2001 vliw = ps->vliw_insns;
2002 sim_io_printf (sd, " VLIW instructions: %s\n", COMMAS (vliw));
2003 average = (float)total / vliw;
2004 sim_io_printf (sd, " Average VLIW length: %.2f\n", average);
2005 average = (float)PROFILE_MODEL_TOTAL_CYCLES (p) / vliw;
2006 sim_io_printf (sd, " Cycles per VLIW instruction: %.2f\n", average);
2007 average = (float)total / PROFILE_MODEL_TOTAL_CYCLES (p);
2008 sim_io_printf (sd, " Instructions per cycle: %.2f\n", average);
2014 int max_name_len = 0;
2015 for (i = UNIT_NIL + 1; i < UNIT_NUM_UNITS; ++i)
2017 if (INSNS_IN_SLOT (i))
2020 if (INSNS_IN_SLOT (i) > max_val)
2021 max_val = INSNS_IN_SLOT (i);
2022 len = strlen (slot_names[i]);
2023 if (len > max_name_len)
2029 sim_io_printf (sd, "\n");
2030 sim_io_printf (sd, " Instructions per slot:\n");
2031 sim_io_printf (sd, "\n");
2032 for (i = UNIT_NIL + 1; i < UNIT_NUM_UNITS; ++i)
2034 if (INSNS_IN_SLOT (i) != 0)
2036 sim_io_printf (sd, " %*s: %*s: ",
2037 max_name_len, slot_names[i],
2038 max_val < 10000 ? 5 : 10,
2039 COMMAS (INSNS_IN_SLOT (i)));
2040 sim_profile_print_bar (sd, PROFILE_HISTOGRAM_WIDTH,
2043 sim_io_printf (sd, "\n");
2046 } /* details to print */
2049 sim_io_printf (sd, "\n");
2053 frv_profile_info (SIM_CPU *cpu, int verbose)
2055 /* FIXME: Need to add smp support. */
2056 PROFILE_DATA *p = CPU_PROFILE_DATA (cpu);
2058 #if WITH_PROFILE_PARALLEL_P
2059 if (PROFILE_FLAGS (p) [PROFILE_PARALLEL_IDX])
2060 print_parallel (cpu, verbose);
2063 #if WITH_PROFILE_CACHE_P
2064 if (PROFILE_FLAGS (p) [PROFILE_CACHE_IDX])
2066 SIM_DESC sd = CPU_STATE (cpu);
2067 sim_io_printf (sd, "Model %s Cache Statistics\n\n",
2068 MODEL_NAME (CPU_MODEL (cpu)));
2069 print_cache (cpu, CPU_INSN_CACHE (cpu), "Instruction");
2070 print_cache (cpu, CPU_DATA_CACHE (cpu), "Data");
2072 #endif /* WITH_PROFILE_CACHE_P */
2075 /* A hack to get registers referenced for profiling. */
2076 SI frv_ref_SI (SI ref) {return ref;}
2077 #endif /* WITH_PROFILE_MODEL_P */