/* frv simulator machine independent profiling code.

   Copyright (C) 1998, 1999, 2000, 2001 Free Software Foundation, Inc.
   Contributed by Red Hat.

This file is part of the GNU simulators.

This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2, or (at your option)
any later version.

This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License along
with this program; if not, write to the Free Software Foundation, Inc.,
59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.  */
#define WANT_CPU
#define WANT_CPU_FRVBF

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#include "sim-main.h"
#include "bfd.h"

#if WITH_PROFILE_MODEL_P

#include "profile.h"
#include "profile-fr400.h"
#include "profile-fr500.h"
36 reset_gr_flags (SIM_CPU *cpu, INT gr)
38 SIM_DESC sd = CPU_STATE (cpu);
39 if (STATE_ARCHITECTURE (sd)->mach == bfd_mach_fr400)
40 fr400_reset_gr_flags (cpu, gr);
41 /* Other machines have no gr flags right now. */
45 reset_fr_flags (SIM_CPU *cpu, INT fr)
47 SIM_DESC sd = CPU_STATE (cpu);
48 if (STATE_ARCHITECTURE (sd)->mach == bfd_mach_fr400)
49 fr400_reset_fr_flags (cpu, fr);
50 else if (STATE_ARCHITECTURE (sd)->mach == bfd_mach_fr500)
51 fr500_reset_fr_flags (cpu, fr);
55 reset_acc_flags (SIM_CPU *cpu, INT acc)
57 SIM_DESC sd = CPU_STATE (cpu);
58 if (STATE_ARCHITECTURE (sd)->mach == bfd_mach_fr400)
59 fr400_reset_acc_flags (cpu, acc);
60 /* Other machines have no acc flags right now. */
64 reset_cc_flags (SIM_CPU *cpu, INT cc)
66 SIM_DESC sd = CPU_STATE (cpu);
67 if (STATE_ARCHITECTURE (sd)->mach == bfd_mach_fr500)
68 fr500_reset_cc_flags (cpu, cc);
69 /* Other machines have no cc flags. */
73 set_use_is_gr_complex (SIM_CPU *cpu, INT gr)
77 FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
78 reset_gr_flags (cpu, gr);
79 ps->cur_gr_complex |= (((DI)1) << gr);
84 set_use_not_gr_complex (SIM_CPU *cpu, INT gr)
88 FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
89 ps->cur_gr_complex &= ~(((DI)1) << gr);
94 use_is_gr_complex (SIM_CPU *cpu, INT gr)
98 FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
99 return ps->cur_gr_complex & (((DI)1) << gr);
104 /* Globals flag indicates whether this insn is being modeled. */
105 enum FRV_INSN_MODELING model_insn = FRV_INSN_NO_MODELING;
107 /* static buffer for the name of the currently most restrictive hazard. */
108 static char hazard_name[100] = "";
110 /* Print information about the wait applied to an entire VLIW insn. */
111 FRV_INSN_FETCH_BUFFER frv_insn_fetch_buffer[]
113 {1, NO_REQNO}, {1, NO_REQNO} /* init with impossible address. */
125 /* A queue of load requests from the data cache. Use to keep track of loads
126 which are still pending. */
127 /* TODO -- some of these are mutually exclusive and can use a union. */
142 enum cache_request request;
143 } CACHE_QUEUE_ELEMENT;
145 #define CACHE_QUEUE_SIZE 64 /* TODO -- make queue dynamic */
150 CACHE_QUEUE_ELEMENT q[CACHE_QUEUE_SIZE];
151 } cache_queue = {0, 0};
153 /* Queue a request for a load from the cache. The load will be queued as
154 'inactive' and will be requested after the given number
155 of cycles have passed from the point the load is activated. */
157 request_cache_load (SIM_CPU *cpu, INT regnum, int regtype, int cycles)
159 CACHE_QUEUE_ELEMENT *q;
163 /* For a conditional load which was not executed, CPU_LOAD_LENGTH will be
165 if (CPU_LOAD_LENGTH (cpu) == 0)
168 if (cache_queue.ix >= CACHE_QUEUE_SIZE)
169 abort (); /* TODO: Make the queue dynamic */
171 q = & cache_queue.q[cache_queue.ix];
174 q->reqno = cache_queue.reqno++;
175 q->request = cache_load;
176 q->cache = CPU_DATA_CACHE (cpu);
177 q->address = CPU_LOAD_ADDRESS (cpu);
178 q->length = CPU_LOAD_LENGTH (cpu);
179 q->is_signed = CPU_LOAD_SIGNED (cpu);
181 q->regtype = regtype;
185 vliw = CPU_VLIW (cpu);
186 slot = vliw->next_slot - 1;
187 q->slot = (*vliw->current_vliw)[slot];
189 CPU_LOAD_LENGTH (cpu) = 0;
192 /* Queue a request to flush the cache. The request will be queued as
193 'inactive' and will be requested after the given number
194 of cycles have passed from the point the request is activated. */
196 request_cache_flush (SIM_CPU *cpu, FRV_CACHE *cache, int cycles)
198 CACHE_QUEUE_ELEMENT *q;
202 if (cache_queue.ix >= CACHE_QUEUE_SIZE)
203 abort (); /* TODO: Make the queue dynamic */
205 q = & cache_queue.q[cache_queue.ix];
208 q->reqno = cache_queue.reqno++;
209 q->request = cache_flush;
211 q->address = CPU_LOAD_ADDRESS (cpu);
212 q->all = CPU_PROFILE_STATE (cpu)->all_cache_entries;
216 vliw = CPU_VLIW (cpu);
217 slot = vliw->next_slot - 1;
218 q->slot = (*vliw->current_vliw)[slot];
221 /* Queue a request to invalidate the cache. The request will be queued as
222 'inactive' and will be requested after the given number
223 of cycles have passed from the point the request is activated. */
225 request_cache_invalidate (SIM_CPU *cpu, FRV_CACHE *cache, int cycles)
227 CACHE_QUEUE_ELEMENT *q;
231 if (cache_queue.ix >= CACHE_QUEUE_SIZE)
232 abort (); /* TODO: Make the queue dynamic */
234 q = & cache_queue.q[cache_queue.ix];
237 q->reqno = cache_queue.reqno++;
238 q->request = cache_invalidate;
240 q->address = CPU_LOAD_ADDRESS (cpu);
241 q->all = CPU_PROFILE_STATE (cpu)->all_cache_entries;
245 vliw = CPU_VLIW (cpu);
246 slot = vliw->next_slot - 1;
247 q->slot = (*vliw->current_vliw)[slot];
250 /* Queue a request to preload the cache. The request will be queued as
251 'inactive' and will be requested after the given number
252 of cycles have passed from the point the request is activated. */
254 request_cache_preload (SIM_CPU *cpu, FRV_CACHE *cache, int cycles)
256 CACHE_QUEUE_ELEMENT *q;
260 if (cache_queue.ix >= CACHE_QUEUE_SIZE)
261 abort (); /* TODO: Make the queue dynamic */
263 q = & cache_queue.q[cache_queue.ix];
266 q->reqno = cache_queue.reqno++;
267 q->request = cache_preload;
269 q->address = CPU_LOAD_ADDRESS (cpu);
270 q->length = CPU_LOAD_LENGTH (cpu);
271 q->lock = CPU_LOAD_LOCK (cpu);
275 vliw = CPU_VLIW (cpu);
276 slot = vliw->next_slot - 1;
277 q->slot = (*vliw->current_vliw)[slot];
279 CPU_LOAD_LENGTH (cpu) = 0;
282 /* Queue a request to unlock the cache. The request will be queued as
283 'inactive' and will be requested after the given number
284 of cycles have passed from the point the request is activated. */
286 request_cache_unlock (SIM_CPU *cpu, FRV_CACHE *cache, int cycles)
288 CACHE_QUEUE_ELEMENT *q;
292 if (cache_queue.ix >= CACHE_QUEUE_SIZE)
293 abort (); /* TODO: Make the queue dynamic */
295 q = & cache_queue.q[cache_queue.ix];
298 q->reqno = cache_queue.reqno++;
299 q->request = cache_unlock;
301 q->address = CPU_LOAD_ADDRESS (cpu);
305 vliw = CPU_VLIW (cpu);
306 slot = vliw->next_slot - 1;
307 q->slot = (*vliw->current_vliw)[slot];
311 submit_cache_request (CACHE_QUEUE_ELEMENT *q)
316 frv_cache_request_load (q->cache, q->reqno, q->address, q->slot);
319 frv_cache_request_invalidate (q->cache, q->reqno, q->address, q->slot,
322 case cache_invalidate:
323 frv_cache_request_invalidate (q->cache, q->reqno, q->address, q->slot,
327 frv_cache_request_preload (q->cache, q->address, q->slot,
331 frv_cache_request_unlock (q->cache, q->address, q->slot);
338 /* Activate all inactive load requests. */
340 activate_cache_requests (SIM_CPU *cpu)
343 for (i = 0; i < cache_queue.ix; ++i)
345 CACHE_QUEUE_ELEMENT *q = & cache_queue.q[i];
349 /* Submit the request now if the cycle count is zero. */
351 submit_cache_request (q);
356 /* Check to see if a load is pending which affects the given register(s).
359 load_pending_for_register (SIM_CPU *cpu, int regnum, int words, int regtype)
362 for (i = 0; i < cache_queue.ix; ++i)
364 CACHE_QUEUE_ELEMENT *q = & cache_queue.q[i];
366 /* Must be the same kind of register. */
367 if (! q->active || q->request != cache_load || q->regtype != regtype)
370 /* If the registers numbers are equal, then we have a match. */
371 if (q->regnum == regnum)
372 return 1; /* load pending */
374 /* Check for overlap of a load with a multi-word register. */
375 if (regnum < q->regnum)
377 if (regnum + words > q->regnum)
380 /* Check for overlap of a multi-word load with the register. */
383 int data_words = (q->length + sizeof (SI) - 1) / sizeof (SI);
384 if (q->regnum + data_words > regnum)
389 return 0; /* no load pending */
392 /* Check to see if a cache flush pending which affects the given address. */
394 flush_pending_for_address (SIM_CPU *cpu, SI address)
396 int line_mask = ~(CPU_DATA_CACHE (cpu)->line_size - 1);
398 for (i = 0; i < cache_queue.ix; ++i)
400 CACHE_QUEUE_ELEMENT *q = & cache_queue.q[i];
402 /* Must be the same kind of request and active. */
403 if (! q->active || q->request != cache_flush)
406 /* If the addresses are equal, then we have a match. */
407 if ((q->address & line_mask) == (address & line_mask))
408 return 1; /* flush pending */
411 return 0; /* no flush pending */
415 remove_cache_queue_element (SIM_CPU *cpu, int i)
417 /* If we are removing the load of a FR register, then remember which one(s).
419 CACHE_QUEUE_ELEMENT q = cache_queue.q[i];
421 for (--cache_queue.ix; i < cache_queue.ix; ++i)
422 cache_queue.q[i] = cache_queue.q[i + 1];
424 /* If we removed a load of a FR register, check to see if any other loads
425 of that register is still queued. If not, then apply the queued post
426 processing time of that register to its latency. Also apply
427 1 extra cycle of latency to the register since it was a floating point
429 if (q.request == cache_load && q.regtype != REGTYPE_NONE)
431 FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
432 int data_words = (q.length + sizeof (SI) - 1) / sizeof (SI);
434 for (j = 0; j < data_words; ++j)
436 int regnum = q.regnum + j;
437 if (! load_pending_for_register (cpu, regnum, 1, q.regtype))
439 if (q.regtype == REGTYPE_FR)
441 int *fr = ps->fr_busy;
442 fr[regnum] += 1 + ps->fr_ptime[regnum];
443 ps->fr_ptime[regnum] = 0;
450 /* Copy data from the cache buffer to the target register(s). */
452 copy_load_data (SIM_CPU *current_cpu, FRV_CACHE *cache, int slot,
453 CACHE_QUEUE_ELEMENT *q)
458 if (q->regtype == REGTYPE_FR)
462 QI value = CACHE_RETURN_DATA (cache, slot, q->address, QI, 1);
463 SET_H_FR (q->regnum, value);
467 UQI value = CACHE_RETURN_DATA (cache, slot, q->address, UQI, 1);
468 SET_H_FR (q->regnum, value);
475 QI value = CACHE_RETURN_DATA (cache, slot, q->address, QI, 1);
476 SET_H_GR (q->regnum, value);
480 UQI value = CACHE_RETURN_DATA (cache, slot, q->address, UQI, 1);
481 SET_H_GR (q->regnum, value);
486 if (q->regtype == REGTYPE_FR)
490 HI value = CACHE_RETURN_DATA (cache, slot, q->address, HI, 2);
491 SET_H_FR (q->regnum, value);
495 UHI value = CACHE_RETURN_DATA (cache, slot, q->address, UHI, 2);
496 SET_H_FR (q->regnum, value);
503 HI value = CACHE_RETURN_DATA (cache, slot, q->address, HI, 2);
504 SET_H_GR (q->regnum, value);
508 UHI value = CACHE_RETURN_DATA (cache, slot, q->address, UHI, 2);
509 SET_H_GR (q->regnum, value);
514 if (q->regtype == REGTYPE_FR)
517 CACHE_RETURN_DATA (cache, slot, q->address, SF, 4));
522 CACHE_RETURN_DATA (cache, slot, q->address, SI, 4));
526 if (q->regtype == REGTYPE_FR)
528 SET_H_FR_DOUBLE (q->regnum,
529 CACHE_RETURN_DATA (cache, slot, q->address, DF, 8));
533 SET_H_GR_DOUBLE (q->regnum,
534 CACHE_RETURN_DATA (cache, slot, q->address, DI, 8));
538 if (q->regtype == REGTYPE_FR)
539 frvbf_h_fr_quad_set_handler (current_cpu, q->regnum,
540 CACHE_RETURN_DATA_ADDRESS (cache, slot,
544 frvbf_h_gr_quad_set_handler (current_cpu, q->regnum,
545 CACHE_RETURN_DATA_ADDRESS (cache, slot,
555 request_complete (SIM_CPU *cpu, CACHE_QUEUE_ELEMENT *q)
558 if (! q->active || q->cycles > 0)
561 cache = CPU_DATA_CACHE (cpu);
565 /* For loads, we must wait until the data is returned from the cache. */
566 if (frv_cache_data_in_buffer (cache, 0, q->address, q->reqno))
568 copy_load_data (cpu, cache, 0, q);
571 if (frv_cache_data_in_buffer (cache, 1, q->address, q->reqno))
573 copy_load_data (cpu, cache, 1, q);
579 /* We must wait until the data is flushed. */
580 if (frv_cache_data_flushed (cache, 0, q->address, q->reqno))
582 if (frv_cache_data_flushed (cache, 1, q->address, q->reqno))
587 /* All other requests are complete once they've been made. */
594 /* Run the insn and data caches through the given number of cycles, taking
595 note of load requests which are fullfilled as a result. */
597 run_caches (SIM_CPU *cpu, int cycles)
599 FRV_CACHE* data_cache = CPU_DATA_CACHE (cpu);
600 FRV_CACHE* insn_cache = CPU_INSN_CACHE (cpu);
602 /* For each cycle, run the caches, noting which requests have been fullfilled
603 and submitting new requests on their designated cycles. */
604 for (i = 0; i < cycles; ++i)
607 /* Run the caches through 1 cycle. */
608 frv_cache_run (data_cache, 1);
609 frv_cache_run (insn_cache, 1);
611 /* Note whether prefetched insn data has been loaded yet. */
612 for (j = LS; j < FRV_CACHE_PIPELINES; ++j)
614 if (frv_insn_fetch_buffer[j].reqno != NO_REQNO
615 && frv_cache_data_in_buffer (insn_cache, j,
616 frv_insn_fetch_buffer[j].address,
617 frv_insn_fetch_buffer[j].reqno))
618 frv_insn_fetch_buffer[j].reqno = NO_REQNO;
621 /* Check to see which requests have been satisfied and which should
623 for (j = 0; j < cache_queue.ix; ++j)
625 CACHE_QUEUE_ELEMENT *q = & cache_queue.q[j];
629 /* If a load has been satisfied, complete the operation and remove it
631 if (request_complete (cpu, q))
633 remove_cache_queue_element (cpu, j);
638 /* Decrease the cycle count of each queued request.
639 Submit a request for each queued request whose cycle count has
643 submit_cache_request (q);
649 apply_latency_adjustments (SIM_CPU *cpu)
651 FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
653 /* update the latencies of the registers. */
654 int *fr = ps->fr_busy;
655 int *acc = ps->acc_busy;
656 for (i = 0; i < 64; ++i)
658 if (ps->fr_busy_adjust[i] > 0)
659 *fr -= ps->fr_busy_adjust[i]; /* OK if it goes negative. */
660 if (ps->acc_busy_adjust[i] > 0)
661 *acc -= ps->acc_busy_adjust[i]; /* OK if it goes negative. */
667 /* Account for the number of cycles which have just passed in the latency of
668 various system elements. Works for negative cycles too so that latency
669 can be extended in the case of insn fetch latency.
670 If negative or zero, then no adjustment is necessary. */
672 update_latencies (SIM_CPU *cpu, int cycles)
674 FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
676 /* update the latencies of the registers. */
681 int *gr = ps->gr_busy;
682 int *fr = ps->fr_busy;
683 int *acc = ps->acc_busy;
684 /* This loop handles GR, FR and ACC registers. */
685 for (i = 0; i < 64; ++i)
690 reset_gr_flags (cpu, i);
694 /* If the busy drops to 0, then mark the register as
698 int *fr_lat = ps->fr_latency + i;
700 ps->fr_busy_adjust[i] = 0;
701 /* Only clear flags if this register has no target latency. */
703 reset_fr_flags (cpu, i);
707 /* If the busy drops to 0, then mark the register as
711 int *acc_lat = ps->acc_latency + i;
713 ps->acc_busy_adjust[i] = 0;
714 /* Only clear flags if this register has no target latency. */
716 reset_acc_flags (cpu, i);
724 /* This loop handles CCR registers. */
726 for (i = 0; i < 8; ++i)
731 reset_cc_flags (cpu, i);
737 /* This loop handles resources. */
738 idiv = ps->idiv_busy;
739 fdiv = ps->fdiv_busy;
740 fsqrt = ps->fsqrt_busy;
741 for (i = 0; i < 2; ++i)
743 *idiv = (*idiv <= cycles) ? 0 : (*idiv - cycles);
744 *fdiv = (*fdiv <= cycles) ? 0 : (*fdiv - cycles);
745 *fsqrt = (*fsqrt <= cycles) ? 0 : (*fsqrt - cycles);
752 /* Print information about the wait for the given number of cycles. */
754 frv_model_trace_wait_cycles (SIM_CPU *cpu, int cycles, const char *hazard_name)
756 if (TRACE_INSN_P (cpu) && cycles > 0)
758 SIM_DESC sd = CPU_STATE (cpu);
759 trace_printf (sd, cpu, "**** %s wait %d cycles ***\n",
760 hazard_name, cycles);
765 trace_vliw_wait_cycles (SIM_CPU *cpu)
767 if (TRACE_INSN_P (cpu))
769 FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
770 frv_model_trace_wait_cycles (cpu, ps->vliw_wait, hazard_name);
774 /* Wait for the given number of cycles. */
776 frv_model_advance_cycles (SIM_CPU *cpu, int cycles)
778 PROFILE_DATA *p = CPU_PROFILE_DATA (cpu);
779 update_latencies (cpu, cycles);
780 run_caches (cpu, cycles);
781 PROFILE_MODEL_TOTAL_CYCLES (p) += cycles;
785 handle_resource_wait (SIM_CPU *cpu)
787 FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
788 if (ps->vliw_wait != 0)
789 frv_model_advance_cycles (cpu, ps->vliw_wait);
790 if (ps->vliw_load_stall > ps->vliw_wait)
791 ps->vliw_load_stall -= ps->vliw_wait;
793 ps->vliw_load_stall = 0;
796 /* Account for the number of cycles until these resources will be available
799 update_target_latencies (SIM_CPU *cpu)
801 FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
803 /* update the latencies of the registers. */
805 int *gr_lat = ps->gr_latency;
806 int *fr_lat = ps->fr_latency;
807 int *acc_lat = ps->acc_latency;
809 int *gr = ps->gr_busy;
810 int *fr = ps->fr_busy;
811 int *acc = ps->acc_busy;
812 /* This loop handles GR, FR and ACC registers. */
813 for (i = 0; i < 64; ++i)
834 /* This loop handles CCR registers. */
836 ccr_lat = ps->ccr_latency;
837 for (i = 0; i < 8; ++i)
848 /* Run the caches until all pending cache flushes are complete. */
850 wait_for_flush (SIM_CPU *cpu)
852 SI address = CPU_LOAD_ADDRESS (cpu);
854 while (flush_pending_for_address (cpu, address))
856 frv_model_advance_cycles (cpu, 1);
859 if (TRACE_INSN_P (cpu) && wait)
861 sprintf (hazard_name, "Data cache flush address %p:", address);
862 frv_model_trace_wait_cycles (cpu, wait, hazard_name);
866 /* Initialize cycle counting for an insn.
867 FIRST_P is non-zero if this is the first insn in a set of parallel
870 frvbf_model_insn_before (SIM_CPU *cpu, int first_p)
872 SIM_DESC sd = CPU_STATE (cpu);
873 FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
877 memset (ps->fr_busy_adjust, 0, sizeof (ps->fr_busy_adjust));
878 memset (ps->acc_busy_adjust, 0, sizeof (ps->acc_busy_adjust));
884 ps->vliw_branch_taken = 0;
885 ps->vliw_load_stall = 0;
888 switch (STATE_ARCHITECTURE (sd)->mach)
891 fr400_model_insn_before (cpu, first_p);
894 fr500_model_insn_before (cpu, first_p);
901 wait_for_flush (cpu);
904 /* Record the cycles computed for an insn.
905 LAST_P is non-zero if this is the last insn in a set of parallel insns,
906 and we update the total cycle count.
907 CYCLES is the cycle count of the insn. */
910 frvbf_model_insn_after (SIM_CPU *cpu, int last_p, int cycles)
912 PROFILE_DATA *p = CPU_PROFILE_DATA (cpu);
913 FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
914 SIM_DESC sd = CPU_STATE (cpu);
916 PROFILE_MODEL_CUR_INSN_CYCLES (p) = cycles;
918 /* The number of cycles for a VLIW insn is the maximum number of cycles
919 used by any individual insn within it. */
920 if (cycles > ps->vliw_cycles)
921 ps->vliw_cycles = cycles;
925 /* This is the last insn in a VLIW insn. */
926 struct frv_interrupt_timer *timer = & frv_interrupt_state.timer;
928 activate_cache_requests (cpu); /* before advancing cycles. */
929 apply_latency_adjustments (cpu); /* must go first. */
930 update_target_latencies (cpu); /* must go next. */
931 frv_model_advance_cycles (cpu, ps->vliw_cycles);
933 PROFILE_MODEL_LOAD_STALL_CYCLES (p) += ps->vliw_load_stall;
935 /* Check the interrupt timer. cycles contains the total cycle count. */
938 cycles = PROFILE_MODEL_TOTAL_CYCLES (p);
939 if (timer->current % timer->value
940 + (cycles - timer->current) >= timer->value)
941 frv_queue_external_interrupt (cpu, timer->interrupt);
942 timer->current = cycles;
945 ps->past_first_p = 0; /* Next one will be the first in a new VLIW. */
946 ps->branch_address = -1;
949 ps->past_first_p = 1;
951 switch (STATE_ARCHITECTURE (sd)->mach)
954 fr400_model_insn_after (cpu, last_p, cycles);
957 fr500_model_insn_after (cpu, last_p, cycles);
965 frvbf_model_branch (SIM_CPU *current_cpu, PCADDR target, int hint)
967 /* Record the hint and branch address for use in profiling. */
968 FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (current_cpu);
969 ps->branch_hint = hint;
970 ps->branch_address = target;
973 /* Top up the latency of the given GR by the given number of cycles. */
975 update_GR_latency (SIM_CPU *cpu, INT out_GR, int cycles)
979 FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
980 int *gr = ps->gr_latency;
981 if (gr[out_GR] < cycles)
987 decrease_GR_busy (SIM_CPU *cpu, INT in_GR, int cycles)
991 FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
992 int *gr = ps->gr_busy;
997 /* Top up the latency of the given double GR by the number of cycles. */
999 update_GRdouble_latency (SIM_CPU *cpu, INT out_GR, int cycles)
1003 FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
1004 int *gr = ps->gr_latency;
1005 if (gr[out_GR] < cycles)
1006 gr[out_GR] = cycles;
1007 if (out_GR < 63 && gr[out_GR + 1] < cycles)
1008 gr[out_GR + 1] = cycles;
1013 update_GR_latency_for_load (SIM_CPU *cpu, INT out_GR, int cycles)
1017 FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
1018 int *gr = ps->gr_latency;
1020 /* The latency of the GR will be at least the number of cycles used
1022 if (gr[out_GR] < cycles)
1023 gr[out_GR] = cycles;
1025 /* The latency will also depend on how long it takes to retrieve the
1026 data from the cache or memory. Assume that the load is issued
1027 after the last cycle of the insn. */
1028 request_cache_load (cpu, out_GR, REGTYPE_NONE, cycles);
1033 update_GRdouble_latency_for_load (SIM_CPU *cpu, INT out_GR, int cycles)
1037 FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
1038 int *gr = ps->gr_latency;
1040 /* The latency of the GR will be at least the number of cycles used
1042 if (gr[out_GR] < cycles)
1043 gr[out_GR] = cycles;
1044 if (out_GR < 63 && gr[out_GR + 1] < cycles)
1045 gr[out_GR + 1] = cycles;
1047 /* The latency will also depend on how long it takes to retrieve the
1048 data from the cache or memory. Assume that the load is issued
1049 after the last cycle of the insn. */
1050 request_cache_load (cpu, out_GR, REGTYPE_NONE, cycles);
1055 update_GR_latency_for_swap (SIM_CPU *cpu, INT out_GR, int cycles)
1057 update_GR_latency_for_load (cpu, out_GR, cycles);
1060 /* Top up the latency of the given FR by the given number of cycles. */
1062 update_FR_latency (SIM_CPU *cpu, INT out_FR, int cycles)
1066 FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
1067 int *fr = ps->fr_latency;
1068 if (fr[out_FR] < cycles)
1069 fr[out_FR] = cycles;
1073 /* Top up the latency of the given double FR by the number of cycles. */
1075 update_FRdouble_latency (SIM_CPU *cpu, INT out_FR, int cycles)
1079 FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
1080 int *fr = ps->fr_latency;
1081 if (fr[out_FR] < cycles)
1082 fr[out_FR] = cycles;
1083 if (out_FR < 63 && fr[out_FR + 1] < cycles)
1084 fr[out_FR + 1] = cycles;
1089 update_FR_latency_for_load (SIM_CPU *cpu, INT out_FR, int cycles)
1093 FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
1094 int *fr = ps->fr_latency;
1096 /* The latency of the FR will be at least the number of cycles used
1098 if (fr[out_FR] < cycles)
1099 fr[out_FR] = cycles;
1101 /* The latency will also depend on how long it takes to retrieve the
1102 data from the cache or memory. Assume that the load is issued
1103 after the last cycle of the insn. */
1104 request_cache_load (cpu, out_FR, REGTYPE_FR, cycles);
1109 update_FRdouble_latency_for_load (SIM_CPU *cpu, INT out_FR, int cycles)
1113 FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
1114 int *fr = ps->fr_latency;
1116 /* The latency of the FR will be at least the number of cycles used
1118 if (fr[out_FR] < cycles)
1119 fr[out_FR] = cycles;
1120 if (out_FR < 63 && fr[out_FR + 1] < cycles)
1121 fr[out_FR + 1] = cycles;
1123 /* The latency will also depend on how long it takes to retrieve the
1124 data from the cache or memory. Assume that the load is issued
1125 after the last cycle of the insn. */
1126 request_cache_load (cpu, out_FR, REGTYPE_FR, cycles);
1130 /* Top up the post-processing time of the given FR by the given number of
1133 update_ACC_ptime (SIM_CPU *cpu, INT out_ACC, int cycles)
1137 FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
1138 /* No load can be pending on this register. Apply the cycles
1139 directly to the latency of the register. */
1140 int *acc = ps->acc_latency;
1141 acc[out_ACC] += cycles;
1146 decrease_ACC_busy (SIM_CPU *cpu, INT out_ACC, int cycles)
1150 FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
1151 int *acc = ps->acc_busy;
1152 acc[out_ACC] -= cycles;
1153 if (ps->acc_busy_adjust[out_ACC] >= 0
1154 && cycles > ps->acc_busy_adjust[out_ACC])
1155 ps->acc_busy_adjust[out_ACC] = cycles;
1160 decrease_FR_busy (SIM_CPU *cpu, INT out_FR, int cycles)
1164 FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
1165 int *fr = ps->fr_busy;
1166 fr[out_FR] -= cycles;
1167 if (ps->fr_busy_adjust[out_FR] >= 0
1168 && cycles > ps->fr_busy_adjust[out_FR])
1169 ps->fr_busy_adjust[out_FR] = cycles;
1174 increase_FR_busy (SIM_CPU *cpu, INT out_FR, int cycles)
1178 FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
1179 int *fr = ps->fr_busy;
1180 fr[out_FR] += cycles;
1184 /* Top up the latency of the given ACC by the given number of cycles. */
1186 update_ACC_latency (SIM_CPU *cpu, INT out_ACC, int cycles)
1190 FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
1191 int *acc = ps->acc_latency;
1192 if (acc[out_ACC] < cycles)
1193 acc[out_ACC] = cycles;
1197 /* Top up the latency of the given CCR by the given number of cycles. */
1199 update_CCR_latency (SIM_CPU *cpu, INT out_CCR, int cycles)
1203 FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
1204 int *ccr = ps->ccr_latency;
1205 if (ccr[out_CCR] < cycles)
1206 ccr[out_CCR] = cycles;
1210 /* Top up the latency of the given integer division resource by the given
1211 number of cycles. */
1213 update_idiv_resource_latency (SIM_CPU *cpu, INT in_resource, int cycles)
1215 /* operate directly on the busy cycles since each resource can only
1216 be used once in a VLIW insn. */
1217 FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
1218 int *r = ps->idiv_busy;
1219 r[in_resource] = cycles;
1222 /* Set the latency of the given resource to the given number of cycles. */
1224 update_fdiv_resource_latency (SIM_CPU *cpu, INT in_resource, int cycles)
1226 /* operate directly on the busy cycles since each resource can only
1227 be used once in a VLIW insn. */
1228 FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
1229 int *r = ps->fdiv_busy;
1230 r[in_resource] = cycles;
1233 /* Set the latency of the given resource to the given number of cycles. */
1235 update_fsqrt_resource_latency (SIM_CPU *cpu, INT in_resource, int cycles)
1237 /* operate directly on the busy cycles since each resource can only
1238 be used once in a VLIW insn. */
1239 FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
1240 int *r = ps->fsqrt_busy;
1241 r[in_resource] = cycles;
1244 /* Set the branch penalty to the given number of cycles. */
1246 update_branch_penalty (SIM_CPU *cpu, int cycles)
1248 /* operate directly on the busy cycles since only one branch can occur
1250 FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
1251 ps->branch_penalty = cycles;
1254 /* Check the availability of the given GR register and update the number
1255 of cycles the current VLIW insn must wait until it is available. */
1257 vliw_wait_for_GR (SIM_CPU *cpu, INT in_GR)
1259 FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
1260 int *gr = ps->gr_busy;
1261 /* If the latency of the register is greater than the current wait
1262 then update the current wait. */
1263 if (in_GR >= 0 && gr[in_GR] > ps->vliw_wait)
1265 if (TRACE_INSN_P (cpu))
1266 sprintf (hazard_name, "Data hazard for gr%d:", in_GR);
1267 ps->vliw_wait = gr[in_GR];
1271 /* Check the availability of the given GR register and update the number
1272 of cycles the current VLIW insn must wait until it is available. */
1274 vliw_wait_for_GRdouble (SIM_CPU *cpu, INT in_GR)
1276 FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
1277 int *gr = ps->gr_busy;
1278 /* If the latency of the register is greater than the current wait
1279 then update the current wait. */
1282 if (gr[in_GR] > ps->vliw_wait)
1284 if (TRACE_INSN_P (cpu))
1285 sprintf (hazard_name, "Data hazard for gr%d:", in_GR);
1286 ps->vliw_wait = gr[in_GR];
1288 if (in_GR < 63 && gr[in_GR + 1] > ps->vliw_wait)
1290 if (TRACE_INSN_P (cpu))
1291 sprintf (hazard_name, "Data hazard for gr%d:", in_GR + 1);
1292 ps->vliw_wait = gr[in_GR + 1];
1297 /* Check the availability of the given FR register and update the number
1298 of cycles the current VLIW insn must wait until it is available. */
1300 vliw_wait_for_FR (SIM_CPU *cpu, INT in_FR)
1302 FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
1303 int *fr = ps->fr_busy;
1304 /* If the latency of the register is greater than the current wait
1305 then update the current wait. */
1306 if (in_FR >= 0 && fr[in_FR] > ps->vliw_wait)
1308 if (TRACE_INSN_P (cpu))
1309 sprintf (hazard_name, "Data hazard for fr%d:", in_FR);
1310 ps->vliw_wait = fr[in_FR];
1314 /* Check the availability of the given GR register and update the number
1315 of cycles the current VLIW insn must wait until it is available. */
1317 vliw_wait_for_FRdouble (SIM_CPU *cpu, INT in_FR)
1319 FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
1320 int *fr = ps->fr_busy;
1321 /* If the latency of the register is greater than the current wait
1322 then update the current wait. */
1325 if (fr[in_FR] > ps->vliw_wait)
1327 if (TRACE_INSN_P (cpu))
1328 sprintf (hazard_name, "Data hazard for fr%d:", in_FR);
1329 ps->vliw_wait = fr[in_FR];
1331 if (in_FR < 63 && fr[in_FR + 1] > ps->vliw_wait)
1333 if (TRACE_INSN_P (cpu))
1334 sprintf (hazard_name, "Data hazard for fr%d:", in_FR + 1);
1335 ps->vliw_wait = fr[in_FR + 1];
1340 /* Check the availability of the given CCR register and update the number
1341 of cycles the current VLIW insn must wait until it is available. */
1343 vliw_wait_for_CCR (SIM_CPU *cpu, INT in_CCR)
1345 FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
1346 int *ccr = ps->ccr_busy;
1347 /* If the latency of the register is greater than the current wait
1348 then update the current wait. */
1349 if (in_CCR >= 0 && ccr[in_CCR] > ps->vliw_wait)
1351 if (TRACE_INSN_P (cpu))
1354 sprintf (hazard_name, "Data hazard for icc%d:", in_CCR-4);
1356 sprintf (hazard_name, "Data hazard for fcc%d:", in_CCR);
1358 ps->vliw_wait = ccr[in_CCR];
1362 /* Check the availability of the given ACC register and update the number
1363 of cycles the current VLIW insn must wait until it is available. */
1365 vliw_wait_for_ACC (SIM_CPU *cpu, INT in_ACC)
1367 FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
1368 int *acc = ps->acc_busy;
1369 /* If the latency of the register is greater than the current wait
1370 then update the current wait. */
1371 if (in_ACC >= 0 && acc[in_ACC] > ps->vliw_wait)
1373 if (TRACE_INSN_P (cpu))
1374 sprintf (hazard_name, "Data hazard for acc%d:", in_ACC);
1375 ps->vliw_wait = acc[in_ACC];
1379 /* Check the availability of the given integer division resource and update
1380 the number of cycles the current VLIW insn must wait until it is available.
1383 vliw_wait_for_idiv_resource (SIM_CPU *cpu, INT in_resource)
1385 FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
1386 int *r = ps->idiv_busy;
1387 /* If the latency of the resource is greater than the current wait
1388 then update the current wait. */
1389 if (r[in_resource] > ps->vliw_wait)
1391 if (TRACE_INSN_P (cpu))
1393 sprintf (hazard_name, "Resource hazard for integer division in slot I%d:", in_resource);
1395 ps->vliw_wait = r[in_resource];
1399 /* Check the availability of the given float division resource and update
1400 the number of cycles the current VLIW insn must wait until it is available.
1403 vliw_wait_for_fdiv_resource (SIM_CPU *cpu, INT in_resource)
1405 FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
1406 int *r = ps->fdiv_busy;
1407 /* If the latency of the resource is greater than the current wait
1408 then update the current wait. */
1409 if (r[in_resource] > ps->vliw_wait)
1411 if (TRACE_INSN_P (cpu))
1413 sprintf (hazard_name, "Resource hazard for integer division in slot I%d:", in_resource);
1415 ps->vliw_wait = r[in_resource];
1419 /* Check the availability of the given float square root resource and update
1420 the number of cycles the current VLIW insn must wait until it is available.
1423 vliw_wait_for_fsqrt_resource (SIM_CPU *cpu, INT in_resource)
1425 FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
1426 int *r = ps->fsqrt_busy;
1427 /* If the latency of the resource is greater than the current wait
1428 then update the current wait. */
1429 if (r[in_resource] > ps->vliw_wait)
1431 if (TRACE_INSN_P (cpu))
1433 sprintf (hazard_name, "Resource hazard for integer division in slot I%d:", in_resource);
1435 ps->vliw_wait = r[in_resource];
1439 /* Run the caches until all requests for the given register(s) are satisfied. */
1441 load_wait_for_GR (SIM_CPU *cpu, INT in_GR)
1446 while (load_pending_for_register (cpu, in_GR, 1/*words*/, REGTYPE_NONE))
1448 frv_model_advance_cycles (cpu, 1);
1453 FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
1454 ps->vliw_wait += wait;
1455 ps->vliw_load_stall += wait;
1456 if (TRACE_INSN_P (cpu))
1457 sprintf (hazard_name, "Data hazard for gr%d:", in_GR);
1463 load_wait_for_FR (SIM_CPU *cpu, INT in_FR)
1467 FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
1470 while (load_pending_for_register (cpu, in_FR, 1/*words*/, REGTYPE_FR))
1472 frv_model_advance_cycles (cpu, 1);
1475 /* Post processing time may have been added to the register's
1476 latency after the loads were processed. Account for that too.
1482 frv_model_advance_cycles (cpu, fr[in_FR]);
1484 /* Update the vliw_wait with the number of cycles we waited for the
1485 load and any post-processing. */
1488 ps->vliw_wait += wait;
1489 ps->vliw_load_stall += wait;
1490 if (TRACE_INSN_P (cpu))
1491 sprintf (hazard_name, "Data hazard for fr%d:", in_FR);
1497 load_wait_for_GRdouble (SIM_CPU *cpu, INT in_GR)
1502 while (load_pending_for_register (cpu, in_GR, 2/*words*/, REGTYPE_NONE))
1504 frv_model_advance_cycles (cpu, 1);
1509 FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
1510 ps->vliw_wait += wait;
1511 ps->vliw_load_stall += wait;
1512 if (TRACE_INSN_P (cpu))
1513 sprintf (hazard_name, "Data hazard for gr%d:", in_GR);
1519 load_wait_for_FRdouble (SIM_CPU *cpu, INT in_FR)
1523 FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
1526 while (load_pending_for_register (cpu, in_FR, 2/*words*/, REGTYPE_FR))
1528 frv_model_advance_cycles (cpu, 1);
1531 /* Post processing time may have been added to the registers'
1532 latencies after the loads were processed. Account for that too.
1538 frv_model_advance_cycles (cpu, fr[in_FR]);
1544 wait += fr[in_FR + 1];
1545 frv_model_advance_cycles (cpu, fr[in_FR + 1]);
1548 /* Update the vliw_wait with the number of cycles we waited for the
1549 load and any post-processing. */
1552 ps->vliw_wait += wait;
1553 ps->vliw_load_stall += wait;
1554 if (TRACE_INSN_P (cpu))
1555 sprintf (hazard_name, "Data hazard for fr%d:", in_FR);
1561 enforce_full_fr_latency (SIM_CPU *cpu, INT in_FR)
1563 FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
1564 ps->fr_busy_adjust [in_FR] = -1;
1567 /* Calculate how long the post processing for a floating point insn must
1568 wait for resources to become available. */
1570 post_wait_for_FR (SIM_CPU *cpu, INT in_FR)
1572 FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
1573 int *fr = ps->fr_busy;
1575 if (in_FR >= 0 && fr[in_FR] > ps->post_wait)
1577 ps->post_wait = fr[in_FR];
1578 if (TRACE_INSN_P (cpu))
1579 sprintf (hazard_name, "Data hazard for fr%d:", in_FR);
1583 /* Calculate how long the post processing for a floating point insn must
1584 wait for resources to become available. */
1586 post_wait_for_FRdouble (SIM_CPU *cpu, INT in_FR)
1588 FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
1589 int *fr = ps->fr_busy;
1593 if (fr[in_FR] > ps->post_wait)
1595 ps->post_wait = fr[in_FR];
1596 if (TRACE_INSN_P (cpu))
1597 sprintf (hazard_name, "Data hazard for fr%d:", in_FR);
1599 if (in_FR < 63 && fr[in_FR + 1] > ps->post_wait)
1601 ps->post_wait = fr[in_FR + 1];
1602 if (TRACE_INSN_P (cpu))
1603 sprintf (hazard_name, "Data hazard for fr%d:", in_FR + 1);
1609 post_wait_for_ACC (SIM_CPU *cpu, INT in_ACC)
1611 FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
1612 int *acc = ps->acc_busy;
1614 if (in_ACC >= 0 && acc[in_ACC] > ps->post_wait)
1616 ps->post_wait = acc[in_ACC];
1617 if (TRACE_INSN_P (cpu))
1618 sprintf (hazard_name, "Data hazard for acc%d:", in_ACC);
1623 post_wait_for_CCR (SIM_CPU *cpu, INT in_CCR)
1625 FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
1626 int *ccr = ps->ccr_busy;
1628 if (in_CCR >= 0 && ccr[in_CCR] > ps->post_wait)
1630 ps->post_wait = ccr[in_CCR];
1631 if (TRACE_INSN_P (cpu))
1634 sprintf (hazard_name, "Data hazard for icc%d:", in_CCR - 4);
1636 sprintf (hazard_name, "Data hazard for fcc%d:", in_CCR);
1642 post_wait_for_fdiv (SIM_CPU *cpu, INT slot)
1644 FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
1645 int *fdiv = ps->fdiv_busy;
1647 /* Multiple floating point divisions in the same slot need only wait 1
1649 if (fdiv[slot] > 0 && 1 > ps->post_wait)
1652 if (TRACE_INSN_P (cpu))
1654 sprintf (hazard_name, "Resource hazard for floating point division in slot F%d:", slot);
1660 post_wait_for_fsqrt (SIM_CPU *cpu, INT slot)
1662 FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
1663 int *fsqrt = ps->fsqrt_busy;
1665 /* Multiple floating point square roots in the same slot need only wait 1
1667 if (fsqrt[slot] > 0 && 1 > ps->post_wait)
1670 if (TRACE_INSN_P (cpu))
1672 sprintf (hazard_name, "Resource hazard for square root in slot F%d:", slot);
1677 /* Print cpu-specific profile information. */
1678 #define COMMAS(n) sim_add_commas (comma_buf, sizeof (comma_buf), (n))
1681 print_cache (SIM_CPU *cpu, FRV_CACHE *cache, const char *cache_name)
1683 SIM_DESC sd = CPU_STATE (cpu);
1690 sim_io_printf (sd, " %s Cache\n\n", cache_name);
1691 accesses = cache->statistics.accesses;
1692 sim_io_printf (sd, " Total accesses: %s\n", COMMAS (accesses));
1696 unsigned hits = cache->statistics.hits;
1697 sim_io_printf (sd, " Hits: %s\n", COMMAS (hits));
1698 rate = (float)hits / accesses;
1699 sim_io_printf (sd, " Hit rate: %.2f%%\n", rate * 100);
1703 sim_io_printf (sd, " Model %s has no %s cache\n",
1704 MODEL_NAME (CPU_MODEL (cpu)), cache_name);
1706 sim_io_printf (sd, "\n");
1714 "FM1", "FM1", "FM01",
1720 print_parallel (SIM_CPU *cpu, int verbose)
1722 SIM_DESC sd = CPU_STATE (cpu);
1723 PROFILE_DATA *p = CPU_PROFILE_DATA (cpu);
1724 FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
1725 unsigned total, vliw;
1729 sim_io_printf (sd, "Model %s Parallelization\n\n",
1730 MODEL_NAME (CPU_MODEL (cpu)));
1732 total = PROFILE_TOTAL_INSN_COUNT (p);
1733 sim_io_printf (sd, " Total instructions: %s\n", COMMAS (total));
1734 vliw = ps->vliw_insns;
1735 sim_io_printf (sd, " VLIW instructions: %s\n", COMMAS (vliw));
1736 average = (float)total / vliw;
1737 sim_io_printf (sd, " Average VLIW length: %.2f\n", average);
1738 average = (float)PROFILE_MODEL_TOTAL_CYCLES (p) / vliw;
1739 sim_io_printf (sd, " Cycles per VLIW instruction: %.2f\n", average);
1740 average = (float)total / PROFILE_MODEL_TOTAL_CYCLES (p);
1741 sim_io_printf (sd, " Instructions per cycle: %.2f\n", average);
1747 int max_name_len = 0;
1748 for (i = UNIT_NIL + 1; i < UNIT_NUM_UNITS; ++i)
1751 if (INSNS_IN_SLOT (i) > max_val)
1752 max_val = INSNS_IN_SLOT (i);
1753 len = strlen (slot_names[i]);
1754 if (len > max_name_len)
1759 sim_io_printf (sd, "\n");
1760 sim_io_printf (sd, " Instructions per slot:\n");
1761 sim_io_printf (sd, "\n");
1762 for (i = UNIT_NIL + 1; i < UNIT_NUM_UNITS; ++i)
1764 if (INSNS_IN_SLOT (i) != 0)
1766 sim_io_printf (sd, " %*s: %*s: ",
1767 max_name_len, slot_names[i],
1768 max_val < 10000 ? 5 : 10,
1769 COMMAS (INSNS_IN_SLOT (i)));
1770 sim_profile_print_bar (sd, PROFILE_HISTOGRAM_WIDTH,
1773 sim_io_printf (sd, "\n");
1776 } /* details to print */
1779 sim_io_printf (sd, "\n");
1783 frv_profile_info (SIM_CPU *cpu, int verbose)
1785 /* FIXME: Need to add smp support. */
1786 PROFILE_DATA *p = CPU_PROFILE_DATA (cpu);
1788 #if WITH_PROFILE_PARALLEL_P
1789 if (PROFILE_FLAGS (p) [PROFILE_PARALLEL_IDX])
1790 print_parallel (cpu, verbose);
1793 #if WITH_PROFILE_CACHE_P
1794 if (PROFILE_FLAGS (p) [PROFILE_CACHE_IDX])
1796 SIM_DESC sd = CPU_STATE (cpu);
1797 sim_io_printf (sd, "Model %s Cache Statistics\n\n",
1798 MODEL_NAME (CPU_MODEL (cpu)));
1799 print_cache (cpu, CPU_INSN_CACHE (cpu), "Instruction");
1800 print_cache (cpu, CPU_DATA_CACHE (cpu), "Data");
1802 #endif /* WITH_PROFILE_CACHE_P */
1805 /* A hack to get registers referenced for profiling. */
1806 SI frv_ref_SI (SI ref) {return ref;}
1807 #endif /* WITH_PROFILE_MODEL_P */