/*  This file is part of the program psim.

    Copyright (C) 1994-1995, Andrew Cagney <cagney@highland.com.au>

    This program is free software; you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
    the Free Software Foundation; either version 2 of the License, or
    (at your option) any later version.

    This program is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    GNU General Public License for more details.

    You should have received a copy of the GNU General Public License
    along with this program; if not, write to the Free Software
    Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.

    */


#ifndef _CPU_C_
#define _CPU_C_

#ifndef STATIC_INLINE_CPU
#define STATIC_INLINE_CPU STATIC_INLINE
#endif

#include <setjmp.h>

#include "cpu.h"
#include "idecode.h"

#ifdef HAVE_STRING_H
#include <string.h>
#else
#ifdef HAVE_STRINGS_H
#include <strings.h>
#endif
#endif

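/* A cpu object bundles together the state belonging to a single
   simulated processor: its register file, its view of memory (the raw
   core plus the translated instruction and data maps), pending
   interrupt state, links back to the containing simulation, and the
   bookkeeping needed for the time base, decrementer and optional
   instruction decode cache. */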
struct _cpu {

  /* the registers */
  registers regs;

  /* current instruction address */
  unsigned_word program_counter;

  /* the memory maps */
  core *physical; /* all of memory */
  vm *virtual;
  vm_instruction_map *instruction_map; /* instructions */
  vm_data_map *data_map; /* data */

  /* current state of interrupt inputs */
  int external_exception_pending;

  /* the system this processor is contained within */
  cpu_mon *monitor;
  psim *system;
  event_queue *events;
  int cpu_nr;

  /* Current CPU model information */
  model_data *model_ptr;

#if WITH_IDECODE_CACHE_SIZE
  /* a cache to store cracked instructions */
  idecode_cache icache[WITH_IDECODE_CACHE_SIZE];
#endif

  /* address reservation: keep the physical address and the contents
     of memory at that address */
  memory_reservation reservation;

  /* offset from event time to this cpu's idea of the local time */
  signed64 time_base_local_time;
  signed64 decrementer_local_time;
  event_entry_tag decrementer_event;

};


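/* Create (but do not initialize) a new processor.  The virtual memory
   maps are layered on top of the supplied physical memory and, when a
   specific CPU model is being simulated, the model-specific state is
   allocated here as well. */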
INLINE_CPU cpu *
cpu_create(psim *system,
           core *memory,
           event_queue *events,
           cpu_mon *monitor,
           int cpu_nr)
{
  cpu *processor = ZALLOC(cpu);

  /* create the virtual memory map from the core */
  processor->physical = memory;
  processor->virtual = vm_create(memory);
  processor->instruction_map = vm_create_instruction_map(processor->virtual);
  processor->data_map = vm_create_data_map(processor->virtual);

  if (CURRENT_MODEL_ISSUE > 0)
    processor->model_ptr = model_create (processor);

  /* link back to core system */
  processor->system = system;
  processor->events = events;
  processor->cpu_nr = cpu_nr;
  processor->monitor = monitor;

  return processor;
}


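/* Reset the processor to its power-on state: clear the register file
   and, when a model is attached, reset the model-specific state too. */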
INLINE_CPU void
cpu_init(cpu *processor)
{
  memset(&processor->regs, 0, sizeof(processor->regs));
  /* FIXME - should any of the VM state be initialized also? */

  if (CURRENT_MODEL_ISSUE > 0)
    model_init (processor->model_ptr);
}
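
/* Illustrative only: a client such as psim typically drives these
   routines in roughly the following order (the variable names other
   than the cpu_*() functions defined in this file are assumptions):

     cpu *processor = cpu_create(system, memory, events, monitor, 0);
     cpu_init(processor);
     cpu_set_program_counter(processor, entry_point);

   after which the main simulation loop fetches and issues instructions
   through the map returned by cpu_instruction_map(). */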


/* find one's way home */

INLINE_CPU psim *
cpu_system(cpu *processor)
{
  return processor->system;
}

INLINE_CPU int
cpu_nr(cpu *processor)
{
  return processor->cpu_nr;
}

INLINE_CPU event_queue *
cpu_event_queue(cpu *processor)
{
  return processor->events;
}

INLINE_CPU cpu_mon *
cpu_monitor(cpu *processor)
{
  return processor->monitor;
}

INLINE_CPU model_data *
cpu_model(cpu *processor)
{
  return processor->model_ptr;
}

/* The processor's local concept of time */

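/* Rather than storing the time base register directly, the simulator
   records the offset between the (global) event queue's notion of time
   and this processor's time base; a read is then just the current
   event queue time minus that offset.  The decrementer uses a similar
   trick, recording the event queue time at which it will reach zero. */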
INLINE_CPU signed64
cpu_get_time_base(cpu *processor)
{
  return (event_queue_time(processor->events)
          - processor->time_base_local_time);
}

INLINE_CPU void
cpu_set_time_base(cpu *processor,
                  signed64 time_base)
{
  processor->time_base_local_time = (event_queue_time(processor->events)
                                     - time_base);
}

INLINE_CPU signed32
cpu_get_decrementer(cpu *processor)
{
  return (processor->decrementer_local_time
          - event_queue_time(processor->events));
}

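/* Handler run by the event queue when the decrementer is due to go
   negative.  If the interrupt cannot be delivered immediately (for
   instance because it is currently masked) the event re-schedules
   itself and retries on subsequent ticks. */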
STATIC_INLINE_CPU void
cpu_decrement_event(event_queue *queue,
                    void *data)
{
  cpu *processor = (cpu*)data;
  if (!decrementer_interrupt(processor)) {
    processor->decrementer_event = event_queue_schedule(processor->events,
                                                        1, /* NOW! */
                                                        cpu_decrement_event,
                                                        processor);
  }
}

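/* Load a new value into the decrementer.  Any previously scheduled
   decrementer event is cancelled; if the load flips the register from
   non-negative to negative an interrupt event is raised immediately,
   while a non-negative value instead schedules a fresh event for the
   moment the decrementer will next reach zero. */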
INLINE_CPU void
cpu_set_decrementer(cpu *processor,
                    signed32 decrementer)
{
  signed64 old_decrementer = (processor->decrementer_local_time
                              - event_queue_time(processor->events));
  event_queue_deschedule(processor->events, processor->decrementer_event);
  processor->decrementer_local_time = (event_queue_time(processor->events)
                                       + decrementer);
  if (decrementer < 0 && old_decrementer >= 0)
    /* a decrementer interrupt occurs if the sign of the decrementer
       register is changed by the load operation */
    processor->decrementer_event = event_queue_schedule(processor->events,
                                                        1, /* NOW! */
                                                        cpu_decrement_event,
                                                        processor);
  else if (decrementer >= 0)
    processor->decrementer_event = event_queue_schedule(processor->events,
                                                        decrementer,
                                                        cpu_decrement_event,
                                                        processor);
}


/* program counter manipulation */

INLINE_CPU void
cpu_set_program_counter(cpu *processor,
                        unsigned_word new_program_counter)
{
  processor->program_counter = new_program_counter;
}

INLINE_CPU unsigned_word
cpu_get_program_counter(cpu *processor)
{
  return processor->program_counter;
}

INLINE_CPU void
cpu_restart(cpu *processor,
            unsigned_word nia)
{
  processor->program_counter = nia;
  psim_restart(processor->system, processor->cpu_nr);
}

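/* Stop simulating this processor.  A NULL processor argument is
   treated as a fatal error (there is nothing to hand back to psim);
   otherwise any attached model is told to stop, the current
   instruction address is recorded, and control returns to psim along
   with the reason for the halt. */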
INLINE_CPU void
cpu_halt(cpu *processor,
         unsigned_word cia,
         stop_reason reason,
         int signal)
{
  if (processor == NULL) {
    error("cpu_halt() processor=NULL, cia=0x%x, reason=%d, signal=%d\n",
          cia,
          reason,
          signal);
  }
  else {
    if (CURRENT_MODEL_ISSUE > 0)
      model_halt(processor->model_ptr);

    processor->program_counter = cia;
    psim_halt(processor->system, processor->cpu_nr, cia, reason, signal);
  }
}


#if WITH_IDECODE_CACHE_SIZE
/* allow access to the cpu's instruction cache */
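/* The cache is direct mapped: an instruction's word address (cia / 4)
   modulo the cache size selects its slot, so two addresses that
   collide simply overwrite each other's entries. */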
INLINE_CPU idecode_cache *
cpu_icache_entry(cpu *processor,
                 unsigned_word cia)
{
  return &processor->icache[cia / 4 % WITH_IDECODE_CACHE_SIZE];
}


INLINE_CPU void
cpu_flush_icache(cpu *processor)
{
  int i;
  /* force all addresses to 0xff... so that they never hit */
  for (i = 0; i < WITH_IDECODE_CACHE_SIZE; i++)
    processor->icache[i].address = MASK(0, 63);
}
#endif


/* address map revelation */

INLINE_CPU vm_instruction_map *
cpu_instruction_map(cpu *processor)
{
  return processor->instruction_map;
}

INLINE_CPU vm_data_map *
cpu_data_map(cpu *processor)
{
  return processor->data_map;
}


/* reservation access */

INLINE_CPU memory_reservation *
cpu_reservation(cpu *processor)
{
  return &processor->reservation;
}


/* register access */

INLINE_CPU registers *
cpu_registers(cpu *processor)
{
  return &processor->regs;
}

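/* Called after a change to the registers that control address
   translation (MSR, segment registers, or the relevant SPRs).  Any
   cached decoded instructions are discarded, since they may have been
   fetched through the old mapping, and the virtual memory layer is
   told to recompute its view of the context. */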
INLINE_CPU void
cpu_synchronize_context(cpu *processor)
{
#if WITH_IDECODE_CACHE_SIZE
  /* kill off the cache */
  cpu_flush_icache(processor);
#endif

  /* update virtual memory */
  vm_synchronize_context(processor->virtual,
                         processor->regs.spr,
                         processor->regs.sr,
                         processor->regs.msr);
}


/* might again be useful one day */

INLINE_CPU void
cpu_print_info(cpu *processor, int verbose)
{
}

#endif /* _CPU_C_ */