/* This file is part of the program psim.

   Copyright (C) 1994-1998, Andrew Cagney <cagney@highland.com.au>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 2 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, write to the Free Software
   Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.

   */
#if !defined (SIM_EVENTS_POLL_RATE)
#define SIM_EVENTS_POLL_RATE 0x1000
#endif
/* The event queue maintains a single absolute time using two
   variables.

   TIME_OF_EVENT: this holds the time at which the next event is meant
   to occur.  If no event is pending, it holds the time of the last
   event.

   TIME_FROM_EVENT: the current distance from TIME_OF_EVENT.  If an
   event is pending, this will be positive.  If no future event is
   pending, this will be negative.  This variable is decremented once
   for each iteration of a clock cycle.

   Initially, the clock is started at time one (1) with TIME_OF_EVENT
   == 0 and TIME_FROM_EVENT == -1.

   Clearly there is a bug in that this code assumes that the absolute
   time counter will never become greater than 2^62. */
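/* Illustration (not part of the original source): how the two
   variables combine.  The names below are the struct _event_queue
   fields used throughout this file, and the arithmetic is exactly what
   event_queue_time() computes:

       current time == time_of_event - time_from_event

   So with the initial state TIME_OF_EVENT == 0 and TIME_FROM_EVENT ==
   -1 the clock reads 0 - (-1) == 1, and each event_queue_tick()
   decrements TIME_FROM_EVENT, advancing the clock by one.  */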
typedef struct _event_entry event_entry;
struct _event_entry {
  void *data;
  event_handler *handler;
  signed64 time_of_event;
  event_entry *next;
};

struct _event_queue {
  int processing;
  event_entry *queue;
  event_entry *volatile held;
  event_entry *volatile *volatile held_end;
  signed64 time_of_event;
  signed64 time_from_event;
};
sim_events_poll (void *data)
  event_queue *queue = data;
  /* just re-schedule in SIM_EVENTS_POLL_RATE ticks time */
  event_queue_schedule (queue, SIM_EVENTS_POLL_RATE, sim_events_poll, queue);
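/* Illustrative sketch (not in the original source): sim_events_poll
   above shows the usual way to get a periodic event - the handler
   simply re-schedules itself.  A hypothetical handler firing every 100
   ticks could follow the same pattern; my_timer and the period are
   invented for the example, and the handler signature is the one
   assumed by events.h:

       static void
       my_timer (void *data)
       {
         event_queue *queue = data;
         ... do the periodic work ...
         event_queue_schedule (queue, 100, my_timer, queue);
       }
   */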
event_queue_create(void)
  event_queue *new_event_queue = ZALLOC(event_queue);

  new_event_queue->processing = 0;
  new_event_queue->queue = NULL;
  new_event_queue->held = NULL;
  new_event_queue->held_end = &new_event_queue->held;

  /* both times are already zero */
  return new_event_queue;
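/* Illustrative usage (not part of the original source): a simulator
   front end would typically create the queue once and re-initialise it
   before each run.  The exact prototypes live in events.h, which is
   not shown here, so treat this as a sketch:

       event_queue *events = event_queue_create ();
       event_queue_init (events);

   event_queue_init() drains anything still queued, rewinds the clock
   to time one and schedules the initial sim_events_poll event, so the
   queue is never empty afterwards.  */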
event_queue_init(event_queue *queue)
  event_entry *event;

  /* drain the interrupt queue */
#if defined(HAVE_SIGPROCMASK) && defined(SIG_SETMASK)
  sigfillset(&new_mask);
  /*-LOCK-*/ sigprocmask(SIG_SETMASK, &new_mask, &old_mask);

  event = queue->held;
  while (event != NULL) {
    event_entry *dead = event;
    event = event->next;
    zfree(dead);
  }
  queue->held = NULL;
  queue->held_end = &queue->held;
#if defined(HAVE_SIGPROCMASK) && defined(SIG_SETMASK)
  /*-UNLOCK-*/ sigprocmask(SIG_SETMASK, &old_mask, NULL);

  /* drain the normal queue */
  event = queue->queue;
  while (event != NULL) {
    event_entry *dead = event;
    event = event->next;
    zfree(dead);
  }
  queue->queue = NULL;

  /* wind time back to one */
  queue->processing = 0;
  queue->time_of_event = 0;
  queue->time_from_event = -1;

  /* schedule our initial counter event */
  event_queue_schedule (queue, 0, sim_events_poll, queue);
event_queue_time(event_queue *queue)
  return queue->time_of_event - queue->time_from_event;
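/* Illustrative sketch (not in the original source): a handler can call
   event_queue_time() to time-stamp its own activity.  The handler name
   is invented and plain printf is used only for illustration; it also
   assumes the handler was registered with the queue itself as DATA:

       static void
       report_tick (void *data)
       {
         event_queue *events = data;
         printf ("fired at tick %ld\n", (long) event_queue_time (events));
       }
   */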
STATIC_INLINE_EVENTS\
(void)
update_time_from_event(event_queue *events)
  signed64 current_time = event_queue_time(events);
  if (events->queue != NULL) {
    events->time_from_event = (events->queue->time_of_event - current_time);
    events->time_of_event = events->queue->time_of_event;
  }
  else {
    events->time_of_event = current_time - 1;
    events->time_from_event = -1;
  }
  if (WITH_TRACE && ppc_trace[trace_events])
    for (event = events->queue, i = 0;
         event != NULL;
         event = event->next, i++)
      TRACE(trace_events, ("event time-from-event - time %ld, delta %ld - event %d, tag 0x%lx, time %ld, handler 0x%lx, data 0x%lx\n",
            (long)current_time,
            (long)events->time_from_event,
            i,
            (long)event,
            (long)event->time_of_event,
            (long)event->handler,
            (long)event->data));
  ASSERT(current_time == event_queue_time(events));
STATIC_INLINE_EVENTS\
(void)
insert_event_entry(event_queue *events,
                   event_entry *new_event,
                   signed64 delta)
  event_entry *curr;
  event_entry **prev;
  signed64 time_of_event;

  if (delta < 0)
    error("what is past is past!\n");

  /* compute when the event should occur */
  time_of_event = event_queue_time(events) + delta;

  /* find the queue insertion point - things are time ordered */
  prev = &events->queue;
  curr = events->queue;
  while (curr != NULL && time_of_event >= curr->time_of_event) {
    ASSERT(curr->next == NULL
           || curr->time_of_event <= curr->next->time_of_event);
    prev = &curr->next;
    curr = curr->next;
  }
  ASSERT(curr == NULL || time_of_event < curr->time_of_event);

  /* insert it */
  new_event->next = curr;
  *prev = new_event;
  new_event->time_of_event = time_of_event;

  /* adjust the time until the first event */
  update_time_from_event(events);
event_queue_schedule(event_queue *events,
                     signed64 delta_time,
                     event_handler *handler,
                     void *data)
  event_entry *new_event = ZALLOC(event_entry);

  new_event->data = data;
  new_event->handler = handler;
  insert_event_entry(events, new_event, delta_time);
  TRACE(trace_events, ("event scheduled at %ld - tag 0x%lx - time %ld, handler 0x%lx, data 0x%lx\n",
        (long)event_queue_time(events),
        (long)new_event,
        (long)new_event->time_of_event,
        (long)new_event->handler,
        (long)new_event->data));
  return (event_entry_tag)new_event;
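/* Illustrative usage (not part of the original source): schedule a
   one-shot event DELTA ticks into the future and keep the returned tag
   in case the event later needs to be cancelled.  expire_handler, dev
   and the 1000-tick delay are invented for the example:

       event_entry_tag tag
         = event_queue_schedule (events, 1000, expire_handler, dev);

   The handler runs from event_queue_process() once the simulated clock
   reaches the computed time; it is never called from signal context.  */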
event_queue_schedule_after_signal(event_queue *events,
                                  signed64 delta_time,
                                  event_handler *handler,
                                  void *data)
  event_entry *new_event = ZALLOC(event_entry);

  new_event->data = data;
  new_event->handler = handler;
  new_event->time_of_event = delta_time; /* work it out later */
  new_event->next = NULL;

#if defined(HAVE_SIGPROCMASK) && defined(SIG_SETMASK)
  sigfillset(&new_mask);
  /*-LOCK-*/ sigprocmask(SIG_SETMASK, &new_mask, &old_mask);

  if (events->held == NULL) {
    events->held = new_event;
  }
  else {
    *events->held_end = new_event;
  }
  events->held_end = &new_event->next;

#if defined(HAVE_SIGPROCMASK) && defined(SIG_SETMASK)
  /*-UNLOCK-*/ sigprocmask(SIG_SETMASK, &old_mask, NULL);

  TRACE(trace_events, ("event scheduled at %ld - tag 0x%lx - time %ld, handler 0x%lx, data 0x%lx\n",
        (long)event_queue_time(events),
        (long)new_event,
        (long)new_event->time_of_event,
        (long)new_event->handler,
        (long)new_event->data));

  return (event_entry_tag)new_event;
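/* Illustrative usage (not part of the original source): this entry
   point exists so that an asynchronous signal handler (say, one
   catching SIGINT) can queue work without touching the time-ordered
   queue.  The new event sits on the HELD list until the next
   event_queue_tick() migrates it onto the real queue.  The names
   global_events and handle_interrupt are invented:

       static void
       sigint_handler (int signum)
       {
         event_queue_schedule_after_signal (global_events, 0,
                                            handle_interrupt, NULL);
       }
   */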
event_queue_deschedule(event_queue *events,
                       event_entry_tag event_to_remove)
  event_entry *to_remove = (event_entry*)event_to_remove;
  ASSERT((events->time_from_event >= 0) == (events->queue != NULL));
  if (event_to_remove != NULL) {
    event_entry *current;
    event_entry **ptr_to_current;
    for (ptr_to_current = &events->queue, current = *ptr_to_current;
         current != NULL && current != to_remove;
         ptr_to_current = &current->next, current = *ptr_to_current);
    if (current == to_remove) {
      *ptr_to_current = current->next;
      TRACE(trace_events, ("event descheduled at %ld - tag 0x%lx - time %ld, handler 0x%lx, data 0x%lx\n",
            (long)event_queue_time(events),
            (long)event_to_remove,
            (long)current->time_of_event,
            (long)current->handler,
            (long)current->data));
      zfree(to_remove);
      update_time_from_event(events);
    }
    else {
      TRACE(trace_events, ("event descheduled at %ld - tag 0x%lx - not found\n",
            (long)event_queue_time(events),
            (long)event_to_remove));
    }
  }
  ASSERT((events->time_from_event >= 0) == (events->queue != NULL));
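/* Illustrative usage (not part of the original source): cancelling a
   pending event with the tag returned when it was scheduled.  The
   names timeout_handler and dev are invented; a tag that is no longer
   on the queue is simply traced as "not found" by the loop above:

       event_entry_tag timeout
         = event_queue_schedule (events, 500, timeout_handler, dev);
       ... later, before the event fires ...
       event_queue_deschedule (events, timeout);
   */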
event_queue_tick(event_queue *events)
  signed64 time_from_event;

  /* we should only be here when the previous tick has been fully processed */
  ASSERT(!events->processing);

  /* move any events that were queued by any signal handlers onto the
     real event queue.  BTW: When inlining, having this code here,
     instead of in event_queue_process(), causes GCC to put greater
     weight on keeping the pointer EVENTS in a register.  This, in
     turn, results in better code being output. */
  if (events->held != NULL) {
    event_entry *held_events;
    event_entry *curr_event;

#if defined(HAVE_SIGPROCMASK) && defined(SIG_SETMASK)
    sigfillset(&new_mask);
    /*-LOCK-*/ sigprocmask(SIG_SETMASK, &new_mask, &old_mask);

    held_events = events->held;
    events->held = NULL;
    events->held_end = &events->held;

#if defined(HAVE_SIGPROCMASK) && defined(SIG_SETMASK)
    /*-UNLOCK-*/ sigprocmask(SIG_SETMASK, &old_mask, NULL);

    do {
      curr_event = held_events;
      held_events = curr_event->next;
      insert_event_entry(events, curr_event, curr_event->time_of_event);
    } while (held_events != NULL);
  }

  /* advance time, checking to see if we've reached time zero which
     would indicate the time for the next event has arrived */
  time_from_event = events->time_from_event;
  events->time_from_event = time_from_event - 1;
  return time_from_event == 0;
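/* Illustrative sketch (not part of the original source): how the two
   halves fit together in a simulator main loop.  event_queue_tick()
   advances the clock by one and returns non-zero exactly when the next
   event has become due, at which point event_queue_process() runs
   every event scheduled for this or an earlier time:

       while (keep_running) {
         ... execute one instruction ...
         if (event_queue_tick (events))
           event_queue_process (events);
       }

   keep_running and the instruction step are placeholders; the real
   driver lives elsewhere in psim.  */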
event_queue_process(event_queue *events)
  signed64 event_time = event_queue_time(events);

  ASSERT((events->time_from_event == -1 && events->queue != NULL)
         || events->processing); /* something to do */

  /* consume all events for this or earlier times.  Be careful to
     allow a new event to appear under our feet */
  events->processing = 1;
  while (events->queue != NULL
         && events->queue->time_of_event <= event_time) {
    event_entry *to_do = events->queue;
    event_handler *handler = to_do->handler;
    void *data = to_do->data;
    events->queue = to_do->next;
    TRACE(trace_events, ("event issued at %ld - tag 0x%lx - time %ld, handler 0x%lx, data 0x%lx\n",
          (long)event_time,
          (long)to_do,
          (long)to_do->time_of_event,
          (long)handler,
          (long)data));
    zfree(to_do);
    /* Always re-compute the time to the next event so that HANDLER()
       can safely insert new events into the queue. */
    update_time_from_event(events);
    handler(data);
  }
  events->processing = 0;

  ASSERT(events->time_from_event > 0);
  ASSERT(events->queue != NULL); /* always poll event */
#endif /* _EVENTS_C_ */