1 /* This file is part of the program psim.
3 Copyright (C) 1994-1998, Andrew Cagney <cagney@highland.com.au>
5 This program is free software; you can redistribute it and/or modify
6 it under the terms of the GNU General Public License as published by
7 the Free Software Foundation; either version 2 of the License, or
8 (at your option) any later version.
10 This program is distributed in the hope that it will be useful,
11 but WITHOUT ANY WARRANTY; without even the implied warranty of
12 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 GNU General Public License for more details.
15 You should have received a copy of the GNU General Public License
16 along with this program; if not, write to the Free Software
17 Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
31 #if !defined (SIM_EVENTS_POLL_RATE)
32 #define SIM_EVENTS_POLL_RATE 0x1000
37 /* The event queue maintains a single absolute time using two
40 TIME_OF_EVENT: this holds the time at which the next event is meant
41 to occur. If no next event it will hold the time of the last
44 TIME_FROM_EVENT: The current distance from TIME_OF_EVENT. If an
45 event is pending, this will be positive. If no future event is
46 pending this will be negative. This variable is decremented once
47 for each iteration of a clock cycle.
49 Initially, the clock is started at time one (1) with TIME_OF_EVENT
50 == 0 and TIME_FROM_EVENT == -1.
52 Clearly there is a bug in that this code assumes that the absolute
53 time counter will never become greater than 2^62. */
55 typedef struct _event_entry event_entry;
/* A single scheduled event: HANDLER is invoked (with DATA, per the
   calls in event_queue_process below) when the queue's clock reaches
   TIME_OF_EVENT.  NOTE(review): several declaration lines of this
   struct appear truncated in this chunk.  */
58 event_handler *handler;
59 signed64 time_of_event;
/* Events scheduled from signal handlers are parked on the HELD list
   (HELD_END tracks its tail) until event_queue_tick can safely splice
   them into the time-ordered queue; hence the volatile qualifiers.  */
66 event_entry *volatile held;
67 event_entry *volatile *volatile held_end;
/* See the TIME_OF_EVENT / TIME_FROM_EVENT description in the header
   comment above.  */
68 signed64 time_of_event;
69 signed64 time_from_event;
/* Perpetual housekeeping event: keeps the queue permanently non-empty
   by forever re-scheduling itself (event_queue_process relies on
   this - see its final "always poll event" assertion).  */
75 sim_events_poll (void *data)
77 event_queue *queue = data;
/* re-schedule ourselves SIM_EVENTS_POLL_RATE (0x1000 = 4096) ticks
   from now (the old "1000 million ticks" comment disagreed with the
   actual constant) */
79 event_queue_schedule (queue, SIM_EVENTS_POLL_RATE, sim_events_poll, queue);
/* Allocate and zero a new, empty event queue.  The caller is expected
   to follow up with event_queue_init before use.  */
86 event_queue_create(void)
88 event_queue *new_event_queue = ZALLOC(event_queue);
90 new_event_queue->processing = 0;
91 new_event_queue->queue = NULL;
92 new_event_queue->held = NULL;
/* empty held list: the tail pointer refers back at the list head */
93 new_event_queue->held_end = &new_event_queue->held;
95 /* both times are already zero */
96 return new_event_queue;
/* Reset QUEUE to its initial state: free every held and pending
   event, rewind the clock (TIME_OF_EVENT == 0, TIME_FROM_EVENT == -1,
   per the header comment), and schedule the perpetual poll event so
   the queue is never empty.  */
102 event_queue_init(event_queue *queue)
106 /* drain the interrupt queue */
108 #if defined(HAVE_SIGPROCMASK) && defined(SIG_SETMASK)
/* block all signals while walking the signal-visible held list */
111 sigfillset(&new_mask);
112 /*-LOCK-*/ sigprocmask(SIG_SETMASK, &new_mask, &old_mask);
115 while (event != NULL) {
116 event_entry *dead = event;
/* held list drained: restore the tail pointer to the list head */
121 queue->held_end = &queue->held;
122 #if defined(HAVE_SIGPROCMASK) && defined(SIG_SETMASK)
123 /*-UNLOCK-*/ sigprocmask(SIG_SETMASK, &old_mask, NULL);
127 /* drain the normal queue */
128 event = queue->queue;
129 while (event != NULL) {
130 event_entry *dead = event;
136 /* wind time back to one */
137 queue->processing = 0;
138 queue->time_of_event = 0;
139 queue->time_from_event = -1;
141 /* schedule our initial counter event */
142 event_queue_schedule (queue, 0, sim_events_poll, queue);
/* Current absolute simulation time, reconstructed from the two
   counters described in the header comment.  */
147 event_queue_time(event_queue *queue)
149 return queue->time_of_event - queue->time_from_event;
152 STATIC_INLINE_EVENTS\
/* Re-derive TIME_OF_EVENT / TIME_FROM_EVENT from the head of the
   queue after it has changed, preserving the current absolute time
   (verified by the closing ASSERT).  */
154 update_time_from_event(event_queue *events)
156 signed64 current_time = event_queue_time(events);
157 if (events->queue != NULL) {
/* pending event: count down to the head event's absolute time */
158 events->time_from_event = (events->queue->time_of_event - current_time);
159 events->time_of_event = events->queue->time_of_event;
/* no pending event: park the pair so that time_of_event -
   time_from_event still equals CURRENT_TIME */
162 events->time_of_event = current_time - 1;
163 events->time_from_event = -1;
/* optionally dump the whole queue for tracing */
165 if (WITH_TRACE && ppc_trace[trace_events])
169 for (event = events->queue, i = 0;
171 event = event->next, i++)
173 TRACE(trace_events, ("event time-from-event - time %ld, delta %ld - event %d, tag 0x%lx, time %ld, handler 0x%lx, data 0x%lx\n",
175 (long)events->time_from_event,
178 (long)event->time_of_event,
179 (long)event->handler,
/* the update must not have moved the absolute clock */
183 ASSERT(current_time == event_queue_time(events));
186 STATIC_INLINE_EVENTS\
/* Splice NEW_EVENT into EVENTS->queue, DELTA ticks from now, keeping
   the list sorted by absolute time (ties go after existing entries -
   note the `>=` in the scan loop).  Negative deltas are fatal.  */
188 insert_event_entry(event_queue *events,
189 event_entry *new_event,
194 signed64 time_of_event;
197 error("what is past is past!\n");
199 /* compute when the event should occur */
200 time_of_event = event_queue_time(events) + delta;
202 /* find the queue insertion point - things are time ordered */
203 prev = &events->queue;
204 curr = events->queue;
205 while (curr != NULL && time_of_event >= curr->time_of_event) {
/* invariant: the list really is sorted */
206 ASSERT(curr->next == NULL
207 || curr->time_of_event <= curr->next->time_of_event);
211 ASSERT(curr == NULL || time_of_event < curr->time_of_event);
/* link NEW_EVENT in before CURR */
214 new_event->next = curr;
216 new_event->time_of_event = time_of_event;
218 /* adjust the time until the first event */
219 update_time_from_event(events);
/* Schedule HANDLER(DATA) to run DELTA_TIME ticks from now.  Returns
   an opaque tag (really the entry pointer) usable with
   event_queue_deschedule.  Only safe from normal (non-signal)
   context - signal handlers must use
   event_queue_schedule_after_signal instead.  */
224 event_queue_schedule(event_queue *events,
226 event_handler *handler,
229 event_entry *new_event = ZALLOC(event_entry);
230 new_event->data = data;
231 new_event->handler = handler;
232 insert_event_entry(events, new_event, delta_time);
233 TRACE(trace_events, ("event scheduled at %ld - tag 0x%lx - time %ld, handler 0x%lx, data 0x%lx\n",
234 (long)event_queue_time(events),
236 (long)new_event->time_of_event,
237 (long)new_event->handler,
238 (long)new_event->data));
239 return (event_entry_tag)new_event;
/* Signal-safe variant of event_queue_schedule: instead of touching
   the time-ordered queue, append the new entry to the HELD list
   (with all signals blocked) and let event_queue_tick migrate it
   onto the real queue later.  DELTA_TIME is stashed in
   time_of_event until then.  */
245 event_queue_schedule_after_signal(event_queue *events,
247 event_handler *handler,
250 event_entry *new_event = ZALLOC(event_entry);
252 new_event->data = data;
253 new_event->handler = handler;
254 new_event->time_of_event = delta_time; /* work it out later */
255 new_event->next = NULL;
258 #if defined(HAVE_SIGPROCMASK) && defined(SIG_SETMASK)
/* block all signals while mutating the held list */
261 sigfillset(&new_mask);
262 /*-LOCK-*/ sigprocmask(SIG_SETMASK, &new_mask, &old_mask);
264 if (events->held == NULL) {
265 events->held = new_event;
268 *events->held_end = new_event;
/* advance the tail pointer past the appended entry */
270 events->held_end = &new_event->next;
271 #if defined(HAVE_SIGPROCMASK) && defined(SIG_SETMASK)
272 /*-UNLOCK-*/ sigprocmask(SIG_SETMASK, &old_mask, NULL);
276 TRACE(trace_events, ("event scheduled at %ld - tag 0x%lx - time %ld, handler 0x%lx, data 0x%lx\n",
277 (long)event_queue_time(events),
279 (long)new_event->time_of_event,
280 (long)new_event->handler,
281 (long)new_event->data));
283 return (event_entry_tag)new_event;
/* Remove the event identified by EVENT_TO_REMOVE (a tag returned by
   event_queue_schedule) from the pending queue, if it is still there.
   A NULL tag or an already-fired event is tolerated and merely
   traced.  */
289 event_queue_deschedule(event_queue *events,
290 event_entry_tag event_to_remove)
292 event_entry *to_remove = (event_entry*)event_to_remove;
293 ASSERT((events->time_from_event >= 0) == (events->queue != NULL));
294 if (event_to_remove != NULL) {
295 event_entry *current;
296 event_entry **ptr_to_current;
/* scan for TO_REMOVE, keeping a pointer to the link that refers to
   it so it can be unlinked in O(1) */
297 for (ptr_to_current = &events->queue, current = *ptr_to_current;
298 current != NULL && current != to_remove;
/* FIX: was the mis-encoded entity `¤t->next` (mojibake for
   `&current->next`) which did not compile */
299 ptr_to_current = &current->next, current = *ptr_to_current);
300 if (current == to_remove) {
301 *ptr_to_current = current->next;
302 TRACE(trace_events, ("event descheduled at %ld - tag 0x%lx - time %ld, handler 0x%lx, data 0x%lx\n",
303 (long)event_queue_time(events),
304 (long)event_to_remove,
305 (long)current->time_of_event,
306 (long)current->handler,
307 (long)current->data));
/* the queue head may have changed - re-derive the countdown */
309 update_time_from_event(events);
312 TRACE(trace_events, ("event descheduled at %ld - tag 0x%lx - not found\n",
313 (long)event_queue_time(events),
314 (long)event_to_remove));
317 ASSERT((events->time_from_event >= 0) == (events->queue != NULL));
/* Advance the clock by one tick.  First migrate any signal-held
   events onto the real queue, then decrement TIME_FROM_EVENT.
   Returns non-zero exactly when the countdown hits zero, i.e. when
   the caller must run event_queue_process.  */
325 event_queue_tick(event_queue *events)
327 signed64 time_from_event;
329 /* we should only be here when the previous tick has been fully processed */
330 ASSERT(!events->processing);
332 /* move any events that were queued by any signal handlers onto the
333 real event queue. BTW: When inlining, having this code here,
334 instead of in event_queue_process() causes GCC to put greater
335 weight on keeping the pointer EVENTS in a register. This, in
336 turn results in better code being output. */
337 if (events->held != NULL) {
338 event_entry *held_events;
339 event_entry *curr_event;
342 #if defined(HAVE_SIGPROCMASK) && defined(SIG_SETMASK)
/* block all signals while detaching the held list */
345 sigfillset(&new_mask);
346 /*-LOCK-*/ sigprocmask(SIG_SETMASK, &new_mask, &old_mask);
/* detach the whole held list in one go, then reset it to empty */
348 held_events = events->held;
350 events->held_end = &events->held;
351 #if defined(HAVE_SIGPROCMASK) && defined(SIG_SETMASK)
352 /*-UNLOCK-*/ sigprocmask(SIG_SETMASK, &old_mask, NULL);
/* insert each detached entry; its time_of_event field still holds
   the caller's delta (stashed by event_queue_schedule_after_signal) */
357 curr_event = held_events;
358 held_events = curr_event->next;
359 insert_event_entry(events, curr_event, curr_event->time_of_event);
360 } while (held_events != NULL);
363 /* advance time, checking to see if we've reached time zero which
364 would indicate the time for the next event has arrived */
365 time_from_event = events->time_from_event;
366 events->time_from_event = time_from_event - 1;
367 return time_from_event == 0;
/* Fire every event whose time has arrived (head of queue <= now).
   Called after event_queue_tick returns non-zero.  The PROCESSING
   flag is set for the duration so handlers may themselves schedule
   or process without tripping the entry assertions.  */
374 event_queue_process(event_queue *events)
376 signed64 event_time = event_queue_time(events);
378 ASSERT((events->time_from_event == -1 && events->queue != NULL)
379 || events->processing); /* something to do */
381 /* consume all events for this or earlier times. Be careful to
382 allow a new event to appear under our feet */
383 events->processing = 1;
384 while (events->queue != NULL
385 && events->queue->time_of_event <= event_time) {
/* unlink the head event BEFORE calling its handler, copying out the
   fields we need, so the handler can freely mutate the queue */
386 event_entry *to_do = events->queue;
387 event_handler *handler = to_do->handler;
388 void *data = to_do->data;
389 events->queue = to_do->next;
390 TRACE(trace_events, ("event issued at %ld - tag 0x%lx - time %ld, handler 0x%lx, data 0x%lx\n",
393 (long)to_do->time_of_event,
397 /* Always re-compute the time to the next event so that HANDLER()
398 can safely insert new events into the queue. */
399 update_time_from_event(events);
402 events->processing = 0;
404 ASSERT(events->time_from_event > 0);
405 ASSERT(events->queue != NULL); /* always poll event */
409 #endif /* _EVENTS_C_ */