/*
 * Copyright 2000, International Business Machines Corporation and others.
 * All Rights Reserved.
 *
 * This software has been released under the terms of the IBM Public
 * License.  For details, see the LICENSE file in the top-level source
 * directory or online at http://www.openafs.org/dl/license10.html
 */
#include <afsconfig.h>
#ifdef KERNEL
#include "afs/param.h"
#else
#include <afs/param.h>
#endif
#ifdef AFS_SUN59_ENV
#include <sys/time_impl.h>
#endif
#ifdef KERNEL
#ifndef UKERNEL
#include "afs/afs_osi.h"
#else /* !UKERNEL */
#include "afs/sysincludes.h"
#include "afsincludes.h"
#endif /* !UKERNEL */
#include "rx/rx_clock.h"
#include "rx/rx_queue.h"
#include "rx/rx_event.h"
#include "rx/rx_kernel.h"
#include "rx_kmutex.h"
#ifdef RX_ENABLE_LOCKS
#include "rx/rx.h"
#endif /* RX_ENABLE_LOCKS */
#include "rx/rx_globals.h"
#if defined(AFS_SGI_ENV)
#include "sys/debug.h"
/* These are necessary to get curproc (used by GLOCK asserts) to work. */
#include "h/proc.h"
#if !defined(AFS_SGI64_ENV) && !defined(UKERNEL)
#include "h/user.h"
#endif
extern void *osi_Alloc();
#endif /* AFS_SGI_ENV */
#if defined(AFS_OBSD_ENV)
#include "h/proc.h"
#endif
#else /* KERNEL */
#ifdef AFS_PTHREAD_ENV
#include <rx/rx_pthread.h>
#else
#include "rx_lwp.h"
#endif /* AFS_PTHREAD_ENV */
#ifdef RX_ENABLE_LOCKS
#include "rx_kmutex.h"
#endif /* RX_ENABLE_LOCKS */
#include "rx_globals.h"
#endif /* KERNEL */
/* All event processing is relative to the apparent current time given by clock_GetTime */
/* This should be static, but event_test wants to look at the free list... */
struct rx_queue rxevent_free;      /* It's somewhat bogus to use a doubly-linked queue for the free list */
struct rx_queue rxepoch_free;      /* It's somewhat bogus to use a doubly-linked queue for the free list */
static struct rx_queue rxepoch_queue;      /* list of waiting epochs */
static int rxevent_allocUnit = 10; /* Allocation unit (number of event records to allocate at one time) */
static int rxepoch_allocUnit = 10; /* Allocation unit (number of epoch records to allocate at one time) */
int rxevent_nFree;                 /* Number of free event records */
int rxevent_nPosted;               /* Current number of posted events */
int rxepoch_nFree;                 /* Number of free epoch records */
static void (*rxevent_ScheduledEarlierEvent) (void);    /* Proc to call when an event is scheduled that is earlier than all other events */
struct xfreelist {
    void *mem;
    int size;
    struct xfreelist *next;
};
static struct xfreelist *xfreemallocs = 0, *xsp = 0;
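/* Each bulk allocation made below is threaded onto the xfreemallocs chain so
 * that shutdown_rxevent can return the memory to the system; individual event
 * and epoch records are never freed one at a time, only recycled through the
 * free queues above. */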
struct clock rxevent_nextRaiseEvents;   /* Time of next call to raise events */
struct clock rxevent_lastEvent;         /* backwards time detection */
int rxevent_raiseScheduled;             /* true if raise events is scheduled */
#ifdef RX_ENABLE_LOCKS
#ifdef RX_LOCKS_DB
/* rxdb_fileID is used to identify the lock location, along with line#. */
static int rxdb_fileID = RXDB_FILE_RX_EVENT;
#endif /* RX_LOCKS_DB */
#define RX_ENABLE_LOCKS 1
afs_kmutex_t rxevent_lock;
#endif /* RX_ENABLE_LOCKS */
#ifdef AFS_PTHREAD_ENV
/*
 * This mutex protects the following global variables:
 * rxevent_initialized
 */

afs_kmutex_t rx_event_mutex;
#define LOCK_EV_INIT MUTEX_ENTER(&rx_event_mutex)
#define UNLOCK_EV_INIT MUTEX_EXIT(&rx_event_mutex)
#else
#define LOCK_EV_INIT
#define UNLOCK_EV_INIT
#endif /* AFS_PTHREAD_ENV */
static int
rxevent_adjTimes(struct clock *adjTime)
{
    /* backwards clock correction */
    int nAdjusted = 0;
    struct rxepoch *qep, *nqep;
    struct rxevent *qev, *nqev;

    for (queue_Scan(&rxepoch_queue, qep, nqep, rxepoch)) {
        for (queue_Scan(&qep->events, qev, nqev, rxevent)) {
            if (clock_Gt(&qev->eventTime, adjTime)) {
                clock_Sub(&qev->eventTime, adjTime);
                nAdjusted++;
            }
        }
        if (qep->epochSec > adjTime->sec) {
            qep->epochSec -= adjTime->sec;
        }
    }
    return nAdjusted;
}
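/* Worked example of the correction above: if the most recent event was posted
 * when the clock read 1000.0 and the clock then jumps back to 990.0, the
 * 10.0-second delta is subtracted from every pending eventTime (and from each
 * epochSec), so pending events keep their relative order and remaining delays
 * instead of all firing ten seconds late. */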
/* Pass in the number of events to allocate at a time */
int rxevent_initialized = 0;
void
rxevent_Init(int nEvents, void (*scheduler) (void))
{
    LOCK_EV_INIT;
    if (rxevent_initialized) {
        UNLOCK_EV_INIT;
        return;
    }
    MUTEX_INIT(&rxevent_lock, "rxevent_lock", MUTEX_DEFAULT, 0);
    clock_Init();
    if (nEvents)
        rxevent_allocUnit = nEvents;
    queue_Init(&rxevent_free);
    queue_Init(&rxepoch_free);
    queue_Init(&rxepoch_queue);
    rxevent_nFree = rxevent_nPosted = 0;
    rxepoch_nFree = 0;
    rxevent_ScheduledEarlierEvent = scheduler;
    rxevent_initialized = 1;
    clock_Zero(&rxevent_nextRaiseEvents);
    clock_Zero(&rxevent_lastEvent);
    rxevent_raiseScheduled = 0;
    UNLOCK_EV_INIT;
}
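/*
 * Usage sketch (illustrative only, not part of this module): the host
 * environment initializes the event package once, passing an allocation unit
 * and a callback to run whenever an event is posted earlier than everything
 * already queued.  The names below are hypothetical, and the guard macro is
 * never defined; rx's real schedulers live in rx_pthread.c and rx_lwp.c.
 */
#ifdef RX_EVENT_USAGE_EXAMPLE
static void
example_WakeDispatcher(void)
{
    /* Typically signals the thread blocked between calls to
     * rxevent_RaiseEvents so it can recompute its sleep interval. */
}

static void
example_Setup(void)
{
    rxevent_Init(20, example_WakeDispatcher);
}
#endif /* RX_EVENT_USAGE_EXAMPLE */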
/* Create and initialize new epoch structure */
struct rxepoch *
rxepoch_Allocate(struct clock *when)
{
    struct rxepoch *ep;
    int i;

    /* If we are short on free epoch entries, create a block of new ones
     * and add them to the free queue */
    if (queue_IsEmpty(&rxepoch_free)) {
#if defined(AFS_AIX32_ENV) && defined(KERNEL)
        ep = (struct rxepoch *)rxi_Alloc(sizeof(struct rxepoch));
        queue_Append(&rxepoch_free, &ep[0]), rxepoch_nFree++;
#else
#if defined(KERNEL) && !defined(UKERNEL) && defined(AFS_FBSD80_ENV)
        ep = (struct rxepoch *)
            afs_osi_Alloc_NoSleep(sizeof(struct rxepoch) * rxepoch_allocUnit);
        xsp = xfreemallocs;
        xfreemallocs =
            (struct xfreelist *)afs_osi_Alloc_NoSleep(sizeof(struct xfreelist));
#else
        ep = (struct rxepoch *)
            osi_Alloc(sizeof(struct rxepoch) * rxepoch_allocUnit);
        xsp = xfreemallocs;
        xfreemallocs =
            (struct xfreelist *)osi_Alloc(sizeof(struct xfreelist));
#endif
        xfreemallocs->mem = (void *)ep;
        xfreemallocs->size = sizeof(struct rxepoch) * rxepoch_allocUnit;
        xfreemallocs->next = xsp;
        for (i = 0; i < rxepoch_allocUnit; i++)
            queue_Append(&rxepoch_free, &ep[i]), rxepoch_nFree++;
#endif
    }
    ep = queue_First(&rxepoch_free, rxepoch);
    queue_Remove(ep);
    rxepoch_nFree--;
    ep->epochSec = when->sec;
    queue_Init(&ep->events);
    return ep;
}
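/* Note on the data structure: epochs bucket pending events by whole seconds
 * (epochSec), and events within an epoch are kept sorted by microsecond (see
 * the insertion scan in _rxevent_Post below).  Two events at 100.25 and
 * 100.75 share the epochSec == 100 bucket, while an event at 101.0 starts a
 * new epoch, so the per-insert scan stays short even with many pending
 * events. */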
/* Add the indicated event (function, arg) at the specified clock time.  The
 * "when" argument specifies when "func" should be called, in clock (clock.h)
 * units. */
static struct rxevent *
_rxevent_Post(struct clock *when, struct clock *now,
              void (*func) (struct rxevent *, void *, void *, int),
              void *arg, void *arg1, int arg2, int newargs)
{
    struct rxevent *ev, *evqe, *evqpr;
    struct rxepoch *ep, *epqe, *epqpr;
    int isEarliest = 0;

    MUTEX_ENTER(&rxevent_lock);
#ifdef RXDEBUG
    if (rx_Log_event) {
        struct clock now1;
        clock_GetTime(&now1);
        fprintf(rx_Log_event, "%d.%d: rxevent_Post(%d.%d, %p, %p, %p, %d)\n",
                (int)now1.sec, (int)now1.usec, (int)when->sec, (int)when->usec,
                (void *)func, arg, arg1, arg2);
    }
#endif
    /* If a time was provided, check for consistency */
    if (now->sec || now->usec) {
        if (clock_Gt(&rxevent_lastEvent, now)) {
            struct clock adjTime = rxevent_lastEvent;
            clock_Sub(&adjTime, now);
            rxevent_adjTimes(&adjTime);
        }
        rxevent_lastEvent = *now;
    }
    /* Get a pointer to the epoch for this event, if none is found then
     * create a new epoch and insert it into the sorted list */
    for (ep = NULL, queue_ScanBackwards(&rxepoch_queue, epqe, epqpr, rxepoch)) {
        if (when->sec == epqe->epochSec) {
            /* already have a structure for this epoch */
            ep = epqe;
            if (ep == queue_First(&rxepoch_queue, rxepoch))
                isEarliest = 1;
            break;
        } else if (when->sec > epqe->epochSec) {
            /* Create a new epoch and insert after qe */
            ep = rxepoch_Allocate(when);
            queue_InsertAfter(epqe, ep);
            break;
        }
    }
    if (ep == NULL) {
        /* Create a new epoch and place it at the head of the list */
        ep = rxepoch_Allocate(when);
        queue_Prepend(&rxepoch_queue, ep);
        isEarliest = 1;
    }
    /* If we're short on free event entries, create a block of new ones and add
     * them to the free queue */
    if (queue_IsEmpty(&rxevent_free)) {
        int i;
#if defined(AFS_AIX32_ENV) && defined(KERNEL)
        ev = (struct rxevent *)rxi_Alloc(sizeof(struct rxevent));
        queue_Append(&rxevent_free, &ev[0]), rxevent_nFree++;
#else
#if defined(KERNEL) && !defined(UKERNEL) && defined(AFS_FBSD80_ENV)
        ev = (struct rxevent *)afs_osi_Alloc_NoSleep(sizeof(struct rxevent) *
                                                     rxevent_allocUnit);
        xsp = xfreemallocs;
        xfreemallocs =
            (struct xfreelist *)afs_osi_Alloc_NoSleep(sizeof(struct xfreelist));
#else
        ev = (struct rxevent *)osi_Alloc(sizeof(struct rxevent) *
                                         rxevent_allocUnit);
        xsp = xfreemallocs;
        xfreemallocs =
            (struct xfreelist *)osi_Alloc(sizeof(struct xfreelist));
#endif
        xfreemallocs->mem = (void *)ev;
        xfreemallocs->size = sizeof(struct rxevent) * rxevent_allocUnit;
        xfreemallocs->next = xsp;
        for (i = 0; i < rxevent_allocUnit; i++)
            queue_Append(&rxevent_free, &ev[i]), rxevent_nFree++;
#endif
    }
    /* Grab and initialize a new rxevent structure */
    ev = queue_First(&rxevent_free, rxevent);
    queue_Remove(ev);
    rxevent_nFree--;

    /* Record user defined event state */
    ev->eventTime = *when;
    if (newargs) {
        ev->func.newfunc = func;
    } else {
        ev->func.oldfunc = (void (*)(struct rxevent *, void *, void *))func;
    }
    ev->arg = arg;
    ev->arg1 = arg1;
    ev->arg2 = arg2;
    ev->newargs = newargs;
    rxevent_nPosted += 1;       /* Rather than ++, to shut high-C up
                                 * regarding never-set variables
                                 */
    /* Insert the event into the sorted list of events for this epoch */
    for (queue_ScanBackwards(&ep->events, evqe, evqpr, rxevent)) {
        if (when->usec >= evqe->eventTime.usec) {
            /* Insert event after evqe */
            queue_InsertAfter(evqe, ev);
            MUTEX_EXIT(&rxevent_lock);
            return ev;
        }
    }
    /* Insert event at head of current epoch */
    queue_Prepend(&ep->events, ev);
    if (isEarliest && rxevent_ScheduledEarlierEvent
        && (!rxevent_raiseScheduled
            || clock_Lt(&ev->eventTime, &rxevent_nextRaiseEvents))) {
        rxevent_raiseScheduled = 1;
        clock_Zero(&rxevent_nextRaiseEvents);
        MUTEX_EXIT(&rxevent_lock);
        /* Notify our external scheduler */
        (*rxevent_ScheduledEarlierEvent) ();
        MUTEX_ENTER(&rxevent_lock);
    }
    MUTEX_EXIT(&rxevent_lock);
    return ev;
}
struct rxevent *
rxevent_Post(struct clock *when,
             void (*func) (struct rxevent *, void *, void *),
             void *arg, void *arg1)
{
    struct clock now;
    clock_Zero(&now);
    return _rxevent_Post(when, &now,
                         (void (*)(struct rxevent *, void *, void *, int))func,
                         arg, arg1, 0, 0);
}

struct rxevent *
rxevent_Post2(struct clock *when,
              void (*func) (struct rxevent *, void *, void *, int),
              void *arg, void *arg1, int arg2)
{
    struct clock now;
    clock_Zero(&now);
    return _rxevent_Post(when, &now, func, arg, arg1, arg2, 1);
}

struct rxevent *
rxevent_PostNow(struct clock *when, struct clock *now,
                void (*func) (struct rxevent *, void *, void *),
                void *arg, void *arg1)
{
    return _rxevent_Post(when, now,
                         (void (*)(struct rxevent *, void *, void *, int))func,
                         arg, arg1, 0, 0);
}

struct rxevent *
rxevent_PostNow2(struct clock *when, struct clock *now,
                 void (*func) (struct rxevent *, void *, void *, int),
                 void *arg, void *arg1, int arg2)
{
    return _rxevent_Post(when, now, func, arg, arg1, arg2, 1);
}
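/*
 * Usage sketch (illustrative only): schedule a handler half a second from
 * now through the old-style three-argument interface.  The handler and its
 * argument are hypothetical, and the guard macro is never defined; real
 * callers pass rx call or connection pointers and keep the returned rxevent
 * pointer so they can cancel it later.
 */
#ifdef RX_EVENT_USAGE_EXAMPLE
static void
example_Retry(struct rxevent *ev, void *arg, void *arg1)
{
    /* Runs once when the event expires; rxevent_RaiseEvents reclaims the
     * event record after this returns, so do not reuse ev. */
}

static struct rxevent *
example_ScheduleRetry(void *conn)
{
    struct clock when;

    clock_GetTime(&when);
    clock_Addmsec(&when, 500);
    return rxevent_Post(&when, example_Retry, conn, NULL);
}
#endif /* RX_EVENT_USAGE_EXAMPLE */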
/* Cancel an event by moving it from the event queue to the free list.
 * Warning, the event must be on the event queue!  If not, this should core
 * dump (reference through 0).  This routine should be called using the macro
 * rxevent_Cancel, which checks for a null event and also nulls the caller's
 * event pointer after cancelling the event.
 */
#ifdef RX_ENABLE_LOCKS
#ifdef RX_REFCOUNT_CHECK
int rxevent_Cancel_type = 0;
#endif
#endif

void
rxevent_Cancel_1(struct rxevent *ev, struct rx_call *call,
                 int type)
{
#ifdef RXDEBUG
    if (rx_Log_event) {
        struct clock now;
        clock_GetTime(&now);
        fprintf(rx_Log_event, "%d.%d: rxevent_Cancel_1(%d.%d, %p, %p)\n",
                (int)now.sec, (int)now.usec, (int)ev->eventTime.sec,
                (int)ev->eventTime.usec, (void *)ev->func.newfunc,
                ev->arg);
    }
#endif
    /* Append it to the free list (rather than prepending) to keep the free
     * list hot so nothing pages out
     */
    MUTEX_ENTER(&rxevent_lock);
    if (!ev) {
        MUTEX_EXIT(&rxevent_lock);
        return;
    }
#ifdef RX_ENABLE_LOCKS
    /* It's possible we're currently processing this event. */
    if (queue_IsOnQueue(ev)) {
        queue_MoveAppend(&rxevent_free, ev);
        rxevent_nPosted--;
        rxevent_nFree++;
        if (call) {
            call->refCount--;
#ifdef RX_REFCOUNT_CHECK
            call->refCDebug[type]--;
            if (call->refCDebug[type] < 0) {
                rxevent_Cancel_type = type;
                osi_Panic("rxevent_Cancel: call refCount < 0");
            }
#endif /* RX_REFCOUNT_CHECK */
        }
    }
#else /* RX_ENABLE_LOCKS */
    queue_MoveAppend(&rxevent_free, ev);
    rxevent_nPosted--;
    rxevent_nFree++;
#endif /* RX_ENABLE_LOCKS */
    MUTEX_EXIT(&rxevent_lock);
}
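/*
 * Usage sketch (illustrative only): callers go through the rxevent_Cancel
 * macro from rx_event.h rather than calling rxevent_Cancel_1 directly, so
 * the null check and the clearing of the caller's pointer happen in one
 * place, e.g.:
 *
 *     rxevent_Cancel(call->resendEvent, call, RX_CALL_REFCOUNT_RESEND);
 *
 * The event field and refcount type shown follow the pattern of callers in
 * rx.c; any event pointer/type pair is handled the same way.
 */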
/* Process all epochs that have expired relative to the current clock time
 * (which is not re-evaluated unless clock_NewTime has been called).  The
 * relative time to the next epoch is returned in the output parameter next
 * and the function returns 1.  If there is no next epoch, the function
 * returns 0.
 */
int
rxevent_RaiseEvents(struct clock *next)
{
    struct rxepoch *ep;
    struct rxevent *ev;
    volatile struct clock now;
    MUTEX_ENTER(&rxevent_lock);
    /* Events are sorted by time, so only scan until an event is found that has
     * not yet timed out */

    clock_Zero(&now);
    while (queue_IsNotEmpty(&rxepoch_queue)) {
        ep = queue_First(&rxepoch_queue, rxepoch);
        if (queue_IsEmpty(&ep->events)) {
            queue_Remove(ep);
            queue_Append(&rxepoch_free, ep);
            rxepoch_nFree++;
            continue;
        }
        do {
          reraise:
            ev = queue_First(&ep->events, rxevent);
            if (clock_Lt(&now, &ev->eventTime)) {
                clock_GetTime(&now);
                if (clock_Gt(&rxevent_lastEvent, &now)) {
                    struct clock adjTime = rxevent_lastEvent;
                    int adjusted;

                    clock_Sub(&adjTime, &now);
                    adjusted = rxevent_adjTimes(&adjTime);
                    rxevent_lastEvent = now;
                    if (adjusted > 0)
                        goto reraise;
                }
                if (clock_Lt(&now, &ev->eventTime)) {
                    *next = rxevent_nextRaiseEvents = ev->eventTime;
                    rxevent_raiseScheduled = 1;
                    clock_Sub(next, &now);
                    MUTEX_EXIT(&rxevent_lock);
                    return 1;
                }
            }
            queue_Remove(ev);
            rxevent_nPosted--;
            MUTEX_EXIT(&rxevent_lock);
            if (ev->newargs) {
                ev->func.newfunc(ev, ev->arg, ev->arg1, ev->arg2);
            } else {
                ev->func.oldfunc(ev, ev->arg, ev->arg1);
            }
            MUTEX_ENTER(&rxevent_lock);
            queue_Append(&rxevent_free, ev);
            rxevent_nFree++;
        } while (queue_IsNotEmpty(&ep->events));
    }
#ifdef RXDEBUG
    if (rx_Log_event)
        fprintf(rx_Log_event, "rxevent_RaiseEvents(%d.%d)\n", (int)now.sec,
                (int)now.usec);
#endif
    rxevent_raiseScheduled = 0;
    MUTEX_EXIT(&rxevent_lock);
    return 0;
}
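/*
 * Usage sketch (illustrative only): the dispatcher loop provided by the host
 * environment alternates between firing expired events and sleeping until
 * the next deadline.  example_Sleep and example_WaitForPost are hypothetical
 * stand-ins for the environment's timed and untimed waits (rx_pthread.c and
 * rx_lwp.c each supply their own), and the guard macro is never defined.
 */
#ifdef RX_EVENT_USAGE_EXAMPLE
static void example_Sleep(struct clock *interval);
static void example_WaitForPost(void);

static void
example_DispatchLoop(void)
{
    struct clock next;

    for (;;) {
        if (rxevent_RaiseEvents(&next)) {
            /* next holds the relative time until the earliest event */
            example_Sleep(&next);
        } else {
            /* nothing pending: block until the scheduler callback passed
             * to rxevent_Init reports a newly posted earlier event */
            example_WaitForPost();
        }
    }
}
#endif /* RX_EVENT_USAGE_EXAMPLE */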
void
shutdown_rxevent(void)
{
    struct xfreelist *xp, *nxp;

    LOCK_EV_INIT;
    if (!rxevent_initialized) {
        UNLOCK_EV_INIT;
        return;
    }
    rxevent_initialized = 0;
    UNLOCK_EV_INIT;
    MUTEX_DESTROY(&rxevent_lock);
#if defined(AFS_AIX32_ENV) && defined(KERNEL)
    /* Everything is freed in afs_osinet.c */
#else
    xp = xfreemallocs;
    while (xp) {
        nxp = xp->next;
        osi_Free((char *)xp->mem, xp->size);
        osi_Free((char *)xp, sizeof(struct xfreelist));
        xp = nxp;
    }
    xfreemallocs = NULL;
#endif
}