/*
 * Copyright 2000, International Business Machines Corporation and others.
 * All Rights Reserved.
 *
 * This software has been released under the terms of the IBM Public
 * License.  For details, see the LICENSE file in the top-level source
 * directory or online at http://www.openafs.org/dl/license10.html
 */
#include <afsconfig.h>
#ifdef KERNEL
#include "afs/param.h"
#else
#include <afs/param.h>
#endif

#ifdef AFS_SUN59_ENV
#include <sys/time_impl.h>
#endif
#ifdef KERNEL
#ifndef UKERNEL
#include "afs/afs_osi.h"
#else /* !UKERNEL */
#include "afs/sysincludes.h"
#include "afsincludes.h"
#endif /* !UKERNEL */
#include "rx/rx_clock.h"
#include "rx/rx_queue.h"
#include "rx/rx_event.h"
#include "rx/rx_kernel.h"
#include "rx_kmutex.h"
#ifdef RX_ENABLE_LOCKS
#include "rx/rx_internal.h"
#include "rx/rx.h"
#endif /* RX_ENABLE_LOCKS */
#include "rx/rx_globals.h"
#if defined(AFS_SGI_ENV)
#include "sys/debug.h"
/* These are necessary to get curproc (used by GLOCK asserts) to work. */
#include "h/proc.h"
#if !defined(AFS_SGI64_ENV) && !defined(UKERNEL)
#include "h/user.h"
#endif
extern void *osi_Alloc();
#endif
#if defined(AFS_OBSD_ENV)
#include "h/proc.h"
#endif
#else /* KERNEL */
#include <stdio.h>
#include "rx_clock.h"
#include "rx_queue.h"
#include "rx_event.h"
#include "rx_user.h"
#ifdef AFS_PTHREAD_ENV
#include <rx/rx_pthread.h>
#else
#include "rx_lwp.h"
#endif
#ifdef RX_ENABLE_LOCKS
#include "rx_internal.h"
#include "rx.h"
#endif /* RX_ENABLE_LOCKS */
#include "rx_globals.h"
#ifdef AFS_NT40_ENV
#include <malloc.h>
#endif
#endif /* KERNEL */
/* All event processing is relative to the apparent current time given by clock_GetTime */

/* This should be static, but event_test wants to look at the free list... */
struct rx_queue rxevent_free;   /* It's somewhat bogus to use a doubly-linked queue for the free list */
struct rx_queue rxepoch_free;   /* It's somewhat bogus to use a doubly-linked queue for the free list */
static struct rx_queue rxepoch_queue;   /* list of waiting epochs */
static int rxevent_allocUnit = 10;      /* Allocation unit (number of event records to allocate at one time) */
static int rxepoch_allocUnit = 10;      /* Allocation unit (number of epoch records to allocate at one time) */
int rxevent_nFree;              /* Number of free event records */
int rxevent_nPosted;            /* Current number of posted events */
int rxepoch_nFree;              /* Number of free epoch records */
static void (*rxevent_ScheduledEarlierEvent) (void);    /* Proc to call when an event is scheduled that is earlier than all other events */

struct xfreelist {
    void *mem;
    int size;
    struct xfreelist *next;
};
static struct xfreelist *xfreemallocs = 0, *xsp = 0;

struct clock rxevent_nextRaiseEvents;   /* Time of next call to raise events */
struct clock rxevent_lastEvent; /* backwards time detection */
int rxevent_raiseScheduled;     /* true if raise events is scheduled */
#ifdef RX_ENABLE_LOCKS
#ifdef RX_LOCKS_DB
/* rxdb_fileID is used to identify the lock location, along with line#. */
static int rxdb_fileID = RXDB_FILE_RX_EVENT;
#endif /* RX_LOCKS_DB */
#define RX_ENABLE_LOCKS 1
afs_kmutex_t rxevent_lock;
#endif /* RX_ENABLE_LOCKS */
#ifdef AFS_PTHREAD_ENV
/*
 * This mutex protects the following global variables:
 * rxevent_initialized
 */

afs_kmutex_t rx_event_mutex;
#define LOCK_EV_INIT MUTEX_ENTER(&rx_event_mutex)
#define UNLOCK_EV_INIT MUTEX_EXIT(&rx_event_mutex)
#else
#define LOCK_EV_INIT
#define UNLOCK_EV_INIT
#endif /* AFS_PTHREAD_ENV */
int
rxevent_adjTimes(struct clock *adjTime)
{
    /* backwards clock correction */
    int nAdjusted = 0;
    struct rxepoch *qep, *nqep;
    struct rxevent *qev, *nqev;

    for (queue_Scan(&rxepoch_queue, qep, nqep, rxepoch)) {
        for (queue_Scan(&qep->events, qev, nqev, rxevent)) {
            if (clock_Gt(&qev->eventTime, adjTime)) {
                clock_Sub(&qev->eventTime, adjTime);
                nAdjusted++;
            }
        }
        if (qep->epochSec > adjTime->sec) {
            qep->epochSec -= adjTime->sec;
        }
    }
    return nAdjusted;
}
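
/*
 * Worked example (editor's note, not in the original source): if the last
 * event was stamped at 100.000005 but the clock now reads 99.000005, the
 * caller computes adjTime = 1.000000 and every pending event whose absolute
 * eventTime exceeds that delta is pulled one second earlier.  Relative
 * timeouts therefore still fire roughly on schedule after a backwards
 * clock step, instead of being delayed by the size of the step.
 */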
/* Initialize the event handling package.  Pass in the number of events to
 * allocate at a time. */
int rxevent_initialized = 0;
void
rxevent_Init(int nEvents, void (*scheduler) (void))
{
    LOCK_EV_INIT;
    if (rxevent_initialized) {
        UNLOCK_EV_INIT;
        return;
    }
    MUTEX_INIT(&rxevent_lock, "rxevent_lock", MUTEX_DEFAULT, 0);
    clock_Init();
    if (nEvents)
        rxevent_allocUnit = nEvents;
    queue_Init(&rxevent_free);
    queue_Init(&rxepoch_free);
    queue_Init(&rxepoch_queue);
    rxevent_nFree = rxevent_nPosted = 0;
    rxepoch_nFree = 0;
    rxevent_ScheduledEarlierEvent = scheduler;
    rxevent_initialized = 1;
    clock_Zero(&rxevent_nextRaiseEvents);
    clock_Zero(&rxevent_lastEvent);
    rxevent_raiseScheduled = 0;
    UNLOCK_EV_INIT;
}
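
/*
 * Usage sketch (editor's addition; `reschedule_timer' is a hypothetical
 * caller-supplied hook, not part of this file).  The scheduler callback is
 * invoked whenever a newly posted event becomes the earliest one pending,
 * so the dispatching thread can shorten its sleep accordingly.
 */
#ifdef RX_EVENT_USAGE_SKETCH    /* never defined; illustration only */
static void
reschedule_timer(void)
{
    /* e.g. wake the thread that sleeps between rxevent_RaiseEvents calls */
}

static void
example_init(void)
{
    rxevent_Init(200, reschedule_timer);        /* allocate events 200 at a time */
}
#endif /* RX_EVENT_USAGE_SKETCH */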
/* Create and initialize new epoch structure */
struct rxepoch *
rxepoch_Allocate(struct clock *when)
{
    struct rxepoch *ep;
    int i;

    /* If we are short on free epoch entries, create a block of new ones
     * and add them to the free queue */
    if (queue_IsEmpty(&rxepoch_free)) {
#if defined(AFS_AIX32_ENV) && defined(KERNEL)
        ep = (struct rxepoch *)rxi_Alloc(sizeof(struct rxepoch));
        queue_Append(&rxepoch_free, &ep[0]), rxepoch_nFree++;
#else
#if defined(KERNEL) && !defined(UKERNEL) && defined(AFS_FBSD80_ENV)
        ep = (struct rxepoch *)
            afs_osi_Alloc_NoSleep(sizeof(struct rxepoch) * rxepoch_allocUnit);
        xsp = xfreemallocs;
        xfreemallocs =
            (struct xfreelist *)afs_osi_Alloc_NoSleep(sizeof(struct xfreelist));
#else
        ep = (struct rxepoch *)
            osi_Alloc(sizeof(struct rxepoch) * rxepoch_allocUnit);
        xsp = xfreemallocs;
        xfreemallocs =
            (struct xfreelist *)osi_Alloc(sizeof(struct xfreelist));
#endif
        xfreemallocs->mem = (void *)ep;
        xfreemallocs->size = sizeof(struct rxepoch) * rxepoch_allocUnit;
        xfreemallocs->next = xsp;
        for (i = 0; i < rxepoch_allocUnit; i++)
            queue_Append(&rxepoch_free, &ep[i]), rxepoch_nFree++;
#endif
    }
    ep = queue_First(&rxepoch_free, rxepoch);
    queue_Remove(ep);
    rxepoch_nFree--;
    ep->epochSec = when->sec;
    queue_Init(&ep->events);
    return ep;
}
/* Add the indicated event (function, arg) at the specified clock time.  The
 * "when" argument specifies when "func" should be called, in clock (clock.h)
 * units. */

static struct rxevent *
_rxevent_Post(struct clock *when, struct clock *now,
              void (*func) (struct rxevent *, void *, void *, int),
              void *arg, void *arg1, int arg2, int newargs)
{
    register struct rxevent *ev, *evqe, *evqpr;
    register struct rxepoch *ep, *epqe, *epqpr;
    int isEarliest = 0;

    MUTEX_ENTER(&rxevent_lock);
#ifdef RXDEBUG
    if (rx_Log_event) {
        struct clock now1;
        clock_GetTime(&now1);
        fprintf(rx_Log_event, "%d.%d: rxevent_Post(%d.%d, %p, %p, %p, %d)\n",
                (int)now1.sec, (int)now1.usec, (int)when->sec, (int)when->usec,
                func, arg, arg1, arg2);
    }
#endif
    /* If a time was provided, check for consistency */
    if (now->sec) {
        if (clock_Gt(&rxevent_lastEvent, now)) {
            struct clock adjTime = rxevent_lastEvent;
            clock_Sub(&adjTime, now);
            rxevent_adjTimes(&adjTime);
        }
        rxevent_lastEvent = *now;
    }
    /* Get a pointer to the epoch for this event, if none is found then
     * create a new epoch and insert it into the sorted list */
    for (ep = NULL, queue_ScanBackwards(&rxepoch_queue, epqe, epqpr, rxepoch)) {
        if (when->sec == epqe->epochSec) {
            /* already have a structure for this epoch */
            ep = epqe;
            if (ep == queue_First(&rxepoch_queue, rxepoch))
                isEarliest = 1;
            break;
        } else if (when->sec > epqe->epochSec) {
            /* Create a new epoch and insert after qe */
            ep = rxepoch_Allocate(when);
            queue_InsertAfter(epqe, ep);
            break;
        }
    }
    if (ep == NULL) {
        /* Create a new epoch and place it at the head of the list */
        ep = rxepoch_Allocate(when);
        queue_Prepend(&rxepoch_queue, ep);
        isEarliest = 1;
    }
    /* If we're short on free event entries, create a block of new ones and add
     * them to the free queue */
    if (queue_IsEmpty(&rxevent_free)) {
        register int i;
#if defined(AFS_AIX32_ENV) && defined(KERNEL)
        ev = (struct rxevent *)rxi_Alloc(sizeof(struct rxevent));
        queue_Append(&rxevent_free, &ev[0]), rxevent_nFree++;
#else
#if defined(KERNEL) && !defined(UKERNEL) && defined(AFS_FBSD80_ENV)
        ev = (struct rxevent *)afs_osi_Alloc_NoSleep(sizeof(struct rxevent) *
                                                     rxevent_allocUnit);
        xsp = xfreemallocs;
        xfreemallocs =
            (struct xfreelist *)afs_osi_Alloc_NoSleep(sizeof(struct xfreelist));
#else
        ev = (struct rxevent *)osi_Alloc(sizeof(struct rxevent) *
                                         rxevent_allocUnit);
        xsp = xfreemallocs;
        xfreemallocs =
            (struct xfreelist *)osi_Alloc(sizeof(struct xfreelist));
#endif
        xfreemallocs->mem = (void *)ev;
        xfreemallocs->size = sizeof(struct rxevent) * rxevent_allocUnit;
        xfreemallocs->next = xsp;
        for (i = 0; i < rxevent_allocUnit; i++)
            queue_Append(&rxevent_free, &ev[i]), rxevent_nFree++;
#endif
    }

    /* Grab and initialize a new rxevent structure */
    ev = queue_First(&rxevent_free, rxevent);
    queue_Remove(ev);
    rxevent_nFree--;

    /* Record user defined event state */
    ev->eventTime = *when;
    if (newargs) {
        ev->func.newfunc = func;
    } else {
        ev->func.oldfunc = (void (*)(struct rxevent *, void *, void *))func;
    }
    ev->arg = arg;
    ev->arg1 = arg1;
    ev->arg2 = arg2;
    ev->newargs = newargs;
    rxevent_nPosted += 1;       /* Rather than ++, to shut high-C up
                                 * regarding never-set variables */

    /* Insert the event into the sorted list of events for this epoch */
    for (queue_ScanBackwards(&ep->events, evqe, evqpr, rxevent)) {
        if (when->usec >= evqe->eventTime.usec) {
            /* Insert event after evqe */
            queue_InsertAfter(evqe, ev);
            MUTEX_EXIT(&rxevent_lock);
            return ev;
        }
    }
    /* Insert event at head of current epoch */
    queue_Prepend(&ep->events, ev);
    if (isEarliest && rxevent_ScheduledEarlierEvent
        && (!rxevent_raiseScheduled
            || clock_Lt(&ev->eventTime, &rxevent_nextRaiseEvents))) {
        rxevent_raiseScheduled = 1;
        clock_Zero(&rxevent_nextRaiseEvents);
        MUTEX_EXIT(&rxevent_lock);
        /* Notify our external scheduler */
        (*rxevent_ScheduledEarlierEvent) ();
        MUTEX_ENTER(&rxevent_lock);
    }
    MUTEX_EXIT(&rxevent_lock);
    return ev;
}
struct rxevent *
rxevent_Post(struct clock *when,
             void (*func) (struct rxevent *, void *, void *),
             void *arg, void *arg1)
{
    struct clock now;
    clock_Zero(&now);
    return _rxevent_Post(when, &now,
                         (void (*)(struct rxevent *, void *, void *, int))func,
                         arg, arg1, 0, 0);
}

struct rxevent *
rxevent_Post2(struct clock *when,
              void (*func) (struct rxevent *, void *, void *, int),
              void *arg, void *arg1, int arg2)
{
    struct clock now;
    clock_Zero(&now);
    return _rxevent_Post(when, &now, func, arg, arg1, arg2, 1);
}

struct rxevent *
rxevent_PostNow(struct clock *when, struct clock *now,
                void (*func) (struct rxevent *, void *, void *),
                void *arg, void *arg1)
{
    return _rxevent_Post(when, now,
                         (void (*)(struct rxevent *, void *, void *, int))func,
                         arg, arg1, 0, 0);
}

struct rxevent *
rxevent_PostNow2(struct clock *when, struct clock *now,
                 void (*func) (struct rxevent *, void *, void *, int),
                 void *arg, void *arg1, int arg2)
{
    return _rxevent_Post(when, now, func, arg, arg1, arg2, 1);
}
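
/*
 * Usage sketch (editor's addition): schedule a callback half a second from
 * now.  `retry_cb', `exampleEvent', and the conn argument are hypothetical;
 * in-tree callers pass connection or call pointers through arg/arg1.
 */
#ifdef RX_EVENT_USAGE_SKETCH    /* never defined; illustration only */
static struct rxevent *exampleEvent;    /* saved handle, used to cancel later */

static void
retry_cb(struct rxevent *ev, void *arg, void *arg1)
{
    /* event storage is reclaimed by rxevent_RaiseEvents after this returns */
}

static void
example_post(void *conn)
{
    struct clock now, when, delta;

    clock_GetTime(&now);
    delta.sec = 0;
    delta.usec = 500000;        /* half a second */
    when = now;
    clock_Add(&when, &delta);
    exampleEvent = rxevent_PostNow(&when, &now, retry_cb, conn, NULL);
}
#endif /* RX_EVENT_USAGE_SKETCH */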
/* Cancel an event by moving it from the event queue to the free list.
 * Warning, the event must be on the event queue!  If not, this should core
 * dump (reference through 0).  This routine should be called using the macro
 * rxevent_Cancel, which checks for a null event and also nulls the caller's
 * event pointer after cancelling the event.
 */
#ifdef RX_ENABLE_LOCKS
#ifdef RX_REFCOUNT_CHECK
int rxevent_Cancel_type = 0;
#endif
#endif

void
rxevent_Cancel_1(register struct rxevent *ev, register struct rx_call *call,
                 register int type)
{
#ifdef RXDEBUG
    if (rx_Log_event) {
        struct clock now;
        clock_GetTime(&now);
        fprintf(rx_Log_event, "%d.%d: rxevent_Cancel_1(%d.%d, %p, %p)\n",
                (int)now.sec, (int)now.usec, (int)ev->eventTime.sec,
                (int)ev->eventTime.usec, ev->func.newfunc, ev->arg);
    }
#endif
    /* Append it to the free list (rather than prepending) to keep the free
     * list hot so nothing pages out
     */
    MUTEX_ENTER(&rxevent_lock);
    if (!ev) {
        MUTEX_EXIT(&rxevent_lock);
        return;
    }
#ifdef RX_ENABLE_LOCKS
    /* It's possible we're currently processing this event. */
    if (queue_IsOnQueue(ev)) {
        queue_MoveAppend(&rxevent_free, ev);
        rxevent_nPosted--;
        rxevent_nFree++;
        if (call) {
            call->refCount--;
#ifdef RX_REFCOUNT_CHECK
            call->refCDebug[type]--;
            if (call->refCDebug[type] < 0) {
                rxevent_Cancel_type = type;
                osi_Panic("rxevent_Cancel: call refCount < 0");
            }
#endif /* RX_REFCOUNT_CHECK */
        }
    }
#else /* RX_ENABLE_LOCKS */
    queue_MoveAppend(&rxevent_free, ev);
    rxevent_nPosted--;
    rxevent_nFree++;
#endif /* RX_ENABLE_LOCKS */
    MUTEX_EXIT(&rxevent_lock);
}
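
/*
 * Usage sketch (editor's addition): callers keep the handle returned by the
 * post routines and clear it after cancelling, which is what the
 * rxevent_Cancel macro in rx_event.h automates.  `example_conn' and its
 * timer field are hypothetical.
 */
#ifdef RX_EVENT_USAGE_SKETCH    /* never defined; illustration only */
struct example_conn {
    struct rxevent *timerEvent; /* handle saved from rxevent_PostNow */
};

static void
example_cancel(struct example_conn *conn)
{
    if (conn->timerEvent) {
        rxevent_Cancel_1(conn->timerEvent, NULL, 0);
        conn->timerEvent = NULL;        /* mirror what rxevent_Cancel does */
    }
}
#endif /* RX_EVENT_USAGE_SKETCH */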
/* Process all epochs that have expired relative to the current clock time
 * (which is not re-evaluated unless clock_NewTime has been called).  The
 * relative time to the next epoch is returned in the output parameter next
 * and the function returns 1.  If there is no next epoch, the function
 * returns 0.
 */
int
rxevent_RaiseEvents(struct clock *next)
{
    register struct rxepoch *ep;
    register struct rxevent *ev;
    volatile struct clock now;
    MUTEX_ENTER(&rxevent_lock);

    /* Events are sorted by time, so only scan until an event is found that has
     * not yet timed out */

    clock_Zero(&now);
    while (queue_IsNotEmpty(&rxepoch_queue)) {
        ep = queue_First(&rxepoch_queue, rxepoch);
        if (queue_IsEmpty(&ep->events)) {
            queue_Remove(ep);
            queue_Append(&rxepoch_free, ep);
            rxepoch_nFree++;
            continue;
        }
        do {
          reraise:
            ev = queue_First(&ep->events, rxevent);
            if (clock_Lt(&now, &ev->eventTime)) {
                clock_GetTime(&now);
                if (clock_Gt(&rxevent_lastEvent, &now)) {
                    struct clock adjTime = rxevent_lastEvent;
                    int adjusted;
                    clock_Sub(&adjTime, &now);
                    adjusted = rxevent_adjTimes(&adjTime);
                    rxevent_lastEvent = now;
                    if (adjusted > 0)
                        goto reraise;
                }
                if (clock_Lt(&now, &ev->eventTime)) {
                    *next = rxevent_nextRaiseEvents = ev->eventTime;
                    rxevent_raiseScheduled = 1;
                    clock_Sub(next, &now);
                    MUTEX_EXIT(&rxevent_lock);
                    return 1;
                }
            }
            queue_Remove(ev);
            rxevent_nPosted--;
            MUTEX_EXIT(&rxevent_lock);
            if (ev->newargs) {
                ev->func.newfunc(ev, ev->arg, ev->arg1, ev->arg2);
            } else {
                ev->func.oldfunc(ev, ev->arg, ev->arg1);
            }
            MUTEX_ENTER(&rxevent_lock);
            queue_Append(&rxevent_free, ev);
            rxevent_nFree++;
        } while (queue_IsNotEmpty(&ep->events));
    }
#ifdef RXDEBUG
    if (rx_Log_event)
        fprintf(rx_Log_event, "rxevent_RaiseEvents(%d.%d)\n", (int)now.sec,
                (int)now.usec);
#endif
    rxevent_raiseScheduled = 0;
    MUTEX_EXIT(&rxevent_lock);
    return 0;
}
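
/*
 * Usage sketch (editor's addition): the LWP/pthread listeners drive this
 * routine in a loop roughly like the following.  `sleep_until' and
 * `wait_for_post' are hypothetical stand-ins for whatever wait primitives
 * the environment provides.
 */
#ifdef RX_EVENT_USAGE_SKETCH    /* never defined; illustration only */
extern void sleep_until(struct clock *next);    /* hypothetical wait primitive */
extern void wait_for_post(void);        /* hypothetical: blocks until an event is posted */

static void
example_dispatch_loop(void)
{
    struct clock next;

    for (;;) {
        if (rxevent_RaiseEvents(&next)) {
            /* an event is pending `next' (relative) time units from now */
            sleep_until(&next);
        } else {
            wait_for_post();    /* no events; wait for the scheduler hook */
        }
    }
}
#endif /* RX_EVENT_USAGE_SKETCH */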
void
shutdown_rxevent(void)
{
    struct xfreelist *xp, *nxp;

    LOCK_EV_INIT;
    if (!rxevent_initialized) {
        UNLOCK_EV_INIT;
        return;
    }
    rxevent_initialized = 0;
    UNLOCK_EV_INIT;
    MUTEX_DESTROY(&rxevent_lock);
#if defined(AFS_AIX32_ENV) && defined(KERNEL)
    /* Everything is freed in afs_osinet.c */
#else
    xp = xfreemallocs;
    while (xp) {
        nxp = xp->next;
        osi_Free((char *)xp->mem, xp->size);
        osi_Free((char *)xp, sizeof(struct xfreelist));
        xp = nxp;
    }
    xfreemallocs = NULL;
#endif
}