/*
 * Copyright 2000, International Business Machines Corporation and others.
 * All Rights Reserved.
 *
 * This software has been released under the terms of the IBM Public
 * License.  For details, see the LICENSE file in the top-level source
 * directory or online at http://www.openafs.org/dl/license10.html
 */
#include <afsconfig.h>
#ifdef KERNEL
#include "afs/param.h"
#else
#include <afs/param.h>
#endif
#include <sys/time_impl.h>
#include "afs/afs_osi.h"
#include "afs/sysincludes.h"
#include "afsincludes.h"
#include "rx/rx_clock.h"
#include "rx/rx_queue.h"
#include "rx/rx_event.h"
#include "rx/rx_kernel.h"
#include "rx_kmutex.h"
#ifdef RX_ENABLE_LOCKS
#include "rx/rx.h"
#endif /* RX_ENABLE_LOCKS */
#include "rx/rx_globals.h"
#if defined(AFS_SGI_ENV)
#include "sys/debug.h"
/* These are necessary to get curproc (used by GLOCK asserts) to work. */
#include "h/proc.h"
#if !defined(AFS_SGI64_ENV) && !defined(UKERNEL)
#include "h/user.h"
#endif
extern void *osi_Alloc();
#endif /* AFS_SGI_ENV */
#if defined(AFS_OBSD_ENV)
#endif /* AFS_OBSD_ENV */
#ifdef AFS_PTHREAD_ENV
#include <rx/rx_pthread.h>
#else
#include "rx_lwp.h"
#endif
#ifdef RX_ENABLE_LOCKS
#include "rx.h"
#endif /* RX_ENABLE_LOCKS */
#include "rx_globals.h"
/* All event processing is relative to the apparent current time given by clock_GetTime */

/* This should be static, but event_test wants to look at the free list... */
struct rx_queue rxevent_free;	/* It's somewhat bogus to use a doubly-linked queue for the free list */
struct rx_queue rxepoch_free;	/* It's somewhat bogus to use a doubly-linked queue for the free list */
static struct rx_queue rxepoch_queue;	/* list of waiting epochs */
static int rxevent_allocUnit = 10;	/* Allocation unit (number of event records to allocate at one time) */
static int rxepoch_allocUnit = 10;	/* Allocation unit (number of epoch records to allocate at one time) */
int rxevent_nFree;		/* Number of free event records */
int rxevent_nPosted;		/* Current number of posted events */
int rxepoch_nFree;		/* Number of free epoch records */
static void (*rxevent_ScheduledEarlierEvent) (void);	/* Proc to call when an event is scheduled that is earlier than all other events */
/* Track the blocks allocated below so shutdown_rxevent can free them */
struct xfreelist {
    void *mem;
    int size;
    struct xfreelist *next;
};
static struct xfreelist *xfreemallocs = 0, *xsp = 0;
struct clock rxevent_nextRaiseEvents;	/* Time of next call to raise events */
struct clock rxevent_lastEvent;	/* backwards time detection */
int rxevent_raiseScheduled;	/* true if raise events is scheduled */
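/*
 * The pending-event structure is a two-level queue: rxepoch_queue holds one
 * epoch per distinct second, sorted by second, and each epoch keeps its
 * events sorted by microsecond.  Roughly:
 *
 *   rxepoch_queue:  [epochSec=100] -> [epochSec=103] -> [epochSec=110]
 *                        |                 |                 |
 *                     events            events            events
 *                (sorted by usec)  (sorted by usec)  (sorted by usec)
 */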
#ifdef RX_ENABLE_LOCKS
#ifdef RX_LOCKS_DB
/* rxdb_fileID is used to identify the lock location, along with line#. */
static int rxdb_fileID = RXDB_FILE_RX_EVENT;
#endif /* RX_LOCKS_DB */
#define RX_ENABLE_LOCKS 1
afs_kmutex_t rxevent_lock;
#endif /* RX_ENABLE_LOCKS */
#ifdef AFS_PTHREAD_ENV
/*
 * This mutex protects the following global variables:
 * rxevent_initialized
 */
afs_kmutex_t rx_event_mutex;
#define LOCK_EV_INIT MUTEX_ENTER(&rx_event_mutex)
#define UNLOCK_EV_INIT MUTEX_EXIT(&rx_event_mutex)
#else
#define LOCK_EV_INIT
#define UNLOCK_EV_INIT
#endif /* AFS_PTHREAD_ENV */
static int
rxevent_adjTimes(struct clock *adjTime)
{
    /* backwards clock correction */
    int adjusted = 0;
    struct rxepoch *qep, *nqep;
    struct rxevent *qev, *nqev;

    for (queue_Scan(&rxepoch_queue, qep, nqep, rxepoch)) {
        for (queue_Scan(&qep->events, qev, nqev, rxevent)) {
            if (clock_Gt(&qev->eventTime, adjTime)) {
                clock_Sub(&qev->eventTime, adjTime);
                adjusted++;
            }
        }
        if (qep->epochSec > adjTime->sec) {
            qep->epochSec -= adjTime->sec;
        }
    }
    return adjusted;
}
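/*
 * Worked example of the correction above: if rxevent_lastEvent was 1000.500000
 * and the clock now reads 998.500000, adjTime is 2.000000; every pending
 * eventTime greater than 2.000000 is pulled back by two seconds (and each
 * epoch's second likewise), so events keep their distance from the new "now".
 */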
/* Pass in the number of events to allocate at a time */
int rxevent_initialized = 0;
void
rxevent_Init(int nEvents, void (*scheduler) (void))
{
    LOCK_EV_INIT;
    if (rxevent_initialized) {
        UNLOCK_EV_INIT;
        return;
    }
    MUTEX_INIT(&rxevent_lock, "rxevent_lock", MUTEX_DEFAULT, 0);
    clock_Init();
    if (nEvents)
        rxevent_allocUnit = nEvents;
    queue_Init(&rxevent_free);
    queue_Init(&rxepoch_free);
    queue_Init(&rxepoch_queue);
    rxevent_nFree = rxevent_nPosted = 0;
    rxepoch_nFree = 0;
    rxevent_ScheduledEarlierEvent = scheduler;
    rxevent_initialized = 1;
    clock_Zero(&rxevent_nextRaiseEvents);
    clock_Zero(&rxevent_lastEvent);
    rxevent_raiseScheduled = 0;
    UNLOCK_EV_INIT;
}
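/*
 * Usage sketch (hypothetical caller, not from this file): register a wakeup
 * hook so a dispatcher thread can recompute its sleep time whenever an
 * earlier event is posted.  "example_wakeup" and "example_setup" are
 * illustrative names.
 */
#if 0
static void
example_wakeup(void)
{
    /* e.g. signal the condition variable the dispatcher sleeps on */
}

static void
example_setup(void)
{
    rxevent_Init(20, example_wakeup);	/* allocate event records 20 at a time */
}
#endif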
/* Create and initialize new epoch structure */
struct rxepoch *
rxepoch_Allocate(struct clock *when)
{
    struct rxepoch *ep;
    int i;

    /* If we are short on free epoch entries, create a block of new ones
     * and add them to the free queue */
    if (queue_IsEmpty(&rxepoch_free)) {
#if defined(AFS_AIX32_ENV) && defined(KERNEL)
        ep = rxi_Alloc(sizeof(struct rxepoch));
        queue_Append(&rxepoch_free, &ep[0]), rxepoch_nFree++;
#else
#if defined(KERNEL) && !defined(UKERNEL) && defined(AFS_FBSD80_ENV)
        ep = (struct rxepoch *)
            afs_osi_Alloc_NoSleep(sizeof(struct rxepoch) * rxepoch_allocUnit);
        xsp = xfreemallocs;
        xfreemallocs =
            (struct xfreelist *)afs_osi_Alloc_NoSleep(sizeof(struct xfreelist));
#else
        ep = (struct rxepoch *)
            osi_Alloc(sizeof(struct rxepoch) * rxepoch_allocUnit);
        xsp = xfreemallocs;
        xfreemallocs =
            (struct xfreelist *)osi_Alloc(sizeof(struct xfreelist));
#endif
        xfreemallocs->mem = (void *)ep;
        xfreemallocs->size = sizeof(struct rxepoch) * rxepoch_allocUnit;
        xfreemallocs->next = xsp;
        for (i = 0; i < rxepoch_allocUnit; i++)
            queue_Append(&rxepoch_free, &ep[i]), rxepoch_nFree++;
#endif
    }
    ep = queue_First(&rxepoch_free, rxepoch);
    queue_Remove(ep);
    rxepoch_nFree--;
    ep->epochSec = when->sec;
    queue_Init(&ep->events);
    return ep;
}
/* Add the indicated event (function, arg) at the specified clock time.  The
 * "when" argument specifies when "func" should be called, in clock (clock.h)
 * units. */

static struct rxevent *
_rxevent_Post(struct clock *when, struct clock *now,
              void (*func) (struct rxevent *, void *, void *, int),
              void *arg, void *arg1, int arg2, int newargs)
{
    struct rxevent *ev, *evqe, *evqpr;
    struct rxepoch *ep, *epqe, *epqpr;
    int isEarliest = 0;

    MUTEX_ENTER(&rxevent_lock);
#ifdef RXDEBUG
    if (rx_Log_event) {
        struct clock now1;
        clock_GetTime(&now1);
        fprintf(rx_Log_event, "%ld.%ld: rxevent_Post(%ld.%ld, "
                              "%"AFS_PTR_FMT", %"AFS_PTR_FMT", "
                              "%"AFS_PTR_FMT", %d)\n",
                afs_printable_int32_ld(now1.sec),
                afs_printable_int32_ld(now1.usec),
                afs_printable_int32_ld(when->sec),
                afs_printable_int32_ld(when->usec),
                func, arg, arg1, arg2);
    }
#endif
    /* If a time was provided, check for consistency */
    if (now->sec || now->usec) {
        if (clock_Gt(&rxevent_lastEvent, now)) {
            struct clock adjTime = rxevent_lastEvent;
            clock_Sub(&adjTime, now);
            rxevent_adjTimes(&adjTime);
        }
        rxevent_lastEvent = *now;
    }
    /* Get a pointer to the epoch for this event, if none is found then
     * create a new epoch and insert it into the sorted list */
    for (ep = NULL, queue_ScanBackwards(&rxepoch_queue, epqe, epqpr, rxepoch)) {
        if (when->sec == epqe->epochSec) {
            /* already have a structure for this epoch */
            ep = epqe;
            if (ep == queue_First(&rxepoch_queue, rxepoch))
                isEarliest = 1;
            break;
        } else if (when->sec > epqe->epochSec) {
            /* Create a new epoch and insert after epqe */
            ep = rxepoch_Allocate(when);
            queue_InsertAfter(epqe, ep);
            break;
        }
    }
    if (ep == NULL) {
        /* Create a new epoch and place it at the head of the list */
        ep = rxepoch_Allocate(when);
        queue_Prepend(&rxepoch_queue, ep);
        isEarliest = 1;
    }
    /* If we're short on free event entries, create a block of new ones and add
     * them to the free queue */
    if (queue_IsEmpty(&rxevent_free)) {
        int i;
#if defined(AFS_AIX32_ENV) && defined(KERNEL)
        ev = rxi_Alloc(sizeof(struct rxevent));
        queue_Append(&rxevent_free, &ev[0]), rxevent_nFree++;
#else
#if defined(KERNEL) && !defined(UKERNEL) && defined(AFS_FBSD80_ENV)
        ev = (struct rxevent *)afs_osi_Alloc_NoSleep(sizeof(struct rxevent) *
                                                     rxevent_allocUnit);
        xsp = xfreemallocs;
        xfreemallocs =
            (struct xfreelist *)afs_osi_Alloc_NoSleep(sizeof(struct xfreelist));
#else
        ev = (struct rxevent *)osi_Alloc(sizeof(struct rxevent) *
                                         rxevent_allocUnit);
        xsp = xfreemallocs;
        xfreemallocs =
            (struct xfreelist *)osi_Alloc(sizeof(struct xfreelist));
#endif
        xfreemallocs->mem = (void *)ev;
        xfreemallocs->size = sizeof(struct rxevent) * rxevent_allocUnit;
        xfreemallocs->next = xsp;
        for (i = 0; i < rxevent_allocUnit; i++)
            queue_Append(&rxevent_free, &ev[i]), rxevent_nFree++;
#endif
    }
    /* Grab and initialize a new rxevent structure */
    ev = queue_First(&rxevent_free, rxevent);
    queue_Remove(ev);
    rxevent_nFree--;

    /* Record user defined event state */
    ev->eventTime = *when;
    if (newargs) {
        ev->func.newfunc = func;
    } else {
        ev->func.oldfunc = (void (*)(struct rxevent *, void *, void *))func;
    }
    ev->arg = arg;
    ev->arg1 = arg1;
    ev->arg2 = arg2;
    ev->newargs = newargs;
    rxevent_nPosted += 1;	/* Rather than ++, to shut high-C up
				 * regarding never-set variables */
    /* Insert the event into the sorted list of events for this epoch */
    for (queue_ScanBackwards(&ep->events, evqe, evqpr, rxevent)) {
        if (when->usec >= evqe->eventTime.usec) {
            /* Insert event after evqe */
            queue_InsertAfter(evqe, ev);
            MUTEX_EXIT(&rxevent_lock);
            return ev;
        }
    }
    /* Insert event at head of current epoch */
    queue_Prepend(&ep->events, ev);
    if (isEarliest && rxevent_ScheduledEarlierEvent
        && (!rxevent_raiseScheduled
            || clock_Lt(&ev->eventTime, &rxevent_nextRaiseEvents))) {
        rxevent_raiseScheduled = 1;
        clock_Zero(&rxevent_nextRaiseEvents);
        MUTEX_EXIT(&rxevent_lock);
        /* Notify our external scheduler */
        (*rxevent_ScheduledEarlierEvent) ();
        MUTEX_ENTER(&rxevent_lock);
    }
    MUTEX_EXIT(&rxevent_lock);
    return ev;
}
struct rxevent *
rxevent_Post(struct clock *when,
             void (*func) (struct rxevent *, void *, void *),
             void *arg, void *arg1)
{
    struct clock now;
    clock_Zero(&now);
    return _rxevent_Post(when, &now,
                         (void (*)(struct rxevent *, void *, void *, int))func,
                         arg, arg1, 0, 0);
}

struct rxevent *
rxevent_Post2(struct clock *when,
              void (*func) (struct rxevent *, void *, void *, int),
              void *arg, void *arg1, int arg2)
{
    struct clock now;
    clock_Zero(&now);
    return _rxevent_Post(when, &now, func, arg, arg1, arg2, 1);
}

struct rxevent *
rxevent_PostNow(struct clock *when, struct clock *now,
                void (*func) (struct rxevent *, void *, void *),
                void *arg, void *arg1)
{
    return _rxevent_Post(when, now,
                         (void (*)(struct rxevent *, void *, void *, int))func,
                         arg, arg1, 0, 0);
}

struct rxevent *
rxevent_PostNow2(struct clock *when, struct clock *now,
                 void (*func) (struct rxevent *, void *, void *, int),
                 void *arg, void *arg1, int arg2)
{
    return _rxevent_Post(when, now, func, arg, arg1, arg2, 1);
}
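/*
 * Usage sketch (hypothetical caller, not from this file): schedule
 * "example_timeout" 500 msec from now.  rxevent_Post passes a zeroed "now"
 * (no reference time); the PostNow variants take a caller-supplied "now",
 * which also enables the backwards-clock consistency check in _rxevent_Post.
 */
#if 0
static void
example_timeout(struct rxevent *ev, void *arg, void *arg1)
{
    /* runs from rxevent_RaiseEvents once "when" has passed */
}

static struct rxevent *
example_schedule(void *arg)
{
    struct clock now, when;

    clock_GetTime(&now);
    when = now;
    clock_Addmsec(&when, 500);
    return rxevent_PostNow(&when, &now, example_timeout, arg, NULL);
}
#endif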
/* Cancel an event by moving it from the event queue to the free list.
 * Warning, the event must be on the event queue!  If not, this should core
 * dump (reference through 0).  This routine should be called using the macro
 * event_Cancel, which checks for a null event and also nulls the caller's
 * event pointer after cancelling the event.
 */
#ifdef RX_ENABLE_LOCKS
#ifdef RX_REFCOUNT_CHECK
int rxevent_Cancel_type = 0;
#endif
#endif
void
rxevent_Cancel_1(struct rxevent *ev, struct rx_call *call,
                 int type)
{
#ifdef RXDEBUG
    if (rx_Log_event) {
        struct clock now;
        clock_GetTime(&now);
        fprintf(rx_Log_event, "%d.%d: rxevent_Cancel_1(%d.%d, %"
                AFS_PTR_FMT ", %" AFS_PTR_FMT ")\n",
                (int)now.sec, (int)now.usec, (int)ev->eventTime.sec,
                (int)ev->eventTime.usec, ev->func.newfunc,
                ev->arg);
    }
#endif
    /* Append it to the free list (rather than prepending) to keep the free
     * list hot so nothing pages out
     */
    MUTEX_ENTER(&rxevent_lock);
    if (!ev) {
        MUTEX_EXIT(&rxevent_lock);
        return;
    }
#ifdef RX_ENABLE_LOCKS
    /* It's possible we're currently processing this event. */
    if (queue_IsOnQueue(ev)) {
        queue_MoveAppend(&rxevent_free, ev);
        rxevent_nPosted--;
        rxevent_nFree++;
        if (call) {
            call->refCount--;
#ifdef RX_REFCOUNT_CHECK
            call->refCDebug[type]--;
            if (call->refCDebug[type] < 0) {
                rxevent_Cancel_type = type;
                osi_Panic("rxevent_Cancel: call refCount < 0");
            }
#endif /* RX_REFCOUNT_CHECK */
        }
    }
#else /* RX_ENABLE_LOCKS */
    queue_MoveAppend(&rxevent_free, ev);
    rxevent_nPosted--;
    rxevent_nFree++;
#endif /* RX_ENABLE_LOCKS */
    MUTEX_EXIT(&rxevent_lock);
}
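/*
 * Usage sketch (hypothetical caller): the null-check-and-clear pattern the
 * comment above attributes to the cancel macro.  The event field and
 * refcount type below are illustrative, not prescribed by this file.
 */
#if 0
static void
example_cancel(struct rx_call *call)
{
    if (call->delayedAckEvent) {
        rxevent_Cancel_1(call->delayedAckEvent, call, RX_CALL_REFCOUNT_DELAY);
        call->delayedAckEvent = NULL;
    }
}
#endif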
/* Process all epochs that have expired relative to the current clock time
 * (which is not re-evaluated unless clock_NewTime has been called).  The
 * relative time to the next epoch is returned in the output parameter next
 * and the function returns 1.  If there is no next epoch, the function
 * returns 0.
 */
int
rxevent_RaiseEvents(struct clock *next)
{
    struct rxepoch *ep;
    struct rxevent *ev;
    volatile struct clock now;
    MUTEX_ENTER(&rxevent_lock);

    /* Events are sorted by time, so only scan until an event is found that has
     * not yet timed out */

    clock_Zero(&now);
    while (queue_IsNotEmpty(&rxepoch_queue)) {
        ep = queue_First(&rxepoch_queue, rxepoch);
        if (queue_IsEmpty(&ep->events)) {
            queue_Remove(ep);
            queue_Append(&rxepoch_free, ep);
            rxepoch_nFree++;
            continue;
        }
        do {
          reraise:
            ev = queue_First(&ep->events, rxevent);
            if (clock_Lt(&now, &ev->eventTime)) {
                clock_GetTime(&now);
                if (clock_Gt(&rxevent_lastEvent, &now)) {
                    struct clock adjTime = rxevent_lastEvent;
                    int adjusted;
                    clock_Sub(&adjTime, &now);
                    adjusted = rxevent_adjTimes(&adjTime);
                    rxevent_lastEvent = now;
                    if (adjusted > 0)
                        goto reraise;
                }
                if (clock_Lt(&now, &ev->eventTime)) {
                    *next = rxevent_nextRaiseEvents = ev->eventTime;
                    rxevent_raiseScheduled = 1;
                    clock_Sub(next, &now);
                    MUTEX_EXIT(&rxevent_lock);
                    return 1;
                }
            }
            queue_Remove(ev);
            rxevent_nPosted--;
            MUTEX_EXIT(&rxevent_lock);
            if (ev->newargs) {
                ev->func.newfunc(ev, ev->arg, ev->arg1, ev->arg2);
            } else {
                ev->func.oldfunc(ev, ev->arg, ev->arg1);
            }
            MUTEX_ENTER(&rxevent_lock);
            queue_Append(&rxevent_free, ev);
            rxevent_nFree++;
        } while (queue_IsNotEmpty(&ep->events));
    }
#ifdef RXDEBUG
    if (rx_Log_event)
        fprintf(rx_Log_event, "rxevent_RaiseEvents(%d.%d)\n", (int)now.sec,
                (int)now.usec);
#endif
    rxevent_raiseScheduled = 0;
    MUTEX_EXIT(&rxevent_lock);
    return 0;
}
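/*
 * Usage sketch (hypothetical dispatcher, not from this file): drain expired
 * events, then sleep for the relative time returned in "next".
 * "example_wait" is an illustrative placeholder for a platform sleep that
 * the scheduler hook registered with rxevent_Init can interrupt.
 */
#if 0
static void
example_dispatch_loop(void)
{
    struct clock next;

    for (;;) {
        if (rxevent_RaiseEvents(&next))
            example_wait(&next);	/* sleep at most "next" (relative) */
        else
            example_wait(NULL);		/* nothing pending: sleep until posted */
    }
}
#endif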
void
shutdown_rxevent(void)
{
    struct xfreelist *xp, *nxp;

    LOCK_EV_INIT;
    if (!rxevent_initialized) {
        UNLOCK_EV_INIT;
        return;
    }
    rxevent_initialized = 0;
    UNLOCK_EV_INIT;
    MUTEX_DESTROY(&rxevent_lock);
#if defined(AFS_AIX32_ENV) && defined(KERNEL)
    /* Everything is freed in afs_osinet.c */
#else
    xp = xfreemallocs;
    while (xp) {
        nxp = xp->next;
        osi_Free((char *)xp->mem, xp->size);
        osi_Free((char *)xp, sizeof(struct xfreelist));
        xp = nxp;
    }
    xfreemallocs = NULL;
#endif
}