2 * Copyright 2000, International Business Machines Corporation and others.
5 * This software has been released under the terms of the IBM Public
6 * License. For details, see the LICENSE file in the top-level source
7 * directory or online at http://www.openafs.org/dl/license10.html
10 #include <afsconfig.h>
11 #include <afs/param.h>
14 # include <sys/time_impl.h>
19 # include "afs/afs_osi.h"
21 # include "afs/sysincludes.h"
22 # include "afsincludes.h"
23 # endif /* !UKERNEL */
24 # include "rx_kernel.h"
25 # include "rx_kmutex.h"
26 # if defined(AFS_SGI_ENV)
27 # include "sys/debug.h"
28 /* These are necessary to get curproc (used by GLOCK asserts) to work. */
30 # if !defined(AFS_SGI64_ENV) && !defined(UKERNEL)
33 extern void *osi_Alloc();
35 # if defined(AFS_OBSD_ENV)
36 # if defined(AFS_OBSD48_ENV)
45 # ifdef AFS_PTHREAD_ENV
46 # include "rx_pthread.h"
56 #include "rx_globals.h"
58 /* All event processing is relative to the apparent current time given by clock_GetTime */
/* Module-wide bookkeeping for the rx event package: free lists of event and
 * epoch records, the sorted queue of pending epochs, allocation counters,
 * and the hook used to wake an external scheduler.
 * NOTE(review): this copy looks sampled -- several surrounding declarations
 * (e.g. the struct holding the xfreelist member below) are truncated; confirm
 * against the complete rxevent.c before relying on this text. */
60 /* This should be static, but event_test wants to look at the free list... */
61 struct rx_queue rxevent_free; /* It's somewhat bogus to use a doubly-linked queue for the free list */
62 struct rx_queue rxepoch_free; /* It's somewhat bogus to use a doubly-linked queue for the free list */
63 static struct rx_queue rxepoch_queue; /* list of waiting epochs */
64 static int rxevent_allocUnit = 10; /* Allocation unit (number of event records to allocate at one time) */
65 static int rxepoch_allocUnit = 10; /* Allocation unit (number of epoch records to allocate at one time) */
66 int rxevent_nFree; /* Number of free event records */
67 int rxevent_nPosted; /* Current number of posted events */
68 int rxepoch_nFree; /* Number of free epoch records */
69 static void (*rxevent_ScheduledEarlierEvent) (void); /* Proc to call when an event is scheduled that is earlier than all other events */
/* Fragment of the xfreelist record used to remember raw allocations so
 * shutdown can free them (see the xfreemallocs->mem/size/next writes in the
 * allocators below); the struct's opening lines are not visible here. */
73 struct xfreelist *next;
75 static struct xfreelist *xfreemallocs = 0, *xsp = 0;
77 struct clock rxevent_nextRaiseEvents; /* Time of next call to raise events */
78 struct clock rxevent_lastEvent; /* backwards time detection */
79 int rxevent_raiseScheduled; /* true if raise events is scheduled */
81 #ifdef RX_ENABLE_LOCKS
83 /* rxdb_fileID is used to identify the lock location, along with line#. */
84 static int rxdb_fileID = RXDB_FILE_RX_EVENT;
85 #endif /* RX_LOCKS_DB */
86 #define RX_ENABLE_LOCKS 1
/* rxevent_lock serializes all access to the queues/counters above. */
87 afs_kmutex_t rxevent_lock;
88 #endif /* RX_ENABLE_LOCKS */
90 #ifdef AFS_PTHREAD_ENV
92 * This mutex protects the following global variables:
/* Pthread builds guard one-time initialization with rx_event_mutex; other
 * builds define the LOCK_EV_INIT/UNLOCK_EV_INIT macros away to nothing. */
96 afs_kmutex_t rx_event_mutex;
97 #define LOCK_EV_INIT MUTEX_ENTER(&rx_event_mutex)
98 #define UNLOCK_EV_INIT MUTEX_EXIT(&rx_event_mutex)
101 #define UNLOCK_EV_INIT
102 #endif /* AFS_PTHREAD_ENV */
/* Compensate for a backwards clock step: subtract adjTime from the deadline
 * of every queued event (clock_Sub on eventTime) and from the second-stamp of
 * every epoch that is far enough in the future (epochSec -= adjTime->sec), so
 * relative ordering of pending work survives the time jump.
 * NOTE(review): the return type, closing braces, and the else/return paths of
 * this function are missing from this copy -- upstream it returns a count of
 * adjusted entries (see the `adjusted = rxevent_adjTimes(...)` call later in
 * this file); confirm against the full source. */
106 rxevent_adjTimes(struct clock *adjTime)
108 /* backwards clock correction */
110 struct rxepoch *qep, *nqep;
111 struct rxevent *qev, *nqev;
/* Walk every epoch, and within each epoch every event, via the rx_queue
 * scan macros; nqep/nqev are the scan's lookahead cursors. */
113 for (queue_Scan(&rxepoch_queue, qep, nqep, rxepoch)) {
114 for (queue_Scan(&qep->events, qev, nqev, rxevent)) {
115 if (clock_Gt(&qev->eventTime, adjTime)) {
116 clock_Sub(&qev->eventTime, adjTime);
120 if (qep->epochSec > adjTime->sec) {
121 qep->epochSec -= adjTime->sec;
127 /* Pass in the number of events to allocate at a time */
/* Nonzero once rxevent_Init has run; used as an idempotency guard below so a
 * second call does not re-create the lock or reset live queues. */
128 int rxevent_initialized = 0;
/* Initialize the event package: create rxevent_lock, empty the three queues,
 * zero the counters and clocks, record the allocation unit (nEvents) and the
 * external scheduler callback fired when an earlier-than-all event is posted.
 * NOTE(review): the return type, LOCK_EV_INIT/UNLOCK_EV_INIT bracketing, and
 * the early-return body of the initialized check are on lines missing from
 * this copy. */
130 rxevent_Init(int nEvents, void (*scheduler) (void))
133 if (rxevent_initialized) {
137 MUTEX_INIT(&rxevent_lock, "rxevent_lock", MUTEX_DEFAULT, 0);
140 rxevent_allocUnit = nEvents;
141 queue_Init(&rxevent_free);
142 queue_Init(&rxepoch_free);
143 queue_Init(&rxepoch_queue);
144 rxevent_nFree = rxevent_nPosted = 0;
146 rxevent_ScheduledEarlierEvent = scheduler;
147 rxevent_initialized = 1;
148 clock_Zero(&rxevent_nextRaiseEvents);
149 clock_Zero(&rxevent_lastEvent);
150 rxevent_raiseScheduled = 0;
154 /* Create and initialize new epoch structure */
/* Return an epoch record stamped with when->sec and an empty event queue.
 * If the free list is empty, carve a fresh batch: one-at-a-time under AIX32
 * kernel (rxi_Alloc), otherwise rxepoch_allocUnit records via the
 * platform-appropriate allocator, remembering the raw block on the
 * xfreemallocs chain so shutdown_rxevent can release it.
 * NOTE(review): caller must hold rxevent_lock (presumably -- all callers in
 * this file do); the queue_Remove of `ep` from the free list and the final
 * `return ep;` are on lines missing from this copy. */
156 rxepoch_Allocate(struct clock *when)
161 /* If we are short on free epoch entries, create a block of new oned
162 * and add them to the free queue */
163 if (queue_IsEmpty(&rxepoch_free)) {
164 #if defined(AFS_AIX32_ENV) && defined(KERNEL)
165 ep = rxi_Alloc(sizeof(struct rxepoch));
166 queue_Append(&rxepoch_free, &ep[0]), rxepoch_nFree++;
168 #if defined(KERNEL) && !defined(UKERNEL) && defined(AFS_FBSD80_ENV)
/* FreeBSD 8 kernel: must not sleep while allocating here. */
169 ep = (struct rxepoch *)
170 afs_osi_Alloc_NoSleep(sizeof(struct rxepoch) * rxepoch_allocUnit);
173 (struct xfreelist *)afs_osi_Alloc_NoSleep(sizeof(struct xfreelist));
175 ep = (struct rxepoch *)
176 osi_Alloc(sizeof(struct rxepoch) * rxepoch_allocUnit);
179 (struct xfreelist *)osi_Alloc(sizeof(struct xfreelist));
/* Record the raw allocation (base, size) and push it on the bookkeeping
 * chain; individual records are threaded onto the free queue below. */
181 xfreemallocs->mem = (void *)ep;
182 xfreemallocs->size = sizeof(struct rxepoch) * rxepoch_allocUnit;
183 xfreemallocs->next = xsp;
184 for (i = 0; i < rxepoch_allocUnit; i++)
185 queue_Append(&rxepoch_free, &ep[i]), rxepoch_nFree++;
/* Take the first free record and stamp it for this epoch (one-second
 * granularity: only when->sec is kept). */
188 ep = queue_First(&rxepoch_free, rxepoch);
191 ep->epochSec = when->sec;
192 queue_Init(&ep->events);
196 /* Add the indicated event (function, arg) at the specified clock time. The
197 * "when" argument specifies when "func" should be called, in clock (clock.h)
/* Core posting routine shared by the four public rxevent_Post* wrappers.
 * Steps visible in this copy: take rxevent_lock; optionally trace to
 * rx_Log_event; detect a backwards clock (rxevent_lastEvent > *now) and
 * re-base all queued times via rxevent_adjTimes; find or create the epoch
 * for when->sec in the second-sorted rxepoch_queue; pull (or batch-allocate)
 * an rxevent record; fill in eventTime, callback (new- or old-style union
 * member, selected by `newargs`), and args; insert it usec-sorted within the
 * epoch's event list; and if it became the earliest pending event, drop the
 * lock and fire rxevent_ScheduledEarlierEvent.
 * Returns the posted event (the `return ev;` lines are missing from this
 * copy, as are several closing braces, the #else arms of the allocator
 * ifdefs, and the isEarliest bookkeeping -- confirm against the full file).
 * NOTE(review): `isEarliest` is used at the bottom but its computation is on
 * missing lines; presumably set when the event heads the first epoch. */
200 static struct rxevent *
201 _rxevent_Post(struct clock *when, struct clock *now,
202 void (*func) (struct rxevent *, void *, void *, int),
203 void *arg, void *arg1, int arg2, int newargs)
205 struct rxevent *ev, *evqe, *evqpr;
206 struct rxepoch *ep, *epqe, *epqpr;
209 MUTEX_ENTER(&rxevent_lock);
/* Optional event-log tracing (guard lines missing from this copy). */
213 clock_GetTime(&now1);
214 fprintf(rx_Log_event, "%ld.%ld: rxevent_Post(%ld.%ld, "
215 "%"AFS_PTR_FMT", %"AFS_PTR_FMT", "
216 "%"AFS_PTR_FMT", %d)\n",
217 afs_printable_int32_ld(now1.sec),
218 afs_printable_int32_ld(now1.usec),
219 afs_printable_int32_ld(when->sec),
220 afs_printable_int32_ld(when->usec),
225 /* If a time was provided, check for consistency */
/* Backwards-clock defense: if the last event we saw is later than "now",
 * shift every queued time back by the difference and remember "now". */
227 if (clock_Gt(&rxevent_lastEvent, now)) {
228 struct clock adjTime = rxevent_lastEvent;
229 clock_Sub(&adjTime, now);
230 rxevent_adjTimes(&adjTime);
232 rxevent_lastEvent = *now;
234 /* Get a pointer to the epoch for this event, if none is found then
235 * create a new epoch and insert it into the sorted list */
/* Scan newest-to-oldest; epochs are sorted by epochSec, so the first match
 * or first smaller second tells us where this event's epoch lives. */
236 for (ep = NULL, queue_ScanBackwards(&rxepoch_queue, epqe, epqpr, rxepoch)) {
237 if (when->sec == epqe->epochSec) {
238 /* already have an structure for this epoch */
240 if (ep == queue_First(&rxepoch_queue, rxepoch))
243 } else if (when->sec > epqe->epochSec) {
244 /* Create a new epoch and insert after qe */
245 ep = rxepoch_Allocate(when);
246 queue_InsertAfter(epqe, ep);
251 /* Create a new epoch and place it at the head of the list */
252 ep = rxepoch_Allocate(when);
253 queue_Prepend(&rxepoch_queue, ep);
257 /* If we're short on free event entries, create a block of new ones and add
258 * them to the free queue */
/* Same batching scheme as rxepoch_Allocate, for rxevent records. */
259 if (queue_IsEmpty(&rxevent_free)) {
261 #if defined(AFS_AIX32_ENV) && defined(KERNEL)
262 ev = rxi_Alloc(sizeof(struct rxevent));
263 queue_Append(&rxevent_free, &ev[0]), rxevent_nFree++;
266 #if defined(KERNEL) && !defined(UKERNEL) && defined(AFS_FBSD80_ENV)
267 ev = (struct rxevent *)afs_osi_Alloc_NoSleep(sizeof(struct rxevent) *
271 (struct xfreelist *)afs_osi_Alloc_NoSleep(sizeof(struct xfreelist));
273 ev = (struct rxevent *)osi_Alloc(sizeof(struct rxevent) *
277 (struct xfreelist *)osi_Alloc(sizeof(struct xfreelist));
279 xfreemallocs->mem = (void *)ev;
280 xfreemallocs->size = sizeof(struct rxevent) * rxevent_allocUnit;
281 xfreemallocs->next = xsp;
282 for (i = 0; i < rxevent_allocUnit; i++)
283 queue_Append(&rxevent_free, &ev[i]), rxevent_nFree++;
287 /* Grab and initialize a new rxevent structure */
288 ev = queue_First(&rxevent_free, rxevent);
292 /* Record user defined event state */
293 ev->eventTime = *when;
/* func is stored in one of two union members; newargs selects which
 * signature RaiseEvents will invoke (4-arg new style vs 3-arg old style). */
295 ev->func.newfunc = func;
297 ev->func.oldfunc = (void (*)(struct rxevent *, void *, void*))func;
302 ev->newargs = newargs;
303 rxevent_nPosted += 1; /* Rather than ++, to shut high-C up
304 * regarding never-set variables
307 /* Insert the event into the sorted list of events for this epoch */
/* All events in an epoch share the same second, so ordering within the
 * epoch only needs to compare microseconds. */
308 for (queue_ScanBackwards(&ep->events, evqe, evqpr, rxevent)) {
309 if (when->usec >= evqe->eventTime.usec) {
310 /* Insert event after evqe */
311 queue_InsertAfter(evqe, ev);
312 MUTEX_EXIT(&rxevent_lock);
316 /* Insert event at head of current epoch */
317 queue_Prepend(&ep->events, ev);
/* New earliest event: wake the external scheduler (outside the lock) so it
 * can re-arm its timer for the sooner deadline. */
318 if (isEarliest && rxevent_ScheduledEarlierEvent
319 && (!rxevent_raiseScheduled
320 || clock_Lt(&ev->eventTime, &rxevent_nextRaiseEvents))) {
321 rxevent_raiseScheduled = 1;
322 clock_Zero(&rxevent_nextRaiseEvents);
323 MUTEX_EXIT(&rxevent_lock);
324 /* Notify our external scheduler */
325 (*rxevent_ScheduledEarlierEvent) ();
326 MUTEX_ENTER(&rxevent_lock);
328 MUTEX_EXIT(&rxevent_lock);
/* Old-style (3-arg callback) post using the current time for "now"; the
 * clock_GetTime(&now) call and the trailing arguments of the _rxevent_Post
 * call (arg, arg1, 0, 0) are on lines missing from this copy -- confirm
 * against the full file. Forwards with newargs presumably 0 so RaiseEvents
 * uses func.oldfunc. */
333 rxevent_Post(struct clock *when,
334 void (*func) (struct rxevent *, void *, void *),
335 void *arg, void *arg1)
339 return _rxevent_Post(when, &now,
340 (void (*)(struct rxevent *, void *, void *, int))func,
/* New-style (4-arg callback) post using the current time for "now"
 * (the clock_GetTime(&now) line is missing from this copy); newargs=1 tells
 * RaiseEvents to invoke func.newfunc with arg2. */
345 rxevent_Post2(struct clock *when,
346 void (*func) (struct rxevent *, void *, void *, int),
347 void *arg, void *arg1, int arg2)
351 return _rxevent_Post(when, &now, func, arg, arg1, arg2, 1);
/* Old-style post where the caller supplies "now" (avoids an extra
 * clock_GetTime). The trailing arguments of the _rxevent_Post call
 * (arg, arg1, 0, 0) are on lines missing from this copy. */
355 rxevent_PostNow(struct clock *when, struct clock *now,
356 void (*func) (struct rxevent *, void *, void *),
357 void *arg, void *arg1)
359 return _rxevent_Post(when, now,
360 (void (*)(struct rxevent *, void *, void *, int))func,
/* New-style post where the caller supplies "now"; newargs=1 selects the
 * 4-arg callback path in RaiseEvents. */
365 rxevent_PostNow2(struct clock *when, struct clock *now,
366 void (*func) (struct rxevent *, void *, void *, int),
367 void *arg, void *arg1, int arg2)
369 return _rxevent_Post(when, now, func, arg, arg1, arg2, 1);
372 /* Cancel an event by moving it from the event queue to the free list.
373 * Warning, the event must be on the event queue! If not, this should core
374 * dump (reference through 0). This routine should be called using the macro
375 * event_Cancel, which checks for a null event and also nulls the caller's
376 * event pointer after cancelling the event.
/* Under RX_ENABLE_LOCKS a "type" parameter (declared on a line missing from
 * this copy) indexes the per-call reference-count debugging array. */
378 #ifdef RX_ENABLE_LOCKS
379 #ifdef RX_REFCOUNT_CHECK
/* Stashes the offending type before panicking, so it is visible in a dump. */
380 int rxevent_Cancel_type = 0;
/* Move ev from its epoch's event list back to rxevent_free. With locks
 * enabled, tolerate the event being mid-dispatch: queue_IsOnQueue guards the
 * move, and the refcount-debug branch decrements/validates call->refCDebug.
 * NOTE(review): the function's return type, several closing braces, the
 * logging guard around the fprintf, and the locked/unlocked #else details
 * are on lines missing from this copy -- confirm against the full file. */
385 rxevent_Cancel_1(struct rxevent *ev, struct rx_call *call,
392 fprintf(rx_Log_event, "%d.%d: rxevent_Cancel_1(%d.%d, %"
393 AFS_PTR_FMT ", %p" AFS_PTR_FMT ")\n",
394 (int)now.sec, (int)now.usec, (int)ev->eventTime.sec,
395 (int)ev->eventTime.usec, ev->func.newfunc,
399 /* Append it to the free list (rather than prepending) to keep the free
400 * list hot so nothing pages out
402 MUTEX_ENTER(&rxevent_lock);
404 MUTEX_EXIT(&rxevent_lock);
407 #ifdef RX_ENABLE_LOCKS
408 /* It's possible we're currently processing this event. */
409 if (queue_IsOnQueue(ev)) {
410 queue_MoveAppend(&rxevent_free, ev);
415 #ifdef RX_REFCOUNT_CHECK
416 call->refCDebug[type]--;
417 if (call->refCDebug[type] < 0) {
/* An underflow here means a double-cancel or mismatched hold/release. */
418 rxevent_Cancel_type = type;
419 osi_Panic("rxevent_Cancel: call refCount < 0");
421 #endif /* RX_REFCOUNT_CHECK */
424 #else /* RX_ENABLE_LOCKS */
425 queue_MoveAppend(&rxevent_free, ev);
428 #endif /* RX_ENABLE_LOCKS */
429 MUTEX_EXIT(&rxevent_lock);
432 /* Process all epochs that have expired relative to the current clock time
433 * (which is not re-evaluated unless clock_NewTime has been called). The
434 * relative time to the next epoch is returned in the output parameter next
435 * and the function returns 1. If there are is no next epoch, the function
/* Dispatch loop: pop each expired epoch's events in order, invoking the
 * stored callback (new 4-arg or old 3-arg form, selected per-event) with the
 * lock dropped, then recycle the event and, once empty, the epoch. On
 * meeting the first not-yet-due event, store its absolute time in
 * rxevent_nextRaiseEvents, return the relative delay through *next, and
 * report 1. Includes the same backwards-clock re-check/adjust as posting.
 * NOTE(review): the return type, the clock_GetTime/clock_NewTime setup, the
 * ev->newargs dispatch guard, the epoch queue_Remove calls, and the final
 * return are on lines missing from this copy -- confirm against the full
 * file. The `volatile` on now presumably guards against the lock
 * release/reacquire around callbacks. */
439 rxevent_RaiseEvents(struct clock *next)
443 volatile struct clock now;
444 MUTEX_ENTER(&rxevent_lock);
446 /* Events are sorted by time, so only scan until an event is found that has
447 * not yet timed out */
450 while (queue_IsNotEmpty(&rxepoch_queue)) {
451 ep = queue_First(&rxepoch_queue, rxepoch);
/* Drained epoch: return the record to the epoch free list. */
452 if (queue_IsEmpty(&ep->events)) {
454 queue_Append(&rxepoch_free, ep);
460 ev = queue_First(&ep->events, rxevent);
461 if (clock_Lt(&now, &ev->eventTime)) {
/* Earliest event is in the future -- but first rule out a backwards
 * clock step before trusting that comparison. */
463 if (clock_Gt(&rxevent_lastEvent, &now)) {
464 struct clock adjTime = rxevent_lastEvent;
466 clock_Sub(&adjTime, &now);
467 adjusted = rxevent_adjTimes(&adjTime);
468 rxevent_lastEvent = now;
472 if (clock_Lt(&now, &ev->eventTime)) {
/* Still in the future after adjustment: schedule the next raise
 * and hand the caller the relative delay. */
473 *next = rxevent_nextRaiseEvents = ev->eventTime;
474 rxevent_raiseScheduled = 1;
475 clock_Sub(next, &now);
476 MUTEX_EXIT(&rxevent_lock);
/* Due event: run the user callback without holding rxevent_lock, then
 * reclaim the record. */
482 MUTEX_EXIT(&rxevent_lock);
484 ev->func.newfunc(ev, ev->arg, ev->arg1, ev->arg2);
486 ev->func.oldfunc(ev, ev->arg, ev->arg1);
488 MUTEX_ENTER(&rxevent_lock);
489 queue_Append(&rxevent_free, ev);
491 } while (queue_IsNotEmpty(&ep->events));
495 fprintf(rx_Log_event, "rxevent_RaiseEvents(%d.%d)\n", (int)now.sec,
/* Queue fully drained: nothing left to raise. */
498 rxevent_raiseScheduled = 0;
499 MUTEX_EXIT(&rxevent_lock);
504 shutdown_rxevent(void)
506 struct xfreelist *xp, *nxp;
509 if (!rxevent_initialized) {
513 rxevent_initialized = 0;
515 MUTEX_DESTROY(&rxevent_lock);
516 #if defined(AFS_AIX32_ENV) && defined(KERNEL)
517 /* Everything is freed in afs_osinet.c */
522 osi_Free((char *)xp->mem, xp->size);
523 osi_Free((char *)xp, sizeof(struct xfreelist));