/*
 * Copyright 2000, International Business Machines Corporation and others.
 * All Rights Reserved.
 *
 * This software has been released under the terms of the IBM Public
 * License.  For details, see the LICENSE file in the top-level source
 * directory or online at http://www.openafs.org/dl/license10.html
 */
#include <afsconfig.h>
#ifdef KERNEL
#include "afs/param.h"
#else
#include <afs/param.h>
#endif

#ifdef AFS_SUN59_ENV
#include <sys/time_impl.h>
#endif

#ifdef KERNEL
#ifndef UKERNEL
#include "afs/afs_osi.h"
#else /* !UKERNEL */
#include "afs/sysincludes.h"
#include "afsincludes.h"
#endif /* !UKERNEL */
#include "rx/rx_clock.h"
#include "rx/rx_queue.h"
#include "rx/rx_event.h"
#include "rx/rx_kernel.h"
#include "rx_kmutex.h"
#ifdef RX_ENABLE_LOCKS
#include "rx/rx.h"
#endif /* RX_ENABLE_LOCKS */
#include "rx/rx_globals.h"
#if defined(AFS_SGI_ENV)
#include "sys/debug.h"
/* These are necessary to get curproc (used by GLOCK asserts) to work. */
#include "h/proc.h"
#if !defined(AFS_SGI64_ENV) && !defined(UKERNEL)
#include "h/user.h"
#endif
extern void *osi_Alloc();
#endif
#if defined(AFS_OBSD_ENV)
#include "h/proc.h"
#endif
#else /* KERNEL */
#include <stdio.h>
#include "rx_user.h"
#include "rx_clock.h"
#include "rx_queue.h"
#include "rx_event.h"
#ifdef AFS_PTHREAD_ENV
#include <rx/rx_pthread.h>
#else
#include "rx.h"
#endif /* AFS_PTHREAD_ENV */
#ifdef RX_ENABLE_LOCKS
#include "rx_kmutex.h"
#endif /* RX_ENABLE_LOCKS */
#include "rx_globals.h"
#endif /* KERNEL */

/* All event processing is relative to the apparent current time given by clock_GetTime */
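
/* Callers build the absolute "when" arguments used below from the current
 * time plus an offset.  An illustrative sketch (the 500 ms delay, myFunc,
 * and myArg are hypothetical; clock_GetTime and clock_Add come from
 * rx_clock.h):
 *
 *     struct clock when, delta;
 *     clock_GetTime(&when);
 *     delta.sec = 0;
 *     delta.usec = 500000;
 *     clock_Add(&when, &delta);
 *     (void) rxevent_Post(&when, myFunc, myArg, (void *)0);
 */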

/* This should be static, but event_test wants to look at the free list... */
struct rx_queue rxevent_free;	/* It's somewhat bogus to use a doubly-linked queue for the free list */
struct rx_queue rxepoch_free;	/* It's somewhat bogus to use a doubly-linked queue for the free list */
static struct rx_queue rxepoch_queue;	/* list of waiting epochs */
static int rxevent_allocUnit = 10;	/* Allocation unit (number of event records to allocate at one time) */
static int rxepoch_allocUnit = 10;	/* Allocation unit (number of epoch records to allocate at one time) */
int rxevent_nFree;		/* Number of free event records */
int rxevent_nPosted;		/* Current number of posted events */
int rxepoch_nFree;		/* Number of free epoch records */
static void (*rxevent_ScheduledEarlierEvent)(void);	/* Proc to call when an event is scheduled that is earlier than all other events */

/* Each block of memory allocated for events or epochs is recorded here so
 * that shutdown_rxevent can release it */
struct xfreelist {
    void *mem;
    int size;
    struct xfreelist *next;
};
static struct xfreelist *xfreemallocs = 0, *xsp = 0;

struct clock rxevent_nextRaiseEvents;	/* Time of next call to raise events */
int rxevent_raiseScheduled;	/* true if raise events is scheduled */
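
/* Scheduling handshake: rxevent_Post sets rxevent_raiseScheduled and calls
 * rxevent_ScheduledEarlierEvent when a newly posted event precedes
 * rxevent_nextRaiseEvents, and rxevent_RaiseEvents stores the absolute time
 * of the next pending event back into rxevent_nextRaiseEvents before
 * returning. */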

#ifdef RX_ENABLE_LOCKS
#ifdef RX_LOCKS_DB
/* rxdb_fileID is used to identify the lock location, along with line#. */
static int rxdb_fileID = RXDB_FILE_RX_EVENT;
#endif /* RX_LOCKS_DB */
#define RX_ENABLE_LOCKS 1
afs_kmutex_t rxevent_lock;
#endif /* RX_ENABLE_LOCKS */

#ifdef AFS_PTHREAD_ENV
/*
 * This mutex protects the following global variables:
 * rxevent_initialized
 */

#include <assert.h>
pthread_mutex_t rx_event_mutex;
#define LOCK_EV_INIT assert(pthread_mutex_lock(&rx_event_mutex)==0);
#define UNLOCK_EV_INIT assert(pthread_mutex_unlock(&rx_event_mutex)==0);
#else
#define LOCK_EV_INIT
#define UNLOCK_EV_INIT
#endif /* AFS_PTHREAD_ENV */

/* Pass in the number of events to allocate at a time */
int rxevent_initialized = 0;
void rxevent_Init(int nEvents, void (*scheduler)(void))
{
    LOCK_EV_INIT
    if (rxevent_initialized) {
        UNLOCK_EV_INIT
        return;
    }
    MUTEX_INIT(&rxevent_lock, "rxevent_lock", MUTEX_DEFAULT, 0);
    clock_Init();
    if (nEvents) rxevent_allocUnit = nEvents;
    queue_Init(&rxevent_free);
    queue_Init(&rxepoch_free);
    queue_Init(&rxepoch_queue);
    rxevent_nFree = rxevent_nPosted = 0;
    rxepoch_nFree = 0;
    rxevent_ScheduledEarlierEvent = scheduler;
    rxevent_initialized = 1;
    clock_Zero(&rxevent_nextRaiseEvents);
    rxevent_raiseScheduled = 0;
    UNLOCK_EV_INIT
}
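
/* Illustrative initialization sketch: a threaded server passes a wakeup
 * routine so that posting an earlier event can interrupt the event loop's
 * wait (wakeup_listener is hypothetical; rx itself passes its own hook,
 * and a null scheduler simply disables the notification):
 *
 *     extern void wakeup_listener(void);
 *
 *     rxevent_Init(20, wakeup_listener);
 */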

/* Create and initialize new epoch structure */
struct rxepoch *rxepoch_Allocate(struct clock *when)
{
    struct rxepoch *ep;
    int i;

    /* If we are short on free epoch entries, create a block of new ones
     * and add them to the free queue */
    if (queue_IsEmpty(&rxepoch_free)) {
#if defined(AFS_AIX32_ENV) && defined(KERNEL)
        ep = (struct rxepoch *) rxi_Alloc(sizeof(struct rxepoch));
        queue_Append(&rxepoch_free, &ep[0]), rxepoch_nFree++;
#else
        ep = (struct rxepoch *)
             osi_Alloc(sizeof(struct rxepoch) * rxepoch_allocUnit);
        xsp = xfreemallocs;
        xfreemallocs = (struct xfreelist *) osi_Alloc(sizeof(struct xfreelist));
        xfreemallocs->mem = (void *)ep;
        xfreemallocs->size = sizeof(struct rxepoch) * rxepoch_allocUnit;
        xfreemallocs->next = xsp;
        for (i = 0; i < rxepoch_allocUnit; i++)
            queue_Append(&rxepoch_free, &ep[i]), rxepoch_nFree++;
#endif
    }

    /* Grab and initialize a new rxepoch structure */
    ep = queue_First(&rxepoch_free, rxepoch);
    queue_Remove(ep);
    rxepoch_nFree--;

    /* Initialize the epoch queue */
    ep->epochSec = when->sec;
    queue_Init(&ep->events);
    return ep;
}
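
/* Design note: epochs bucket pending events by whole seconds (epochSec),
 * so the insertion scan in rxevent_Post walks only the events of a single
 * second, kept sorted by their microsecond values, instead of the entire
 * pending set. */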

/* Add the indicated event (function, arg) at the specified clock time.  The
 * "when" argument specifies when "func" should be called, in clock (clock.h)
 * units. */

#ifdef KERNEL
struct rxevent *rxevent_Post(struct clock *when,
    void (*func)(struct rxevent *event,
        struct rx_connection *conn, struct rx_call *acall),
    void *arg, void *arg1)
#else
struct rxevent *rxevent_Post(struct clock *when,
    void (*func)(),
    void *arg, void *arg1)
#endif
{
    register struct rxevent *ev, *evqe, *evqpr;
    register struct rxepoch *ep, *epqe, *epqpr;
    int isEarliest = 0;

    MUTEX_ENTER(&rxevent_lock);
    AFS_ASSERT_RXGLOCK();
#ifdef RXDEBUG
    if (rx_Log_event) {
        struct clock now;
        clock_GetTime(&now);
        fprintf(rx_Log_event, "%d.%d: rxevent_Post(%d.%d, %lx, %lx)\n",
                (int) now.sec, (int) now.usec, (int) when->sec,
                (int) when->usec, (unsigned long) func, (unsigned long) arg);
    }
#endif

    /* Get a pointer to the epoch for this event, if none is found then
     * create a new epoch and insert it into the sorted list */
    for (ep = NULL, queue_ScanBackwards(&rxepoch_queue, epqe, epqpr, rxepoch)) {
        if (when->sec == epqe->epochSec) {
            /* already have a structure for this epoch */
            ep = epqe;
            if (ep == queue_First(&rxepoch_queue, rxepoch))
                isEarliest = 1;
            break;
        } else if (when->sec > epqe->epochSec) {
            /* Create a new epoch and insert after epqe */
            ep = rxepoch_Allocate(when);
            queue_InsertAfter(epqe, ep);
            break;
        }
    }
    if (ep == NULL) {
        /* Create a new epoch and place it at the head of the list */
        ep = rxepoch_Allocate(when);
        queue_Prepend(&rxepoch_queue, ep);
        isEarliest = 1;
    }

    /* If we're short on free event entries, create a block of new ones and add
     * them to the free queue */
    if (queue_IsEmpty(&rxevent_free)) {
        register int i;
#if defined(AFS_AIX32_ENV) && defined(KERNEL)
        ev = (struct rxevent *) rxi_Alloc(sizeof(struct rxevent));
        queue_Append(&rxevent_free, &ev[0]), rxevent_nFree++;
#else
        ev = (struct rxevent *) osi_Alloc(sizeof(struct rxevent) * rxevent_allocUnit);
        xsp = xfreemallocs;
        xfreemallocs = (struct xfreelist *) osi_Alloc(sizeof(struct xfreelist));
        xfreemallocs->mem = (void *)ev;
        xfreemallocs->size = sizeof(struct rxevent) * rxevent_allocUnit;
        xfreemallocs->next = xsp;
        for (i = 0; i < rxevent_allocUnit; i++)
            queue_Append(&rxevent_free, &ev[i]), rxevent_nFree++;
#endif
    }

    /* Grab and initialize a new rxevent structure */
    ev = queue_First(&rxevent_free, rxevent);
    queue_Remove(ev);
    rxevent_nFree--;

    /* Record user defined event state */
    ev->eventTime = *when;
    ev->func = func;
    ev->arg = arg;
    ev->arg1 = arg1;
    rxevent_nPosted += 1;	/* Rather than ++, to shut high-C up
				 * regarding never-set variables
				 */
263 for (queue_ScanBackwards(&ep->events, evqe, evqpr, rxevent)) {
264 if (when->usec >= evqe->eventTime.usec) {
265 /* Insert event after evqe */
266 queue_InsertAfter(evqe, ev);
267 MUTEX_EXIT(&rxevent_lock);
271 /* Insert event at head of current epoch */
272 queue_Prepend(&ep->events, ev);
    if (isEarliest && rxevent_ScheduledEarlierEvent &&
        (!rxevent_raiseScheduled ||
         clock_Lt(&ev->eventTime, &rxevent_nextRaiseEvents))) {
        rxevent_raiseScheduled = 1;
        clock_Zero(&rxevent_nextRaiseEvents);
        MUTEX_EXIT(&rxevent_lock);
        /* Notify our external scheduler */
        (*rxevent_ScheduledEarlierEvent)();
        MUTEX_ENTER(&rxevent_lock);
    }
    MUTEX_EXIT(&rxevent_lock);
    return ev;
}
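
/* Illustrative posting sketch (ping, conn, and acall are hypothetical; the
 * callback signature matches the typed prototype above):
 *
 *     static void ping(struct rxevent *event, struct rx_connection *conn,
 *                      struct rx_call *acall)
 *     {
 *         ... the event has already been dequeued when this runs ...
 *     }
 *
 *     struct clock when;
 *     clock_GetTime(&when);
 *     when.sec += 5;
 *     ev = rxevent_Post(&when, ping, conn, (void *)0);
 *
 * The returned handle remains valid until the event fires or is cancelled.
 */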

/* Cancel an event by moving it from the event queue to the free list.
 * Warning, the event must be on the event queue!  If not, this should core
 * dump (reference through 0).  This routine should be called using the macro
 * event_Cancel, which checks for a null event and also nulls the caller's
 * event pointer after cancelling the event.
 */
#ifdef RX_ENABLE_LOCKS
#ifdef RX_REFCOUNT_CHECK
int rxevent_Cancel_type = 0;
#endif
#endif

void rxevent_Cancel_1(register struct rxevent *ev,
                      register struct rx_call *call, register int type)
{
#ifdef RXDEBUG
    if (rx_Log_event) {
        struct clock now;
        clock_GetTime(&now);
        fprintf(rx_Log_event, "%d.%d: rxevent_Cancel_1(%d.%d, %lx, %lx)\n",
                (int) now.sec, (int) now.usec, (int) ev->eventTime.sec,
                (int) ev->eventTime.usec, (unsigned long) ev->func,
                (unsigned long) ev->arg);
    }
#endif
    /* Append it to the free list (rather than prepending) to keep the free
     * list hot so nothing pages out
     */
    AFS_ASSERT_RXGLOCK();
    MUTEX_ENTER(&rxevent_lock);
    if (!ev) {
        MUTEX_EXIT(&rxevent_lock);
        return;
    }

#ifdef RX_ENABLE_LOCKS
    /* It's possible we're currently processing this event. */
    if (queue_IsOnQueue(ev)) {
        queue_MoveAppend(&rxevent_free, ev);
        rxevent_nPosted--;
        rxevent_nFree++;
        if (call) {
            call->refCount--;
#ifdef RX_REFCOUNT_CHECK
            call->refCDebug[type]--;
            if (call->refCDebug[type] < 0) {
                rxevent_Cancel_type = type;
                osi_Panic("rxevent_Cancel: call refCount < 0");
            }
#endif /* RX_REFCOUNT_CHECK */
        }
    }
#else /* RX_ENABLE_LOCKS */
    queue_MoveAppend(&rxevent_free, ev);
    rxevent_nPosted--;
    rxevent_nFree++;
#endif /* RX_ENABLE_LOCKS */
    MUTEX_EXIT(&rxevent_lock);
}
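
/* Callers normally go through the rxevent_Cancel macro (rx_event.h), which
 * supplies the null check and clears the caller's pointer; for example,
 * with a pending-event field and refcount type as used elsewhere in rx:
 *
 *     rxevent_Cancel(call->delayedAckEvent, call, RX_CALL_REFCOUNT_DELAY);
 *
 * leaves call->delayedAckEvent NULL whether or not an event was pending.
 */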

/* Process all epochs that have expired relative to the current clock time
 * (which is not re-evaluated unless clock_NewTime has been called).  The
 * relative time to the next epoch is returned in the output parameter next
 * and the function returns 1.  If there is no next epoch, the function
 * returns 0.
 */
int rxevent_RaiseEvents(struct clock *next)
{
    register struct rxepoch *ep;
    register struct rxevent *ev;
    volatile struct clock now;

    MUTEX_ENTER(&rxevent_lock);

    AFS_ASSERT_RXGLOCK();

    /* Events are sorted by time, so only scan until an event is found that has
     * not yet timed out */

    clock_Zero(&now);
    while (queue_IsNotEmpty(&rxepoch_queue)) {
        ep = queue_First(&rxepoch_queue, rxepoch);
        if (queue_IsEmpty(&ep->events)) {
            queue_Remove(ep);
            queue_Append(&rxepoch_free, ep);
            rxepoch_nFree++;
            continue;
        }
        do {
            ev = queue_First(&ep->events, rxevent);
            if (clock_Lt(&now, &ev->eventTime)) {
                clock_GetTime(&now);
                if (clock_Lt(&now, &ev->eventTime)) {
                    *next = rxevent_nextRaiseEvents = ev->eventTime;
                    rxevent_raiseScheduled = 1;
                    clock_Sub(next, &now);
                    MUTEX_EXIT(&rxevent_lock);
                    return 1;
                }
            }
            queue_Remove(ev);
            rxevent_nPosted--;
            MUTEX_EXIT(&rxevent_lock);
            ev->func(ev, ev->arg, ev->arg1);
            MUTEX_ENTER(&rxevent_lock);
            queue_Append(&rxevent_free, ev);
            rxevent_nFree++;
        } while (queue_IsNotEmpty(&ep->events));
    }
#ifdef RXDEBUG
    if (rx_Log_event)
        fprintf(rx_Log_event, "rxevent_RaiseEvents(%d.%d)\n",
                (int) now.sec, (int) now.usec);
#endif
    rxevent_raiseScheduled = 0;
    MUTEX_EXIT(&rxevent_lock);
    return 0;
}
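
/* Illustrative driver loop (the wait primitives are hypothetical): the
 * event thread alternates between raising expired events and sleeping
 * until the returned relative timeout elapses or the scheduler hook
 * wakes it:
 *
 *     struct clock next;
 *     for (;;) {
 *         if (rxevent_RaiseEvents(&next))
 *             sleep_until_timeout_or_wakeup(&next);
 *         else
 *             sleep_until_wakeup();
 *     }
 */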

void shutdown_rxevent(void)
{
    struct xfreelist *xp, *nxp;

    LOCK_EV_INIT
    if (!rxevent_initialized) {
        UNLOCK_EV_INIT
        return;
    }
    rxevent_initialized = 0;
    UNLOCK_EV_INIT
    MUTEX_DESTROY(&rxevent_lock);
#if defined(AFS_AIX32_ENV) && defined(KERNEL)
    /* Everything is freed in afs_osinet.c */
#else
    for (xp = xfreemallocs; xp; xp = nxp) {
        nxp = xp->next;
        osi_Free((char *)xp->mem, xp->size);
        osi_Free((char *)xp, sizeof(struct xfreelist));
    }
    xfreemallocs = NULL;
#endif
}