/*
 * Copyright 2000, International Business Machines Corporation and others.
 * All Rights Reserved.
 *
 * This software has been released under the terms of the IBM Public
 * License. For details, see the LICENSE file in the top-level source
 * directory or online at http://www.openafs.org/dl/license10.html
 */
#ifdef KERNEL
#include "../afs/param.h"
#include "../afs/afs_osi.h"
#include "../afs/sysincludes.h"
#include "../afs/afsincludes.h"
#include "../rx/rx_clock.h"
#include "../rx/rx_queue.h"
#include "../rx/rx_event.h"
#include "../rx/rx_kernel.h"
#include "../rx/rx_kmutex.h"
#ifdef RX_ENABLE_LOCKS
#endif /* RX_ENABLE_LOCKS */
#include "../rx/rx_globals.h"
#if defined(AFS_SGI_ENV)
#include "../sys/debug.h"
/* These are necessary to get curproc (used by GLOCK asserts) to work. */
#include "../h/proc.h"
#if !defined(AFS_SGI64_ENV) && !defined(UKERNEL)
#include "../h/user.h"
#endif /* !AFS_SGI64_ENV && !UKERNEL */
extern void *osi_Alloc();
#endif /* AFS_SGI_ENV */
#else /* KERNEL */
#include "afs/param.h"
#ifdef AFS_PTHREAD_ENV
#include <rx/rx_pthread.h>
#endif /* AFS_PTHREAD_ENV */
#ifdef RX_ENABLE_LOCKS
#endif /* RX_ENABLE_LOCKS */
#include "rx_globals.h"
#endif /* KERNEL */
/* All event processing is relative to the apparent current time given by clock_GetTime */

/* This should be static, but event_test wants to look at the free list... */
struct rx_queue rxevent_free;	/* It's somewhat bogus to use a doubly-linked queue for the free list */
struct rx_queue rxepoch_free;	/* It's somewhat bogus to use a doubly-linked queue for the free list */
static struct rx_queue rxepoch_queue;	/* list of waiting epochs */
static int rxevent_allocUnit = 10;	/* Allocation unit (number of event records to allocate at one time) */
static int rxepoch_allocUnit = 10;	/* Allocation unit (number of epoch records to allocate at one time) */
int rxevent_nFree;		/* Number of free event records */
int rxevent_nPosted;		/* Current number of posted events */
int rxepoch_nFree;		/* Number of free epoch records */
static int (*rxevent_ScheduledEarlierEvent)();	/* Proc to call when an event is scheduled that is earlier than all other events */
/* Memory blocks allocated for the free lists, chained so they can be
 * released at shutdown */
struct xfreelist {
    void *mem;
    int size;
    struct xfreelist *next;
};
static struct xfreelist *xfreemallocs = 0, *xsp = 0;
struct clock rxevent_nextRaiseEvents;	/* Time of next call to raise events */
int rxevent_raiseScheduled;		/* true if raise events is scheduled */
#ifdef RX_ENABLE_LOCKS
#ifdef RX_LOCKS_DB
/* rxdb_fileID is used to identify the lock location, along with line#. */
static int rxdb_fileID = RXDB_FILE_RX_EVENT;
#endif /* RX_LOCKS_DB */
#define RX_ENABLE_LOCKS 1
afs_kmutex_t rxevent_lock;
#endif /* RX_ENABLE_LOCKS */
#ifdef AFS_PTHREAD_ENV
/*
 * This mutex protects the following global variables:
 * rxevent_initialized
 */

pthread_mutex_t rx_event_mutex;
#define LOCK_EV_INIT assert(pthread_mutex_lock(&rx_event_mutex)==0);
#define UNLOCK_EV_INIT assert(pthread_mutex_unlock(&rx_event_mutex)==0);
#else
#define LOCK_EV_INIT
#define UNLOCK_EV_INIT
#endif /* AFS_PTHREAD_ENV */
int rxevent_initialized = 0;

/* Initialize the event package.  nEvents, if non-zero, is the number of
 * event records to allocate at a time.  scheduler, if non-null, is called
 * whenever an event is posted that is earlier than all other pending
 * events. */
void rxevent_Init(nEvents, scheduler)
    int nEvents;
    void (*scheduler)();
{
    LOCK_EV_INIT
    if (rxevent_initialized) {
	UNLOCK_EV_INIT
	return;
    }
    MUTEX_INIT(&rxevent_lock, "rxevent_lock", MUTEX_DEFAULT, 0);
    clock_Init();
    if (nEvents) rxevent_allocUnit = nEvents;
    queue_Init(&rxevent_free);
    queue_Init(&rxepoch_free);
    queue_Init(&rxepoch_queue);
    rxevent_nFree = rxevent_nPosted = 0;
    rxepoch_nFree = 0;
    rxevent_ScheduledEarlierEvent = scheduler;
    rxevent_initialized = 1;
    clock_Zero(&rxevent_nextRaiseEvents);
    rxevent_raiseScheduled = 0;
    UNLOCK_EV_INIT
}
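
/*
 * Example (illustrative only, not part of the original source): a caller
 * initializes the package once with its allocation unit and a wakeup hook.
 * "myWakeupHandler" is a hypothetical function standing in for the caller's
 * scheduler notification:
 *
 *	static void myWakeupHandler()
 *	{
 *	    ...wake the thread that calls rxevent_RaiseEvents()...
 *	}
 *
 *	rxevent_Init(50, myWakeupHandler);
 */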
/* Create and initialize a new epoch structure */
struct rxepoch *rxepoch_Allocate(struct clock *when)
{
    struct rxepoch *ep;
    int i;

    /* If we are short on free epoch entries, create a block of new ones
     * and add them to the free queue */
    if (queue_IsEmpty(&rxepoch_free)) {
#if defined(AFS_AIX32_ENV) && defined(KERNEL)
	ep = (struct rxepoch *) rxi_Alloc(sizeof(struct rxepoch));
	queue_Append(&rxepoch_free, &ep[0]), rxepoch_nFree++;
#else
	ep = (struct rxepoch *)
	    osi_Alloc(sizeof(struct rxepoch) * rxepoch_allocUnit);
	xsp = xfreemallocs;
	xfreemallocs = (struct xfreelist *) osi_Alloc(sizeof(struct xfreelist));
	xfreemallocs->mem = (void *)ep;
	xfreemallocs->size = sizeof(struct rxepoch) * rxepoch_allocUnit;
	xfreemallocs->next = xsp;
	for (i = 0; i < rxepoch_allocUnit; i++)
	    queue_Append(&rxepoch_free, &ep[i]), rxepoch_nFree++;
#endif
    }
    ep = queue_First(&rxepoch_free, rxepoch);
    queue_Remove(ep);
    rxepoch_nFree--;
    ep->epochSec = when->sec;
    queue_Init(&ep->events);
    return ep;
}
/* Add the indicated event (function, arg) at the specified clock time.  The
 * "when" argument specifies when "func" should be called, in clock (clock.h)
 * units. */
struct rxevent *rxevent_Post(struct clock *when, void (*func)(),
			     void *arg, void *arg1)
{
    register struct rxevent *ev, *evqe, *evqpr;
    register struct rxepoch *ep, *epqe, *epqpr;
    int isEarliest = 0;
    int i;

    MUTEX_ENTER(&rxevent_lock);
    AFS_ASSERT_RXGLOCK();
#ifdef RXDEBUG
    if (rx_Log_event) {
	struct clock now;
	clock_GetTime(&now);
	fprintf(rx_Log_event, "%d.%d: rxevent_Post(%d.%d, %x, %x)\n",
		(int) now.sec, (int) now.usec, (int) when->sec,
		(int) when->usec, (unsigned int) func, (unsigned int) arg);
    }
#endif
    /* Get a pointer to the epoch for this event; if none is found then
     * create a new epoch and insert it into the sorted list */
    for (ep = NULL, queue_ScanBackwards(&rxepoch_queue, epqe, epqpr, rxepoch)) {
	if (when->sec == epqe->epochSec) {
	    /* already have a structure for this epoch */
	    ep = epqe;
	    if (ep == queue_First(&rxepoch_queue, rxepoch))
		isEarliest = 1;
	    break;
	} else if (when->sec > epqe->epochSec) {
	    /* Create a new epoch and insert after epqe */
	    ep = rxepoch_Allocate(when);
	    queue_InsertAfter(epqe, ep);
	    break;
	}
    }
    if (ep == NULL) {
	/* Create a new epoch and place it at the head of the list */
	ep = rxepoch_Allocate(when);
	queue_Prepend(&rxepoch_queue, ep);
	isEarliest = 1;
    }
    /* If we're short on free event entries, create a block of new ones and add
     * them to the free queue */
    if (queue_IsEmpty(&rxevent_free)) {
#if defined(AFS_AIX32_ENV) && defined(KERNEL)
	ev = (struct rxevent *) rxi_Alloc(sizeof(struct rxevent));
	queue_Append(&rxevent_free, &ev[0]), rxevent_nFree++;
#else
	ev = (struct rxevent *) osi_Alloc(sizeof(struct rxevent) * rxevent_allocUnit);
	xsp = xfreemallocs;
	xfreemallocs = (struct xfreelist *) osi_Alloc(sizeof(struct xfreelist));
	xfreemallocs->mem = (void *)ev;
	xfreemallocs->size = sizeof(struct rxevent) * rxevent_allocUnit;
	xfreemallocs->next = xsp;
	for (i = 0; i < rxevent_allocUnit; i++)
	    queue_Append(&rxevent_free, &ev[i]), rxevent_nFree++;
#endif
    }
    /* Grab and initialize a new rxevent structure */
    ev = queue_First(&rxevent_free, rxevent);
    queue_Remove(ev);
    rxevent_nFree--;

    /* Record user defined event state */
    ev->eventTime = *when;
    ev->func = func;
    ev->arg = arg;
    ev->arg1 = arg1;
    rxevent_nPosted += 1;	/* Rather than ++, to shut high-C up
				 * regarding never-set variables */
    /* Insert the event into the sorted list of events for this epoch */
    for (queue_ScanBackwards(&ep->events, evqe, evqpr, rxevent)) {
	if (when->usec >= evqe->eventTime.usec) {
	    /* Insert event after evqe */
	    queue_InsertAfter(evqe, ev);
	    MUTEX_EXIT(&rxevent_lock);
	    return ev;
	}
    }
    /* Insert event at head of current epoch */
    queue_Prepend(&ep->events, ev);
    if (isEarliest && rxevent_ScheduledEarlierEvent &&
	(!rxevent_raiseScheduled ||
	 clock_Lt(&ev->eventTime, &rxevent_nextRaiseEvents))) {
	rxevent_raiseScheduled = 1;
	clock_Zero(&rxevent_nextRaiseEvents);
	MUTEX_EXIT(&rxevent_lock);
	/* Notify our external scheduler */
	(*rxevent_ScheduledEarlierEvent)();
	MUTEX_ENTER(&rxevent_lock);
    }
    MUTEX_EXIT(&rxevent_lock);
    return ev;
}
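
/*
 * Example (illustrative only): post an event 500 msec in the future using
 * the clock.h helpers already used by this file.  "myTimeoutHandler" and
 * "mydata" are hypothetical names, not rx API:
 *
 *	struct rxevent *ev;
 *	struct clock when, delta;
 *	delta.sec = 0;
 *	delta.usec = 500000;
 *	clock_GetTime(&when);
 *	clock_Add(&when, &delta);
 *	ev = rxevent_Post(&when, myTimeoutHandler, (void *)mydata, (void *)0);
 */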
/* Cancel an event by moving it from the event queue to the free list.
 * Warning, the event must be on the event queue!  If not, this should core
 * dump (reference through 0).  This routine should be called using the macro
 * rxevent_Cancel, which checks for a null event and also nulls the caller's
 * event pointer after cancelling the event.
 */
#ifdef RX_ENABLE_LOCKS
#ifdef RX_REFCOUNT_CHECK
int rxevent_Cancel_type = 0;
void rxevent_Cancel_1(ev, call, type)
    register struct rxevent *ev;
    register struct rx_call *call;
    int type;
#else /* RX_REFCOUNT_CHECK */
void rxevent_Cancel_1(ev, call)
    register struct rxevent *ev;
    register struct rx_call *call;
#endif /* RX_REFCOUNT_CHECK */
#else /* RX_ENABLE_LOCKS */
void rxevent_Cancel_1(ev)
    register struct rxevent *ev;
#endif /* RX_ENABLE_LOCKS */
{
#ifdef RXDEBUG
    if (rx_Log_event) {
	struct clock now;
	clock_GetTime(&now);
	fprintf(rx_Log_event, "%d.%d: rxevent_Cancel_1(%d.%d, %x, %x)\n",
		(int) now.sec, (int) now.usec, (int) ev->eventTime.sec,
		(int) ev->eventTime.usec, (unsigned int) ev->func,
		(unsigned int) ev->arg);
    }
#endif
    /* Append it to the free list (rather than prepending) to keep the free
     * list hot so nothing pages out
     */
    AFS_ASSERT_RXGLOCK();
    MUTEX_ENTER(&rxevent_lock);
    if (!ev) {
	MUTEX_EXIT(&rxevent_lock);
	return;
    }
#ifdef RX_ENABLE_LOCKS
    /* It's possible we're currently processing this event. */
    if (queue_IsOnQueue(ev)) {
	queue_MoveAppend(&rxevent_free, ev);
	rxevent_nPosted--;
	rxevent_nFree++;
	if (call) {
	    call->refCount--;
#ifdef RX_REFCOUNT_CHECK
	    call->refCDebug[type]--;
	    if (call->refCDebug[type] < 0) {
		rxevent_Cancel_type = type;
		osi_Panic("rxevent_Cancel: call refCount < 0");
	    }
#endif /* RX_REFCOUNT_CHECK */
	}
    }
#else /* RX_ENABLE_LOCKS */
    queue_MoveAppend(&rxevent_free, ev);
    rxevent_nPosted--;
    rxevent_nFree++;
#endif /* RX_ENABLE_LOCKS */
    MUTEX_EXIT(&rxevent_lock);
}
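
/*
 * Sketch of the rxevent_Cancel wrapper described above.  The real macro is
 * defined in rx_event.h; this is an assumption about its shape, not a copy.
 * It skips null events and clears the caller's pointer so a stale handle
 * cannot be cancelled twice:
 *
 *	#define rxevent_Cancel(event_ptr, call, type)		\
 *	    do {						\
 *		if (event_ptr) {				\
 *		    rxevent_Cancel_1(event_ptr, call, type);	\
 *		    event_ptr = (struct rxevent *) 0;		\
 *		}						\
 *	    } while (0)
 */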
/* Process all epochs that have expired relative to the current clock time
 * (which is not re-evaluated unless clock_NewTime has been called).  The
 * relative time to the next epoch is returned in the output parameter next
 * and the function returns 1.  If there is no next epoch, the function
 * returns 0.
 */
int rxevent_RaiseEvents(next)
    struct clock *next;
{
    register struct rxepoch *ep;
    register struct rxevent *ev;
    struct clock now;

    MUTEX_ENTER(&rxevent_lock);
    AFS_ASSERT_RXGLOCK();

    /* Events are sorted by time, so only scan until an event is found that has
     * not yet timed out */
    clock_Zero(&now);
    while (queue_IsNotEmpty(&rxepoch_queue)) {
	ep = queue_First(&rxepoch_queue, rxepoch);
	if (queue_IsEmpty(&ep->events)) {
	    /* This epoch is empty; retire it to the free list */
	    queue_Remove(ep);
	    queue_Append(&rxepoch_free, ep);
	    rxepoch_nFree++;
	    continue;
	}
	do {
	    ev = queue_First(&ep->events, rxevent);
	    if (clock_Lt(&now, &ev->eventTime)) {
		/* Re-read the clock before concluding the event is early */
		clock_GetTime(&now);
		if (clock_Lt(&now, &ev->eventTime)) {
		    *next = rxevent_nextRaiseEvents = ev->eventTime;
		    rxevent_raiseScheduled = 1;
		    clock_Sub(next, &now);
		    MUTEX_EXIT(&rxevent_lock);
		    return 1;
		}
	    }
	    queue_Remove(ev);
	    rxevent_nPosted--;
	    MUTEX_EXIT(&rxevent_lock);
	    ev->func(ev, ev->arg, ev->arg1);
	    MUTEX_ENTER(&rxevent_lock);
	    queue_Append(&rxevent_free, ev);
	    rxevent_nFree++;
	} while (queue_IsNotEmpty(&ep->events));
    }
#ifdef RXDEBUG
    if (rx_Log_event) fprintf(rx_Log_event, "rxevent_RaiseEvents(%d.%d)\n",
			      (int) now.sec, (int) now.usec);
#endif
    rxevent_raiseScheduled = 0;
    MUTEX_EXIT(&rxevent_lock);
    return 0;
}
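
/*
 * Example (illustrative only): a minimal dispatcher loop built on
 * rxevent_RaiseEvents.  "myWaitUntil" and "myWaitForPost" are hypothetical
 * blocking primitives (the real rx listener folds this into its socket
 * wait); the wakeup hook passed to rxevent_Init would interrupt the wait
 * whenever an earlier event is posted:
 *
 *	struct clock next;
 *	for (;;) {
 *	    if (rxevent_RaiseEvents(&next))
 *		myWaitUntil(&next);
 *	    else
 *		myWaitForPost();
 *	}
 */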
void shutdown_rxevent(void)
{
    struct xfreelist *xp, *nxp;

    LOCK_EV_INIT
    if (!rxevent_initialized) {
	UNLOCK_EV_INIT
	return;
    }
    rxevent_initialized = 0;
    UNLOCK_EV_INIT
    MUTEX_DESTROY(&rxevent_lock);
#if defined(AFS_AIX32_ENV) && defined(KERNEL)
    /* Everything is freed in afs_osinet.c */
#else
    xp = xfreemallocs;
    while (xp) {
	nxp = xp->next;
	osi_Free((char *)xp->mem, xp->size);
	osi_Free((char *)xp, sizeof(struct xfreelist));
	xp = nxp;
    }
#endif
}
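
/*
 * Example (illustrative only): tearing the package down releases every
 * block chained through xfreemallocs; afterwards rxevent_Init may be
 * called again.  "myWakeupHandler" is the same hypothetical hook as above:
 *
 *	shutdown_rxevent();
 *	rxevent_Init(10, myWakeupHandler);
 */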