/*
* Copyright 2000, International Business Machines Corporation and others.
* All Rights Reserved.
 *
* This software has been released under the terms of the IBM Public
* License. For details, see the LICENSE file in the top-level source
* directory or online at http://www.openafs.org/dl/license10.html
#include <sys/time_impl.h>
#endif
-RCSID
- ("$Header$");
#ifdef KERNEL
#ifndef UKERNEL
static struct xfreelist *xfreemallocs = 0, *xsp = 0;
struct clock rxevent_nextRaiseEvents; /* Time of next call to raise events */
+struct clock rxevent_lastEvent; /* backwards time detection */
int rxevent_raiseScheduled; /* true if raise events is scheduled */
#ifdef RX_ENABLE_LOCKS
*/
#include <assert.h>
-pthread_mutex_t rx_event_mutex;
-#define LOCK_EV_INIT assert(pthread_mutex_lock(&rx_event_mutex)==0)
-#define UNLOCK_EV_INIT assert(pthread_mutex_unlock(&rx_event_mutex)==0)
+afs_kmutex_t rx_event_mutex;
+#define LOCK_EV_INIT MUTEX_ENTER(&rx_event_mutex)
+#define UNLOCK_EV_INIT MUTEX_EXIT(&rx_event_mutex)
#else
#define LOCK_EV_INIT
#define UNLOCK_EV_INIT
#endif /* AFS_PTHREAD_ENV */
+int
+rxevent_adjTimes(struct clock *adjTime)
+{
+ /* backwards clock correction */
+ int nAdjusted = 0;
+ struct rxepoch *qep, *nqep;
+ struct rxevent *qev, *nqev;
+
+ for (queue_Scan(&rxepoch_queue, qep, nqep, rxepoch)) {
+ for (queue_Scan(&qep->events, qev, nqev, rxevent)) {
+ if (clock_Gt(&qev->eventTime, adjTime)) {
+ clock_Sub(&qev->eventTime, adjTime);
+ nAdjusted++;
+ }
+ }
+ if (qep->epochSec > adjTime->sec) {
+ qep->epochSec -= adjTime->sec;
+ }
+ }
+ return nAdjusted;
+}
+
/* Pass in the number of events to allocate at a time */
int rxevent_initialized = 0;
void
rxevent_ScheduledEarlierEvent = scheduler;
rxevent_initialized = 1;
clock_Zero(&rxevent_nextRaiseEvents);
+ clock_Zero(&rxevent_lastEvent);
rxevent_raiseScheduled = 0;
UNLOCK_EV_INIT;
}
* and add them to the free queue */
if (queue_IsEmpty(&rxepoch_free)) {
#if defined(AFS_AIX32_ENV) && defined(KERNEL)
- ep = (struct rxepoch *)rxi_Alloc(sizeof(struct rxepoch));
+ ep = rxi_Alloc(sizeof(struct rxepoch));
queue_Append(&rxepoch_free, &ep[0]), rxepoch_nFree++;
#else
+#if defined(KERNEL) && !defined(UKERNEL) && defined(AFS_FBSD80_ENV)
+ ep = (struct rxepoch *)
+ afs_osi_Alloc_NoSleep(sizeof(struct rxepoch) * rxepoch_allocUnit);
+ xsp = xfreemallocs;
+ xfreemallocs =
+ (struct xfreelist *)afs_osi_Alloc_NoSleep(sizeof(struct xfreelist));
+#else
ep = (struct rxepoch *)
osi_Alloc(sizeof(struct rxepoch) * rxepoch_allocUnit);
xsp = xfreemallocs;
xfreemallocs =
(struct xfreelist *)osi_Alloc(sizeof(struct xfreelist));
+#endif
xfreemallocs->mem = (void *)ep;
xfreemallocs->size = sizeof(struct rxepoch) * rxepoch_allocUnit;
xfreemallocs->next = xsp;
* "when" argument specifies when "func" should be called, in clock (clock.h)
* units. */
-#if 0
-struct rxevent *
-rxevent_Post(struct clock *when,
- void (*func) (struct rxevent * event,
- struct rx_connection * conn,
- struct rx_call * acall), void *arg, void *arg1)
-#else
static struct rxevent *
-_rxevent_Post(struct clock *when, void (*func) (), void *arg, void *arg1,
- int arg2, int newargs)
-#endif
+_rxevent_Post(struct clock *when, struct clock *now,
+ void (*func) (struct rxevent *, void *, void *, int),
+ void *arg, void *arg1, int arg2, int newargs)
{
- register struct rxevent *ev, *evqe, *evqpr;
- register struct rxepoch *ep, *epqe, *epqpr;
+ struct rxevent *ev, *evqe, *evqpr;
+ struct rxepoch *ep, *epqe, *epqpr;
int isEarliest = 0;
MUTEX_ENTER(&rxevent_lock);
#ifdef RXDEBUG
if (rx_Log_event) {
- struct clock now;
- clock_GetTime(&now);
- fprintf(rx_Log_event, "%d.%d: rxevent_Post(%d.%d, %lx, %lx, %lx, %d)\n",
- (int)now.sec, (int)now.usec, (int)when->sec, (int)when->usec,
- (unsigned long)func, (unsigned long)arg,
- (unsigned long)arg1, arg2);
+ struct clock now1;
+ clock_GetTime(&now1);
+ fprintf(rx_Log_event, "%ld.%ld: rxevent_Post(%ld.%ld, "
+ "%"AFS_PTR_FMT", %"AFS_PTR_FMT", "
+ "%"AFS_PTR_FMT", %d)\n",
+ afs_printable_int32_ld(now1.sec),
+ afs_printable_int32_ld(now1.usec),
+ afs_printable_int32_ld(when->sec),
+ afs_printable_int32_ld(when->usec),
+ func, arg,
+ arg1, arg2);
}
#endif
-
+ /* If a time was provided, check for consistency */
+ if (now->sec) {
+ if (clock_Gt(&rxevent_lastEvent, now)) {
+ struct clock adjTime = rxevent_lastEvent;
+ clock_Sub(&adjTime, now);
+ rxevent_adjTimes(&adjTime);
+ }
+ rxevent_lastEvent = *now;
+ }
/* Get a pointer to the epoch for this event, if none is found then
* create a new epoch and insert it into the sorted list */
for (ep = NULL, queue_ScanBackwards(&rxepoch_queue, epqe, epqpr, rxepoch)) {
/* If we're short on free event entries, create a block of new ones and add
* them to the free queue */
if (queue_IsEmpty(&rxevent_free)) {
- register int i;
+ int i;
#if defined(AFS_AIX32_ENV) && defined(KERNEL)
- ev = (struct rxevent *)rxi_Alloc(sizeof(struct rxevent));
+ ev = rxi_Alloc(sizeof(struct rxevent));
queue_Append(&rxevent_free, &ev[0]), rxevent_nFree++;
#else
+
+#if defined(KERNEL) && !defined(UKERNEL) && defined(AFS_FBSD80_ENV)
+ ev = (struct rxevent *)afs_osi_Alloc_NoSleep(sizeof(struct rxevent) *
+ rxevent_allocUnit);
+ xsp = xfreemallocs;
+ xfreemallocs =
+ (struct xfreelist *)afs_osi_Alloc_NoSleep(sizeof(struct xfreelist));
+#else
ev = (struct rxevent *)osi_Alloc(sizeof(struct rxevent) *
rxevent_allocUnit);
xsp = xfreemallocs;
xfreemallocs =
(struct xfreelist *)osi_Alloc(sizeof(struct xfreelist));
+#endif
xfreemallocs->mem = (void *)ev;
xfreemallocs->size = sizeof(struct rxevent) * rxevent_allocUnit;
xfreemallocs->next = xsp;
/* Record user defined event state */
ev->eventTime = *when;
- ev->func = func;
+ if (newargs) {
+ ev->func.newfunc = func;
+ } else {
+ ev->func.oldfunc = (void (*)(struct rxevent *, void *, void*))func;
+ }
ev->arg = arg;
ev->arg1 = arg1;
ev->arg2 = arg2;
}
struct rxevent *
-rxevent_Post(struct clock *when, void (*func) (), void *arg, void *arg1)
+rxevent_Post(struct clock *when,
+ void (*func) (struct rxevent *, void *, void *),
+ void *arg, void *arg1)
+{
+ struct clock now;
+ clock_Zero(&now);
+ return _rxevent_Post(when, &now,
+ (void (*)(struct rxevent *, void *, void *, int))func,
+ arg, arg1, 0, 0);
+}
+
+struct rxevent *
+rxevent_Post2(struct clock *when,
+ void (*func) (struct rxevent *, void *, void *, int),
+ void *arg, void *arg1, int arg2)
+{
+ struct clock now;
+ clock_Zero(&now);
+ return _rxevent_Post(when, &now, func, arg, arg1, arg2, 1);
+}
+
/*
 * Schedule func(event, arg, arg1) to fire at time "when", supplying the
 * caller's notion of the current time in "now" so _rxevent_Post can
 * detect and correct for a backwards-stepping clock.
 * Returns the newly queued event.
 */
struct rxevent *
rxevent_PostNow(struct clock *when, struct clock *now,
		void (*func) (struct rxevent *, void *, void *),
		void *arg, void *arg1)
{
    /* newargs == 0: _rxevent_Post will call back through the
     * three-argument (oldfunc) signature. */
    return _rxevent_Post(when, now,
			 (void (*)(struct rxevent *, void *, void *, int))func,
			 arg, arg1, 0, 0);
}
/*
 * Schedule func(event, arg, arg1, arg2) to fire at time "when",
 * supplying the caller's current time in "now" so _rxevent_Post can
 * detect and correct for a backwards-stepping clock.
 * Returns the newly queued event.
 */
struct rxevent *
rxevent_PostNow2(struct clock *when, struct clock *now,
		 void (*func) (struct rxevent *, void *, void *, int),
		 void *arg, void *arg1, int arg2)
{
    /* newargs == 1: dispatch through the four-argument signature. */
    return _rxevent_Post(when, now, func, arg, arg1, arg2, 1);
}
/* Cancel an event by moving it from the event queue to the free list.
#endif
void
-rxevent_Cancel_1(register struct rxevent *ev, register struct rx_call *call,
- register int type)
+rxevent_Cancel_1(struct rxevent *ev, struct rx_call *call,
+ int type)
{
#ifdef RXDEBUG
if (rx_Log_event) {
struct clock now;
clock_GetTime(&now);
- fprintf(rx_Log_event, "%d.%d: rxevent_Cancel_1(%d.%d, %lx, %lx)\n",
+ fprintf(rx_Log_event, "%d.%d: rxevent_Cancel_1(%d.%d, %"
+ AFS_PTR_FMT ", %p" AFS_PTR_FMT ")\n",
(int)now.sec, (int)now.usec, (int)ev->eventTime.sec,
- (int)ev->eventTime.usec, (unsigned long)ev->func,
- (unsigned long)ev->arg);
+ (int)ev->eventTime.usec, ev->func.newfunc,
+ ev->arg);
}
#endif
/* Append it to the free list (rather than prepending) to keep the free
int
rxevent_RaiseEvents(struct clock *next)
{
- register struct rxepoch *ep;
- register struct rxevent *ev;
+ struct rxepoch *ep;
+ struct rxevent *ev;
volatile struct clock now;
-
MUTEX_ENTER(&rxevent_lock);
/* Events are sorted by time, so only scan until an event is found that has
continue;
}
do {
+ reraise:
ev = queue_First(&ep->events, rxevent);
if (clock_Lt(&now, &ev->eventTime)) {
clock_GetTime(&now);
- if (clock_Lt(&now, &ev->eventTime)) {
- *next = rxevent_nextRaiseEvents = ev->eventTime;
- rxevent_raiseScheduled = 1;
- clock_Sub(next, &now);
- MUTEX_EXIT(&rxevent_lock);
- return 1;
+ if (clock_Gt(&rxevent_lastEvent, &now)) {
+ struct clock adjTime = rxevent_lastEvent;
+ int adjusted;
+ clock_Sub(&adjTime, &now);
+ adjusted = rxevent_adjTimes(&adjTime);
+ rxevent_lastEvent = now;
+ if (adjusted > 0)
+ goto reraise;
}
- }
+ if (clock_Lt(&now, &ev->eventTime)) {
+ *next = rxevent_nextRaiseEvents = ev->eventTime;
+ rxevent_raiseScheduled = 1;
+ clock_Sub(next, &now);
+ MUTEX_EXIT(&rxevent_lock);
+ return 1;
+ }
+ }
queue_Remove(ev);
rxevent_nPosted--;
MUTEX_EXIT(&rxevent_lock);
if (ev->newargs) {
- ev->func(ev, ev->arg, ev->arg1, ev->arg2);
+ ev->func.newfunc(ev, ev->arg, ev->arg1, ev->arg2);
} else {
- ev->func(ev, ev->arg, ev->arg1);
+ ev->func.oldfunc(ev, ev->arg, ev->arg1);
}
MUTEX_ENTER(&rxevent_lock);
queue_Append(&rxevent_free, ev);