/*
* Copyright 2000, International Business Machines Corporation and others.
* All Rights Reserved.
- *
+ *
* This software has been released under the terms of the IBM Public
* License. For details, see the LICENSE file in the top-level source
* directory or online at http://www.openafs.org/dl/license10.html
*/
+#include <afsconfig.h>
#ifdef KERNEL
-#include "../afs/param.h"
+#include "afs/param.h"
+#else
+#include <afs/param.h>
+#endif
+
+#ifdef AFS_SUN59_ENV
+#include <sys/time_impl.h>
+#endif
+
+
+#ifdef KERNEL
#ifndef UKERNEL
-#include "../afs/afs_osi.h"
+#include "afs/afs_osi.h"
#else /* !UKERNEL */
-#include "../afs/sysincludes.h"
-#include "../afs/afsincludes.h"
+#include "afs/sysincludes.h"
+#include "afsincludes.h"
#endif /* !UKERNEL */
-#include "../rx/rx_clock.h"
-#include "../rx/rx_queue.h"
-#include "../rx/rx_event.h"
-#include "../rx/rx_kernel.h"
-#include "../rx/rx_kmutex.h"
+#include "rx/rx_clock.h"
+#include "rx/rx_queue.h"
+#include "rx/rx_event.h"
+#include "rx/rx_kernel.h"
+#include "rx_kmutex.h"
#ifdef RX_ENABLE_LOCKS
-#include "../rx/rx.h"
+#include "rx/rx.h"
#endif /* RX_ENABLE_LOCKS */
-#include "../rx/rx_globals.h"
+#include "rx/rx_globals.h"
#if defined(AFS_SGI_ENV)
-#include "../sys/debug.h"
+#include "sys/debug.h"
/* These are necessary to get curproc (used by GLOCK asserts) to work. */
-#include "../h/proc.h"
+#include "h/proc.h"
#if !defined(AFS_SGI64_ENV) && !defined(UKERNEL)
-#include "../h/user.h"
+#include "h/user.h"
#endif
extern void *osi_Alloc();
#endif
+#if defined(AFS_OBSD_ENV)
+#include "h/proc.h"
+#endif
#else /* KERNEL */
-#include "afs/param.h"
#include <stdio.h>
#include "rx_clock.h"
#include "rx_queue.h"
/* All event processing is relative to the apparent current time given by clock_GetTime */
/* This should be static, but event_test wants to look at the free list... */
-struct rx_queue rxevent_free; /* It's somewhat bogus to use a doubly-linked queue for the free list */
-struct rx_queue rxepoch_free; /* It's somewhat bogus to use a doubly-linked queue for the free list */
-static struct rx_queue rxepoch_queue; /* list of waiting epochs */
-static int rxevent_allocUnit = 10; /* Allocation unit (number of event records to allocate at one time) */
-static int rxepoch_allocUnit = 10; /* Allocation unit (number of epoch records to allocate at one time) */
-int rxevent_nFree; /* Number of free event records */
-int rxevent_nPosted; /* Current number of posted events */
-int rxepoch_nFree; /* Number of free epoch records */
-static int (*rxevent_ScheduledEarlierEvent)(); /* Proc to call when an event is scheduled that is earlier than all other events */
-struct xfreelist {
+struct rx_queue rxevent_free; /* It's somewhat bogus to use a doubly-linked queue for the free list */
+struct rx_queue rxepoch_free; /* It's somewhat bogus to use a doubly-linked queue for the free list */
+static struct rx_queue rxepoch_queue; /* list of waiting epochs */
+static int rxevent_allocUnit = 10; /* Allocation unit (number of event records to allocate at one time) */
+static int rxepoch_allocUnit = 10; /* Allocation unit (number of epoch records to allocate at one time) */
+int rxevent_nFree; /* Number of free event records */
+int rxevent_nPosted; /* Current number of posted events */
+int rxepoch_nFree; /* Number of free epoch records */
+static void (*rxevent_ScheduledEarlierEvent) (void); /* Proc to call when an event is scheduled that is earlier than all other events */
+struct xfreelist {
void *mem;
int size;
- struct xfreelist *next;
+ struct xfreelist *next;
};
static struct xfreelist *xfreemallocs = 0, *xsp = 0;
struct clock rxevent_nextRaiseEvents; /* Time of next call to raise events */
-int rxevent_raiseScheduled; /* true if raise events is scheduled */
+struct clock rxevent_lastEvent; /* backwards time detection */
+int rxevent_raiseScheduled; /* true if raise events is scheduled */
#ifdef RX_ENABLE_LOCKS
#ifdef RX_LOCKS_DB
*/
#include <assert.h>
-pthread_mutex_t rx_event_mutex;
-#define LOCK_EV_INIT assert(pthread_mutex_lock(&rx_event_mutex)==0);
-#define UNLOCK_EV_INIT assert(pthread_mutex_unlock(&rx_event_mutex)==0);
+afs_kmutex_t rx_event_mutex;
+#define LOCK_EV_INIT MUTEX_ENTER(&rx_event_mutex)
+#define UNLOCK_EV_INIT MUTEX_EXIT(&rx_event_mutex)
#else
#define LOCK_EV_INIT
#define UNLOCK_EV_INIT
#endif /* AFS_PTHREAD_ENV */
+/*
+ * Compensate for a clock that has stepped backwards.
+ * Subtracts adjTime (the magnitude of the backwards step) from the
+ * eventTime of every queued event greater than adjTime, and from each
+ * epoch's epochSec, so pending events still fire relative to the new
+ * (earlier) notion of "now".  Returns the number of events adjusted.
+ * Called with rxevent_lock held (see callers in _rxevent_Post and
+ * rxevent_RaiseEvents).
+ */
+int
+rxevent_adjTimes(struct clock *adjTime)
+{
+ /* backwards clock correction */
+ int nAdjusted = 0;
+ struct rxepoch *qep, *nqep;
+ struct rxevent *qev, *nqev;
+
+ for (queue_Scan(&rxepoch_queue, qep, nqep, rxepoch)) {
+ for (queue_Scan(&qep->events, qev, nqev, rxevent)) {
+ if (clock_Gt(&qev->eventTime, adjTime)) {
+ clock_Sub(&qev->eventTime, adjTime);
+ nAdjusted++;
+ }
+ }
+ if (qep->epochSec > adjTime->sec) {
+ qep->epochSec -= adjTime->sec;
+ }
+ }
+ return nAdjusted;
+}
+
/* Pass in the number of events to allocate at a time */
int rxevent_initialized = 0;
+/*
+ * One-time initialization of the event package.
+ * nEvents, if non-zero, overrides the default allocation unit (number of
+ * event/epoch records allocated per chunk).  scheduler is recorded as
+ * rxevent_ScheduledEarlierEvent and is invoked -- with rxevent_lock
+ * dropped -- whenever an event is posted earlier than all others.
+ * Idempotent: calls after the first return immediately under
+ * LOCK_EV_INIT/UNLOCK_EV_INIT.
+ */
void
-rxevent_Init(nEvents, scheduler)
- int nEvents;
- int (*scheduler)();
+rxevent_Init(int nEvents, void (*scheduler) (void))
{
- LOCK_EV_INIT
+ LOCK_EV_INIT;
if (rxevent_initialized) {
- UNLOCK_EV_INIT
+ UNLOCK_EV_INIT;
return;
}
MUTEX_INIT(&rxevent_lock, "rxevent_lock", MUTEX_DEFAULT, 0);
clock_Init();
- if (nEvents) rxevent_allocUnit = nEvents;
+ if (nEvents)
+ rxevent_allocUnit = nEvents;
queue_Init(&rxevent_free);
queue_Init(&rxepoch_free);
queue_Init(&rxepoch_queue);
rxevent_ScheduledEarlierEvent = scheduler;
rxevent_initialized = 1;
clock_Zero(&rxevent_nextRaiseEvents);
+ clock_Zero(&rxevent_lastEvent);
rxevent_raiseScheduled = 0;
- UNLOCK_EV_INIT
+ UNLOCK_EV_INIT;
}
/* Create and initialize new epoch structure */
-struct rxepoch *rxepoch_Allocate(struct clock *when)
+struct rxepoch *
+rxepoch_Allocate(struct clock *when)
{
struct rxepoch *ep;
int i;
* and add them to the free queue */
if (queue_IsEmpty(&rxepoch_free)) {
#if defined(AFS_AIX32_ENV) && defined(KERNEL)
- ep = (struct rxepoch *) rxi_Alloc(sizeof(struct rxepoch));
+ ep = rxi_Alloc(sizeof(struct rxepoch));
queue_Append(&rxepoch_free, &ep[0]), rxepoch_nFree++;
#else
+#if defined(KERNEL) && !defined(UKERNEL) && defined(AFS_FBSD80_ENV)
+ ep = (struct rxepoch *)
+ afs_osi_Alloc_NoSleep(sizeof(struct rxepoch) * rxepoch_allocUnit);
+ xsp = xfreemallocs;
+ xfreemallocs =
+ (struct xfreelist *)afs_osi_Alloc_NoSleep(sizeof(struct xfreelist));
+#else
ep = (struct rxepoch *)
- osi_Alloc(sizeof(struct rxepoch) * rxepoch_allocUnit);
+ osi_Alloc(sizeof(struct rxepoch) * rxepoch_allocUnit);
xsp = xfreemallocs;
- xfreemallocs = (struct xfreelist *) osi_Alloc(sizeof(struct xfreelist));
+ xfreemallocs =
+ (struct xfreelist *)osi_Alloc(sizeof(struct xfreelist));
+#endif
xfreemallocs->mem = (void *)ep;
xfreemallocs->size = sizeof(struct rxepoch) * rxepoch_allocUnit;
xfreemallocs->next = xsp;
- for (i = 0; i<rxepoch_allocUnit; i++)
+ for (i = 0; i < rxepoch_allocUnit; i++)
queue_Append(&rxepoch_free, &ep[i]), rxepoch_nFree++;
#endif
}
* "when" argument specifies when "func" should be called, in clock (clock.h)
* units. */
-struct rxevent *rxevent_Post(struct clock *when, void (*func)(),
- void *arg, void *arg1)
+static struct rxevent *
+_rxevent_Post(struct clock *when, struct clock *now,
+ void (*func) (struct rxevent *, void *, void *, int),
+ void *arg, void *arg1, int arg2, int newargs)
{
- register struct rxevent *ev, *evqe, *evqpr;
- register struct rxepoch *ep, *epqe, *epqpr;
- struct clock ept;
+ struct rxevent *ev, *evqe, *evqpr;
+ struct rxepoch *ep, *epqe, *epqpr;
int isEarliest = 0;
MUTEX_ENTER(&rxevent_lock);
- AFS_ASSERT_RXGLOCK();
#ifdef RXDEBUG
if (rx_Log_event) {
- struct clock now;
- clock_GetTime(&now);
- fprintf(rx_Log_event, "%d.%d: rxevent_Post(%d.%d, %x, %x)\n", now.sec, now.usec, when->sec, when->usec, func, arg);
+ struct clock now1;
+ clock_GetTime(&now1);
+ fprintf(rx_Log_event, "%ld.%ld: rxevent_Post(%ld.%ld, "
+ "%"AFS_PTR_FMT", %"AFS_PTR_FMT", "
+ "%"AFS_PTR_FMT", %d)\n",
+ afs_printable_int32_ld(now1.sec),
+ afs_printable_int32_ld(now1.usec),
+ afs_printable_int32_ld(when->sec),
+ afs_printable_int32_ld(when->usec),
+ func, arg,
+ arg1, arg2);
}
#endif
-
+ /* If a time was provided, check for consistency */
+ if (now->sec) {
+ if (clock_Gt(&rxevent_lastEvent, now)) {
+ struct clock adjTime = rxevent_lastEvent;
+ clock_Sub(&adjTime, now);
+ rxevent_adjTimes(&adjTime);
+ }
+ rxevent_lastEvent = *now;
+ }
/* Get a pointer to the epoch for this event, if none is found then
* create a new epoch and insert it into the sorted list */
for (ep = NULL, queue_ScanBackwards(&rxepoch_queue, epqe, epqpr, rxepoch)) {
/* If we're short on free event entries, create a block of new ones and add
* them to the free queue */
if (queue_IsEmpty(&rxevent_free)) {
- register int i;
+ int i;
#if defined(AFS_AIX32_ENV) && defined(KERNEL)
- ev = (struct rxevent *) rxi_Alloc(sizeof(struct rxevent));
+ ev = rxi_Alloc(sizeof(struct rxevent));
queue_Append(&rxevent_free, &ev[0]), rxevent_nFree++;
#else
- ev = (struct rxevent *) osi_Alloc(sizeof(struct rxevent) * rxevent_allocUnit);
+
+#if defined(KERNEL) && !defined(UKERNEL) && defined(AFS_FBSD80_ENV)
+ ev = (struct rxevent *)afs_osi_Alloc_NoSleep(sizeof(struct rxevent) *
+ rxevent_allocUnit);
xsp = xfreemallocs;
- xfreemallocs = (struct xfreelist *) osi_Alloc(sizeof(struct xfreelist));
+ xfreemallocs =
+ (struct xfreelist *)afs_osi_Alloc_NoSleep(sizeof(struct xfreelist));
+#else
+ ev = (struct rxevent *)osi_Alloc(sizeof(struct rxevent) *
+ rxevent_allocUnit);
+ xsp = xfreemallocs;
+ xfreemallocs =
+ (struct xfreelist *)osi_Alloc(sizeof(struct xfreelist));
+#endif
xfreemallocs->mem = (void *)ev;
xfreemallocs->size = sizeof(struct rxevent) * rxevent_allocUnit;
xfreemallocs->next = xsp;
- for (i = 0; i<rxevent_allocUnit; i++)
+ for (i = 0; i < rxevent_allocUnit; i++)
queue_Append(&rxevent_free, &ev[i]), rxevent_nFree++;
#endif
}
/* Record user defined event state */
ev->eventTime = *when;
- ev->func = func;
+ if (newargs) {
+ ev->func.newfunc = func;
+ } else {
+ ev->func.oldfunc = (void (*)(struct rxevent *, void *, void*))func;
+ }
ev->arg = arg;
ev->arg1 = arg1;
- rxevent_nPosted += 1; /* Rather than ++, to shut high-C up
- * regarding never-set variables
- */
+ ev->arg2 = arg2;
+ ev->newargs = newargs;
+ rxevent_nPosted += 1; /* Rather than ++, to shut high-C up
+ * regarding never-set variables
+ */
/* Insert the event into the sorted list of events for this epoch */
for (queue_ScanBackwards(&ep->events, evqe, evqpr, rxevent)) {
}
/* Insert event at head of current epoch */
queue_Prepend(&ep->events, ev);
- if (isEarliest && rxevent_ScheduledEarlierEvent &&
- (!rxevent_raiseScheduled ||
- clock_Lt(&ev->eventTime, &rxevent_nextRaiseEvents))) {
+ if (isEarliest && rxevent_ScheduledEarlierEvent
+ && (!rxevent_raiseScheduled
+ || clock_Lt(&ev->eventTime, &rxevent_nextRaiseEvents))) {
rxevent_raiseScheduled = 1;
clock_Zero(&rxevent_nextRaiseEvents);
MUTEX_EXIT(&rxevent_lock);
/* Notify our external scheduler */
- (*rxevent_ScheduledEarlierEvent)();
+ (*rxevent_ScheduledEarlierEvent) ();
MUTEX_ENTER(&rxevent_lock);
}
MUTEX_EXIT(&rxevent_lock);
return ev;
}
+/*
+ * Post an event using the legacy three-argument handler signature
+ * (event, arg, arg1).  Passes a zeroed "now" so _rxevent_Post skips its
+ * backwards-clock consistency check (which only runs when now->sec is
+ * non-zero), and newargs=0 so the handler is dispatched via func.oldfunc.
+ */
+struct rxevent *
+rxevent_Post(struct clock *when,
+ void (*func) (struct rxevent *, void *, void *),
+ void *arg, void *arg1)
+{
+ struct clock now;
+ clock_Zero(&now);
+ return _rxevent_Post(when, &now,
+ (void (*)(struct rxevent *, void *, void *, int))func,
+ arg, arg1, 0, 0);
+}
+
+/*
+ * Post an event using the extended four-argument handler signature
+ * (event, arg, arg1, arg2); newargs=1 selects func.newfunc dispatch.
+ * Like rxevent_Post, passes a zeroed "now", so no backwards-clock
+ * check is performed.
+ */
+struct rxevent *
+rxevent_Post2(struct clock *when,
+ void (*func) (struct rxevent *, void *, void *, int),
+ void *arg, void *arg1, int arg2)
+{
+ struct clock now;
+ clock_Zero(&now);
+ return _rxevent_Post(when, &now, func, arg, arg1, arg2, 1);
+}
+
+/*
+ * Post a legacy-signature event, with the caller supplying the current
+ * time.  A non-zero now->sec enables _rxevent_Post's backwards-clock
+ * detection (rxevent_lastEvent vs. now) and queue readjustment.
+ */
+struct rxevent *
+rxevent_PostNow(struct clock *when, struct clock *now,
+ void (*func) (struct rxevent *, void *, void *),
+ void *arg, void *arg1)
+{
+ return _rxevent_Post(when, now,
+ (void (*)(struct rxevent *, void *, void *, int))func,
+ arg, arg1, 0, 0);
+}
+
+/*
+ * Post an extended-signature (four-argument) event with the caller
+ * supplying the current time; combines the semantics of rxevent_Post2
+ * (newargs=1) and rxevent_PostNow (backwards-clock detection).
+ */
+struct rxevent *
+rxevent_PostNow2(struct clock *when, struct clock *now,
+ void (*func) (struct rxevent *, void *, void *, int),
+ void *arg, void *arg1, int arg2)
+{
+ return _rxevent_Post(when, now, func, arg, arg1, arg2, 1);
+}
+
/* Cancel an event by moving it from the event queue to the free list.
* Warning, the event must be on the event queue! If not, this should core
* dump (reference through 0). This routine should be called using the macro
#ifdef RX_ENABLE_LOCKS
#ifdef RX_REFCOUNT_CHECK
int rxevent_Cancel_type = 0;
-void rxevent_Cancel_1(ev, call, type)
- register struct rxevent *ev;
- register struct rx_call *call;
- register int type;
-#else /* RX_REFCOUNT_CHECK */
-void rxevent_Cancel_1(ev, call)
- register struct rxevent *ev;
- register struct rx_call *call;
-#endif /* RX_REFCOUNT_CHECK */
-#else /* RX_ENABLE_LOCKS */
-void rxevent_Cancel_1(ev)
- register struct rxevent *ev;
-#endif /* RX_ENABLE_LOCKS */
+#endif
+#endif
+
+void
+rxevent_Cancel_1(struct rxevent *ev, struct rx_call *call,
+ int type)
{
#ifdef RXDEBUG
if (rx_Log_event) {
struct clock now;
clock_GetTime(&now);
- fprintf(rx_Log_event, "%d.%d: rxevent_Cancel_1(%d.%d, %x, %x)\n", now.sec,
- now.usec, ev->eventTime.sec, ev->eventTime.usec, ev->func,
+ fprintf(rx_Log_event, "%d.%d: rxevent_Cancel_1(%d.%d, %"
+ AFS_PTR_FMT ", %p" AFS_PTR_FMT ")\n",
+ (int)now.sec, (int)now.usec, (int)ev->eventTime.sec,
+ (int)ev->eventTime.usec, ev->func.newfunc,
ev->arg);
}
#endif
/* Append it to the free list (rather than prepending) to keep the free
* list hot so nothing pages out
*/
- AFS_ASSERT_RXGLOCK();
MUTEX_ENTER(&rxevent_lock);
if (!ev) {
MUTEX_EXIT(&rxevent_lock);
call->refCount--;
#ifdef RX_REFCOUNT_CHECK
call->refCDebug[type]--;
- if (call->refCDebug[type]<0) {
+ if (call->refCDebug[type] < 0) {
rxevent_Cancel_type = type;
osi_Panic("rxevent_Cancel: call refCount < 0");
}
* and the function returns 1. If there are is no next epoch, the function
* returns 0.
*/
-int rxevent_RaiseEvents(next)
- struct clock *next;
+int
+rxevent_RaiseEvents(struct clock *next)
{
- register struct rxepoch *ep;
- register struct rxevent *ev;
- struct clock now;
-
+ struct rxepoch *ep;
+ struct rxevent *ev;
+ volatile struct clock now;
MUTEX_ENTER(&rxevent_lock);
- AFS_ASSERT_RXGLOCK();
-
/* Events are sorted by time, so only scan until an event is found that has
* not yet timed out */
continue;
}
do {
+ reraise:
ev = queue_First(&ep->events, rxevent);
if (clock_Lt(&now, &ev->eventTime)) {
clock_GetTime(&now);
- if (clock_Lt(&now, &ev->eventTime)) {
- *next = rxevent_nextRaiseEvents = ev->eventTime;
- rxevent_raiseScheduled = 1;
- clock_Sub(next, &now);
- MUTEX_EXIT(&rxevent_lock);
- return 1;
+ if (clock_Gt(&rxevent_lastEvent, &now)) {
+ struct clock adjTime = rxevent_lastEvent;
+ int adjusted;
+ clock_Sub(&adjTime, &now);
+ adjusted = rxevent_adjTimes(&adjTime);
+ rxevent_lastEvent = now;
+ if (adjusted > 0)
+ goto reraise;
}
- }
+ if (clock_Lt(&now, &ev->eventTime)) {
+ *next = rxevent_nextRaiseEvents = ev->eventTime;
+ rxevent_raiseScheduled = 1;
+ clock_Sub(next, &now);
+ MUTEX_EXIT(&rxevent_lock);
+ return 1;
+ }
+ }
queue_Remove(ev);
rxevent_nPosted--;
MUTEX_EXIT(&rxevent_lock);
- ev->func(ev, ev->arg, ev->arg1);
+ if (ev->newargs) {
+ ev->func.newfunc(ev, ev->arg, ev->arg1, ev->arg2);
+ } else {
+ ev->func.oldfunc(ev, ev->arg, ev->arg1);
+ }
MUTEX_ENTER(&rxevent_lock);
queue_Append(&rxevent_free, ev);
rxevent_nFree++;
} while (queue_IsNotEmpty(&ep->events));
}
#ifdef RXDEBUG
- if (rx_Log_event) fprintf(rx_Log_event, "rxevent_RaiseEvents(%d.%d)\n", now.sec, now.usec);
+ if (rx_Log_event)
+ fprintf(rx_Log_event, "rxevent_RaiseEvents(%d.%d)\n", (int)now.sec,
+ (int)now.usec);
#endif
rxevent_raiseScheduled = 0;
MUTEX_EXIT(&rxevent_lock);
return 0;
}
-void shutdown_rxevent(void)
+void
+shutdown_rxevent(void)
{
struct xfreelist *xp, *nxp;
- LOCK_EV_INIT
+ LOCK_EV_INIT;
if (!rxevent_initialized) {
- UNLOCK_EV_INIT
+ UNLOCK_EV_INIT;
return;
}
rxevent_initialized = 0;
- UNLOCK_EV_INIT
+ UNLOCK_EV_INIT;
MUTEX_DESTROY(&rxevent_lock);
#if defined(AFS_AIX32_ENV) && defined(KERNEL)
/* Everything is freed in afs_osinet.c */
osi_Free((char *)xp, sizeof(struct xfreelist));
xp = nxp;
}
+ xfreemallocs = NULL;
#endif
}