/*
 * Copyright (c) 2011 Your File System Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/* A reimplementation of the rx_event handler using red/black trees
 *
 * The first rx_event implementation used a simple sorted queue of all
 * events, which led to O(n^2) performance, where n is the number of
 * outstanding events. This was found to scale poorly, so was replaced.
 *
 * The second implementation used a set of per-second buckets to store
 * events. Each bucket (referred to as an epoch in the code) stored all
 * of the events which expired in that second. However, on modern networks,
 * where RTTs are measured in milliseconds, most connections will have events
 * expiring within the next second, so the problem reoccurs.
 *
 * This new implementation uses red/black trees to store a sorted list of
 * events. Red/black trees guarantee no worse than O(log N) insertion, and
 * are commonly used in timer applications.
 */
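
/* A minimal sketch of typical caller usage, for orientation only. The
 * 'handler' and 'conn' names are illustrative, not part of this file, and
 * the clock helpers are assumed to behave as declared in rx_clock.h:
 *
 *	struct clock now, when;
 *	struct rxevent *ev;
 *
 *	clock_GetTime(&now);
 *	when = now;
 *	clock_Addmsec(&when, 500);
 *	ev = rxevent_Post(&when, &now, handler, conn, NULL, 0);
 *	...
 *	rxevent_Cancel(&ev, NULL, 0);
 */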

#include <afsconfig.h>
#include <afs/param.h>

#ifdef KERNEL
# include "afs/sysincludes.h"
# include "afsincludes.h"
#else
# include <roken.h>
#endif

#include <afs/opr.h>
#include <opr/queue.h>
#include <opr/rbtree.h>

#include "rx.h"
#include "rx_atomic.h"
#include "rx_call.h"
#include "rx_globals.h"
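
/* Event storage: each pending event is a node in a red/black tree, keyed
 * on its expiry time. Events which expire at exactly the same time share
 * a single tree node, and are chained together on that node's 'q' list. */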
struct rxevent {
    struct opr_rbtree_node node;
    struct clock eventTime;
    struct opr_queue q;
    rx_atomic_t refcnt;
    int handled;	/* Set once the event has fired or been cancelled */
    void (*func)(struct rxevent *, void *, void *, int);
    void *arg;
    void *arg1;
    int arg2;
};

struct malloclist {
    void *mem;
    int size;
    struct malloclist *next;
};

static struct {
    afs_kmutex_t lock;
    struct opr_queue list;
    struct malloclist *mallocs;
} freeEvents;

static struct {
    afs_kmutex_t lock;
    struct opr_rbtree head;
    struct rxevent *first;
} eventTree;

static struct {
    struct clock last;
    struct clock next;
    void (*func)(void);
    int raised;
} eventSchedule;
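
/* Events are carved out of blocks of allocUnit events at a time; the unit
 * can be overridden by the nEvents argument to rxevent_Init() */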
static int allocUnit = 10;

static struct rxevent *
rxevent_alloc(void) {
    struct rxevent *evlist;
    struct rxevent *ev;
    struct malloclist *mrec;
    int i;

    MUTEX_ENTER(&freeEvents.lock);
    if (opr_queue_IsEmpty(&freeEvents.list)) {
	MUTEX_EXIT(&freeEvents.lock);

#if defined(AFS_AIX32_ENV) && defined(KERNEL)
	ev = rxi_Alloc(sizeof(struct rxevent));
#else
	evlist = osi_Alloc(sizeof(struct rxevent) * allocUnit);
	mrec = osi_Alloc(sizeof(struct malloclist));

	mrec->mem = evlist;
	mrec->size = sizeof(struct rxevent) * allocUnit;

	MUTEX_ENTER(&freeEvents.lock);
	for (i = 1; i < allocUnit; i++) {
	    opr_queue_Append(&freeEvents.list, &evlist[i].q);
	}
	mrec->next = freeEvents.mallocs;
	freeEvents.mallocs = mrec;
	MUTEX_EXIT(&freeEvents.lock);
	ev = &evlist[0];
#endif
    } else {
	ev = opr_queue_First(&freeEvents.list, struct rxevent, q);
	opr_queue_Remove(&ev->q);
	MUTEX_EXIT(&freeEvents.lock);
    }

    memset(ev, 0, sizeof(struct rxevent));
    rx_atomic_set(&ev->refcnt, 1);

    return ev;
}
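
/* Return an event to the head of the free list */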
static void
rxevent_free(struct rxevent *ev) {
    MUTEX_ENTER(&freeEvents.lock);
    opr_queue_Prepend(&freeEvents.list, &ev->q);
    MUTEX_EXIT(&freeEvents.lock);
}
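
/* Reference counting: rxevent_get and rxevent_put adjust refcnt, and the
 * event is returned to the free list when the last reference is dropped.
 * The capitalised variants are the public equivalents exported to callers. */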
static_inline void
rxevent_put(struct rxevent *ev) {
    if (rx_atomic_dec_and_read(&ev->refcnt) == 0) {
	rxevent_free(ev);
    }
}

void
rxevent_Put(struct rxevent *ev) {
    rxevent_put(ev);
}

static_inline struct rxevent *
rxevent_get(struct rxevent *ev) {
    rx_atomic_inc(&ev->refcnt);
    return ev;
}

struct rxevent *
rxevent_Get(struct rxevent *ev) {
    return rxevent_get(ev);
}

/* Called if the time now is older than the last time we recorded running an
 * event. This test catches machines where the system time has been set
 * backwards, and avoids RX completely stalling when timers fail to fire.
 *
 * Take the difference between now and the last event time, and subtract that
 * from the timing of every event on the system. This does a relatively slow
 * walk of the complete eventTree, but time-travel will hopefully be a pretty
 * rare occurrence.
 *
 * This can only safely be called from the event thread, as it plays with the
 * schedule directly.
 */
static void
adjustTimes(void)
{
    struct opr_rbtree_node *node;
    struct clock adjTime, now;

    MUTEX_ENTER(&eventTree.lock);
    /* Time adjustment is expensive, make absolutely certain that we have
     * to do it, by getting an up to date time to base our decision on
     * once we've acquired the relevant locks.
     */
    clock_GetTime(&now);
    if (!clock_Lt(&now, &eventSchedule.last))
	goto out;

    adjTime = eventSchedule.last;
    clock_Zero(&eventSchedule.last);

    clock_Sub(&adjTime, &now);

    node = opr_rbtree_first(&eventTree.head);
    while (node != NULL) {
	struct rxevent *event = opr_containerof(node, struct rxevent, node);

	clock_Sub(&event->eventTime, &adjTime);
	node = opr_rbtree_next(node);
    }
    if (eventTree.first != NULL)	/* Guard against an empty tree */
	eventSchedule.next = eventTree.first->eventTime;

out:
    MUTEX_EXIT(&eventTree.lock);
}
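
/* Initialise the event package. nEvents, if non-zero, overrides the default
 * allocation unit for events. scheduler, if non-NULL, is invoked whenever a
 * newly posted event becomes the earliest in the tree, so that the event
 * thread can recompute its sleep time. The initialised guard below makes
 * repeated calls harmless. */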
static int initialised = 0;

void
rxevent_Init(int nEvents, void (*scheduler)(void))
{
    if (initialised)
	return;

    initialised = 1;

    clock_Init();
    MUTEX_INIT(&eventTree.lock, "event tree lock", MUTEX_DEFAULT, 0);
    opr_rbtree_init(&eventTree.head);

    MUTEX_INIT(&freeEvents.lock, "free events lock", MUTEX_DEFAULT, 0);
    opr_queue_Init(&freeEvents.list);
    freeEvents.mallocs = NULL;

    if (nEvents)
	allocUnit = nEvents;

    clock_Zero(&eventSchedule.next);
    clock_Zero(&eventSchedule.last);
    eventSchedule.raised = 0;
    eventSchedule.func = scheduler;
}
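
/* Post a new event: schedule func(event, arg, arg1, arg2) to run at the
 * absolute time 'when'. 'now' is the caller's view of the current time,
 * used to detect a clock which has moved backwards. Returns a reference to
 * the new event, which the caller must eventually release with
 * rxevent_Put() or rxevent_Cancel(). */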
struct rxevent *
rxevent_Post(struct clock *when, struct clock *now,
	     void (*func) (struct rxevent *, void *, void *, int),
	     void *arg, void *arg1, int arg2)
{
    struct rxevent *ev, *event;
    struct opr_rbtree_node **childptr, *parent = NULL;

    ev = rxevent_alloc();
    ev->eventTime = *when;
    ev->func = func;
    ev->arg = arg;
    ev->arg1 = arg1;
    ev->arg2 = arg2;

    if (clock_Lt(now, &eventSchedule.last))
	adjustTimes();

    MUTEX_ENTER(&eventTree.lock);

    /* Work out where in the tree we'll be storing this */
    childptr = &eventTree.head.root;

    while (*childptr) {
	event = opr_containerof((*childptr), struct rxevent, node);

	parent = *childptr;
	if (clock_Lt(when, &event->eventTime))
	    childptr = &(*childptr)->left;
	else if (clock_Gt(when, &event->eventTime))
	    childptr = &(*childptr)->right;
	else {
	    /* An event with the same expiry already exists; chain this one
	     * onto its list rather than adding a new tree node */
	    opr_queue_Append(&event->q, &ev->q);
	    goto out;
	}
    }
    opr_queue_Init(&ev->q);
    opr_rbtree_insert(&eventTree.head, parent, childptr, &ev->node);

    /* If this event is earlier than everything already in the tree, wake
     * the scheduler so it can reconsider how long to sleep */
    if (eventTree.first == NULL ||
	clock_Lt(when, &(eventTree.first->eventTime))) {
	eventTree.first = ev;
	eventSchedule.raised = 1;
	clock_Zero(&eventSchedule.next);
	MUTEX_EXIT(&eventTree.lock);
	if (eventSchedule.func != NULL)
	    (*eventSchedule.func)();
	return rxevent_get(ev);
    }

out:
    MUTEX_EXIT(&eventTree.lock);
    return rxevent_get(ev);
}

/* We're going to remove ev from the tree, so set the first pointer to the
 * next event after it */
static_inline void
resetFirst(struct rxevent *ev)
{
    struct opr_rbtree_node *next = opr_rbtree_next(&ev->node);

    if (next)
	eventTree.first = opr_containerof(next, struct rxevent, node);
    else
	eventTree.first = NULL;
}
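
/* Cancel an event and release the caller's reference to it. If the event
 * is still pending, it is removed from the tree (or from its sibling list)
 * and the tree's reference is dropped too. If 'call' is non-NULL, the
 * associated call reference of the given type is also released. */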
void
rxevent_Cancel(struct rxevent **evp, struct rx_call *call, int type)
{
    struct rxevent *event;

    if (!evp || !*evp)
	return;

    event = *evp;

    MUTEX_ENTER(&eventTree.lock);

    if (!event->handled) {
	/* We're a node on the red/black tree. If our list is non-empty,
	 * then swap the first element in the list into our place,
	 * promoting it to the list head */
	if (event->node.parent == NULL
	    && eventTree.head.root != &event->node) {
	    /* Not in the rbtree, therefore must be a list element */
	    opr_queue_Remove(&event->q);
	} else {
	    if (!opr_queue_IsEmpty(&event->q)) {
		struct rxevent *next;

		next = opr_queue_First(&event->q, struct rxevent, q);
		opr_queue_Remove(&next->q); /* Remove ourselves from list */
		if (event->q.prev == &event->q) {
		    next->q.prev = next->q.next = &next->q;
		} else {
		    next->q = event->q;
		    next->q.prev->next = &next->q;
		    next->q.next->prev = &next->q;
		}

		opr_rbtree_replace(&eventTree.head, &event->node,
				   &next->node);

		if (eventTree.first == event)
		    eventTree.first = next;
	    } else {
		if (eventTree.first == event)
		    resetFirst(event);

		opr_rbtree_remove(&eventTree.head, &event->node);
	    }
	}
	event->handled = 1;
	rxevent_put(event); /* Dispose of eventTree reference */
    }

    MUTEX_EXIT(&eventTree.lock);

    *evp = NULL;
    rxevent_put(event); /* Dispose of caller's reference */

    if (call)
	CALL_RELE(call, type);
}

/* Process all events which have expired. If events remain, then the relative
 * time until the next event is returned in the parameter 'wait', and the
 * function returns 1. If no events currently remain, the function returns 0.
 *
 * If the current time is older than that of the last event processed, then we
 * assume that time has gone backwards (for example, due to a system time
 * reset). When this happens, all events in the current queue are rescheduled,
 * using the difference between the current time and the last event time as a
 * delta.
 */
int
rxevent_RaiseEvents(struct clock *wait)
{
    struct clock now;
    struct rxevent *event;
    int ret;

    clock_GetTime(&now);

    /* Check for time going backwards */
    if (clock_Lt(&now, &eventSchedule.last))
	adjustTimes();
    eventSchedule.last = now;

    MUTEX_ENTER(&eventTree.lock);
    /* With the tree locked, fire every event which has already expired */
    while (eventTree.first != NULL
	   && clock_Lt(&eventTree.first->eventTime, &now)) {

	/* Grab the next node, either in the event's list, or in the tree node
	 * itself, and remove it from the event tree */
	event = eventTree.first;
	if (!opr_queue_IsEmpty(&event->q)) {
	    event = opr_queue_Last(&event->q, struct rxevent, q);
	    opr_queue_Remove(&event->q);
	} else {
	    resetFirst(event);
	    opr_rbtree_remove(&eventTree.head, &event->node);
	}
	event->handled = 1;
	MUTEX_EXIT(&eventTree.lock);

	/* Fire the event, then drop our reference to the structure */
	event->func(event, event->arg, event->arg1, event->arg2);
	rxevent_put(event);

	MUTEX_ENTER(&eventTree.lock);
    }

    /* Figure out when we next need to be scheduled */
    if (eventTree.first != NULL) {
	*wait = eventSchedule.next = eventTree.first->eventTime;
	ret = eventSchedule.raised = 1;
	clock_Sub(wait, &now);
    } else {
	ret = eventSchedule.raised = 0;
    }

    MUTEX_EXIT(&eventTree.lock);

    return ret;
}
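
/* Tear down the event package, destroying its locks and returning all of
 * the memory allocated for events to the system */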
void
shutdown_rxevent(void)
{
    struct malloclist *mrec, *nmrec;

    if (!initialised) {
	return;
    }
    MUTEX_DESTROY(&eventTree.lock);

#if !defined(AFS_AIX32_ENV) || !defined(KERNEL)
    MUTEX_DESTROY(&freeEvents.lock);
    mrec = freeEvents.mallocs;
    while (mrec) {
	nmrec = mrec->next;
	osi_Free(mrec->mem, mrec->size);
	osi_Free(mrec, sizeof(struct malloclist));
	mrec = nmrec;
    }
    freeEvents.mallocs = NULL;
#endif
}