/*
 * Copyright (c) 2011 Your File System Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
/* A reimplementation of the rx_event handler using red/black trees
 *
 * The first rx_event implementation used a simple sorted queue of all
 * events, which led to O(n^2) performance, where n is the number of
 * outstanding events. This was found to scale poorly, so was replaced.
 *
 * The second implementation used a set of per-second buckets to store
 * events. Each bucket (referred to as an epoch in the code) stored all
 * of the events which expired in that second. However, on modern networks,
 * where round-trip times are in the millisecond range, most connections
 * will have events expiring within the next second, so the problem recurs.
 *
 * This new implementation uses red/black trees to store a sorted list of
 * events. Red/black trees are guaranteed to have no worse than O(log N)
 * insertion, and are commonly used in timer applications.
 */
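/*
 * A minimal usage sketch (illustrative only: retryFunc, conn and the
 * 500ms delay are hypothetical, not part of this file):
 *
 *     struct rxevent *ev;
 *     struct clock now, when;
 *
 *     clock_GetTime(&now);
 *     when = now;
 *     clock_Addmsec(&when, 500);
 *     ev = rxevent_Post(&when, &now, retryFunc, conn, NULL, 0);
 *     ...
 *     rxevent_Cancel(&ev, NULL, 0);  (drops the caller's reference)
 */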
#include <afsconfig.h>
#include <afs/param.h>

#ifdef KERNEL
# include "afs/sysincludes.h"
# include "afsincludes.h"
#else
# include <roken.h>
#endif

#include <opr/queue.h>
#include <opr/rbtree.h>

#include "rx.h"
#include "rx_atomic.h"
#include "rx_call.h"
struct rxevent {
    struct opr_queue q;        /* Other events expiring at the same time */
    struct opr_rbtree_node node;
    struct clock eventTime;
    rx_atomic_t refcnt;
    int handled;
    void (*func)(struct rxevent *, void *, void *, int);
    void *arg;
    void *arg1;
    int arg2;
};

struct malloclist {
    void *mem;
    int size;
    struct malloclist *next;
};

static struct {
    afs_kmutex_t lock;
    struct opr_queue list;
    struct malloclist *mallocs;
} freeEvents;

static struct {
    afs_kmutex_t lock;
    struct opr_rbtree head;
    struct rxevent *first;
} eventTree;

static struct {
    struct clock last;
    struct clock next;
    void (*func)(void);
    int raised;
} eventSchedule;

static int allocUnit = 10;
static struct rxevent *
rxevent_alloc(void) {
    struct rxevent *evlist;
    struct rxevent *ev;
    struct malloclist *mrec;
    int i;

    MUTEX_ENTER(&freeEvents.lock);
    if (opr_queue_IsEmpty(&freeEvents.list)) {
        MUTEX_EXIT(&freeEvents.lock);

#if defined(AFS_AIX32_ENV) && defined(KERNEL)
        ev = rxi_Alloc(sizeof(struct rxevent));
#else
        evlist = osi_Alloc(sizeof(struct rxevent) * allocUnit);
        mrec = osi_Alloc(sizeof(struct malloclist));

        /* Record the allocation so shutdown_rxevent can free it later */
        mrec->mem = evlist;
        mrec->size = sizeof(struct rxevent) * allocUnit;

        /* Keep the first element for ourselves; chain the rest onto the
         * free list */
        MUTEX_ENTER(&freeEvents.lock);
        for (i = 1; i < allocUnit; i++) {
            opr_queue_Append(&freeEvents.list, &evlist[i].q);
        }
        mrec->next = freeEvents.mallocs;
        freeEvents.mallocs = mrec;
        MUTEX_EXIT(&freeEvents.lock);
        ev = &evlist[0];
#endif
    } else {
        ev = opr_queue_First(&freeEvents.list, struct rxevent, q);
        opr_queue_Remove(&ev->q);
        MUTEX_EXIT(&freeEvents.lock);
    }

    memset(ev, 0, sizeof(struct rxevent));
    rx_atomic_set(&ev->refcnt, 1);
    return ev;
}
/* Return an event to the free list */
static void
rxevent_free(struct rxevent *ev) {
    MUTEX_ENTER(&freeEvents.lock);
    opr_queue_Prepend(&freeEvents.list, &ev->q);
    MUTEX_EXIT(&freeEvents.lock);
}
/* Drop a reference; free the event once the last reference is gone */
static void
rxevent_put(struct rxevent *ev) {
    if (rx_atomic_dec_and_read(&ev->refcnt) == 0)
        rxevent_free(ev);
}

void
rxevent_Put(struct rxevent *ev) {
    rxevent_put(ev);
}

/* Take an additional reference to an event */
static_inline struct rxevent *
rxevent_get(struct rxevent *ev) {
    rx_atomic_inc(&ev->refcnt);
    return ev;
}

struct rxevent *
rxevent_Get(struct rxevent *ev) {
    return rxevent_get(ev);
}
/* Called if the time now is older than the last time we recorded running an
 * event. This test catches machines where the system time has been set
 * backwards, and avoids RX completely stalling when timers fail to fire.
 *
 * Take the difference between now and the last event time, and subtract that
 * from the timing of every event on the system. This does a relatively slow
 * walk of the complete eventTree, but time-travel will hopefully be a pretty
 * rare occurrence.
 *
 * This can only safely be called from the event thread, as it plays with the
 * schedule directly.
 */
static void
adjustTimes(void)
{
    struct opr_rbtree_node *node;
    struct clock adjTime, now;
    MUTEX_ENTER(&eventTree.lock);
    /* Time adjustment is expensive; make absolutely certain that we have
     * to do it, by getting an up to date time to base our decision on
     * once we've acquired the relevant locks.
     */
    clock_GetTime(&now);
    if (!clock_Lt(&now, &eventSchedule.last))
        goto out;

    adjTime = eventSchedule.last;
    clock_Zero(&eventSchedule.last);

    clock_Sub(&adjTime, &now);

    /* Shift every event in the tree earlier by the amount that time
     * went backwards */
    node = opr_rbtree_first(&eventTree.head);
    while (node != NULL) {
        struct rxevent *event = opr_containerof(node, struct rxevent, node);

        clock_Sub(&event->eventTime, &adjTime);
        node = opr_rbtree_next(node);
    }
    /* Guard against an empty tree: every event may already have fired */
    if (eventTree.first != NULL)
        eventSchedule.next = eventTree.first->eventTime;

out:
    MUTEX_EXIT(&eventTree.lock);
}
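/*
 * Worked example of the adjustment above (hypothetical figures): if the
 * last event ran at t=1000s but the clock now reads t=940s, adjTime
 * becomes 60s and every eventTime in the tree is pulled 60s earlier, so
 * events keep firing in the same order and at the same relative
 * intervals despite the 60s jump backwards.
 */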
static int initialised = 0;
void
rxevent_Init(int nEvents, void (*scheduler)(void))
{
    if (initialised)
        return;

    initialised = 1;

    clock_Init();

    MUTEX_INIT(&eventTree.lock, "event tree lock", MUTEX_DEFAULT, 0);
    opr_rbtree_init(&eventTree.head);

    MUTEX_INIT(&freeEvents.lock, "free events lock", MUTEX_DEFAULT, 0);
    opr_queue_Init(&freeEvents.list);
    freeEvents.mallocs = NULL;

    if (nEvents)
        allocUnit = nEvents;

    clock_Zero(&eventSchedule.next);
    clock_Zero(&eventSchedule.last);
    eventSchedule.raised = 0;
    eventSchedule.func = scheduler;
}
struct rxevent *
rxevent_Post(struct clock *when, struct clock *now,
             void (*func) (struct rxevent *, void *, void *, int),
             void *arg, void *arg1, int arg2)
{
    struct rxevent *ev, *event;
    struct opr_rbtree_node **childptr, *parent = NULL;

    ev = rxevent_alloc();
    ev->eventTime = *when;
    ev->func = func;
    ev->arg = arg;
    ev->arg1 = arg1;
    ev->arg2 = arg2;

    if (clock_Lt(now, &eventSchedule.last))
        adjustTimes();

    MUTEX_ENTER(&eventTree.lock);

    /* Work out where in the tree we'll be storing this */
    childptr = &eventTree.head.root;

    while (*childptr) {
        event = opr_containerof((*childptr), struct rxevent, node);

        parent = *childptr;
        if (clock_Lt(when, &event->eventTime))
            childptr = &(*childptr)->left;
        else if (clock_Gt(when, &event->eventTime))
            childptr = &(*childptr)->right;
        else {
            /* An event already expires at this exact time; queue behind
             * it rather than adding another tree node */
            opr_queue_Append(&event->q, &ev->q);
            goto out;
        }
    }

    opr_queue_Init(&ev->q);
    opr_rbtree_insert(&eventTree.head, parent, childptr, &ev->node);

    /* If this is now the earliest event, kick the scheduler so that it
     * reconsiders its wakeup time */
    if (eventTree.first == NULL ||
        clock_Lt(when, &(eventTree.first->eventTime))) {
        eventTree.first = ev;
        eventSchedule.raised = 1;
        clock_Zero(&eventSchedule.next);
        MUTEX_EXIT(&eventTree.lock);
        if (eventSchedule.func != NULL)
            (*eventSchedule.func)();
        return rxevent_get(ev);
    }

out:
    MUTEX_EXIT(&eventTree.lock);
    return rxevent_get(ev);
}
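/*
 * Design note: events posted for exactly the same clock value never add
 * tree nodes. The first such event becomes the node; later ones are
 * appended to its opr_queue, which keeps the tree small when many calls
 * arm timers for the same instant.
 */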
/* We're going to remove ev from the tree, so set the first pointer to the
 * next event after it */
static void
resetFirst(struct rxevent *ev)
{
    struct opr_rbtree_node *next = opr_rbtree_next(&ev->node);

    if (next)
        eventTree.first = opr_containerof(next, struct rxevent, node);
    else
        eventTree.first = NULL;
}
void
rxevent_Cancel(struct rxevent **evp, struct rx_call *call, int type)
{
    struct rxevent *event;

    if (!evp || !*evp)
        return;

    event = *evp;

    MUTEX_ENTER(&eventTree.lock);

    if (!event->handled) {
        /* We're a node on the red/black tree. If our list is non-empty,
         * then swap the first element in the list into our place,
         * promoting it to the list head */
        if (event->node.parent == NULL
            && eventTree.head.root != &event->node) {
            /* Not in the rbtree, therefore must be a list element */
            opr_queue_Remove(&event->q);
        } else {
            if (!opr_queue_IsEmpty(&event->q)) {
                struct rxevent *next;

                next = opr_queue_First(&event->q, struct rxevent, q);
                opr_queue_Remove(&next->q); /* Detach next from the list */
                if (event->q.prev == &event->q) {
                    /* next was the only list element */
                    next->q.prev = next->q.next = &next->q;
                } else {
                    /* next takes over the remainder of the list */
                    next->q = event->q;
                    next->q.prev->next = &next->q;
                    next->q.next->prev = &next->q;
                }

                opr_rbtree_replace(&eventTree.head, &event->node,
                                   &next->node);

                if (eventTree.first == event)
                    eventTree.first = next;
            } else {
                if (eventTree.first == event)
                    resetFirst(event);

                opr_rbtree_remove(&eventTree.head, &event->node);
            }
        }
        event->handled = 1;
        rxevent_put(event); /* Dispose of eventTree reference */
    }

    MUTEX_EXIT(&eventTree.lock);

    *evp = NULL;
    rxevent_put(event); /* Dispose of caller's reference */

    if (call)
        CALL_RELE(call, type);
}
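/*
 * Illustration of the promotion above (hypothetical events A, B and C
 * posted for the same time): A holds the tree node, with B and C queued
 * behind it. Cancelling A detaches B from the queue, hands B the
 * remainder of the queue (just C), and replaces A with B in the tree,
 * so B becomes the node that fires.
 */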
/* Process all events which have expired. If events remain, then the relative
 * time until the next event is returned in the parameter 'wait', and the
 * function returns 1. If no events currently remain, the function returns 0.
 *
 * If the current time is older than that of the last event processed, then we
 * assume that time has gone backwards (for example, due to a system time
 * reset). When this happens, all events in the current queue are rescheduled,
 * using the difference between the current time and the last event time as a
 * delta.
 */
int
rxevent_RaiseEvents(struct clock *wait)
{
    struct clock now;
    struct rxevent *event;
    int ret;

    clock_GetTime(&now);

    /* Check for time going backwards */
    if (clock_Lt(&now, &eventSchedule.last))
        adjustTimes();
    eventSchedule.last = now;
    /* Lock our event tree, then process every expired event */
    MUTEX_ENTER(&eventTree.lock);
    while (eventTree.first != NULL
           && clock_Lt(&eventTree.first->eventTime, &now)) {

        /* Grab the next node, either in the event's list, or in the tree node
         * itself, and remove it from the event tree */
        event = eventTree.first;
        if (!opr_queue_IsEmpty(&event->q)) {
            event = opr_queue_Last(&event->q, struct rxevent, q);
            opr_queue_Remove(&event->q);
        } else {
            resetFirst(event);
            opr_rbtree_remove(&eventTree.head, &event->node);
        }
        event->handled = 1;
        MUTEX_EXIT(&eventTree.lock);

        /* Fire the event, then free the structure */
        event->func(event, event->arg, event->arg1, event->arg2);
        rxevent_put(event);

        MUTEX_ENTER(&eventTree.lock);
    }

    /* Figure out when we next need to be scheduled */
    if (eventTree.first != NULL) {
        *wait = eventSchedule.next = eventTree.first->eventTime;
        ret = eventSchedule.raised = 1;
        clock_Sub(wait, &now);
    } else {
        ret = eventSchedule.raised = 0;
    }

    MUTEX_EXIT(&eventTree.lock);

    return ret;
}
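/*
 * A minimal sketch of a driving loop (illustrative only: the names
 * mySchedulerWakeup, sleep_for_at_most and sleep_until_woken are
 * hypothetical stand-ins for the caller's wakeup and sleep primitives;
 * mySchedulerWakeup should interrupt the sleep so that a newly posted,
 * earlier event is noticed promptly):
 *
 *     struct clock wait;
 *
 *     rxevent_Init(100, mySchedulerWakeup);
 *     for (;;) {
 *         if (rxevent_RaiseEvents(&wait))
 *             sleep_for_at_most(&wait);
 *         else
 *             sleep_until_woken();
 *     }
 */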
void
shutdown_rxevent(void)
{
    struct malloclist *mrec, *nmrec;

    if (!initialised)
        return;

    MUTEX_DESTROY(&eventTree.lock);

#if !defined(AFS_AIX32_ENV) || !defined(KERNEL)
    MUTEX_DESTROY(&freeEvents.lock);

    /* Release every block of events allocated by rxevent_alloc */
    mrec = freeEvents.mallocs;
    while (mrec) {
        nmrec = mrec->next;
        osi_Free(mrec->mem, mrec->size);
        osi_Free(mrec, sizeof(struct malloclist));
        mrec = nmrec;
    }
    freeEvents.mallocs = NULL;
#endif
}