2 * Copyright 2000, International Business Machines Corporation and others.
5 * This software has been released under the terms of the IBM Public
6 * License. For details, see the LICENSE file in the top-level source
7 * directory or online at http://www.openafs.org/dl/license10.html
9 * Portions Copyright (c) 2006 Sine Nomine Associates
13 * NEW callback package callback.c (replaces vicecb.c)
14 * Updated call back routines, NOW with:
16 * Faster DeleteVenus (Now called DeleteAllCallBacks)
17 * Call back breaking for volumes
18 * Adaptive timeouts on call backs
19 * Architected for Multi RPC
20 * No locks (currently implicit vnode locks--these will go, too)
21 * Delayed call back when rpc connection down.
22 * Bulk break of delayed call backs when rpc connection
24 * Strict limit on number of call backs.
26 * InitCallBack(nblocks)
27 * Initialize: nblocks is max number # of file entries + # of callback entries
28 * nblocks must be < 65536
29 * Space used is nblocks*16 bytes
30 * Note that space will be reclaimed by breaking callbacks of old hosts
32 * time = AddCallBack(host, fid)
34 * Returns the expiration time at the workstation.
36 * BreakCallBack(host, fid)
37 * Break all call backs for fid, except for the specified host.
40 * BreakVolumeCallBacksLater(volume)
41 * Break all call backs on volume, using single call to each host
42 * Delete all the call backs.
44 * DeleteCallBack(host,fid)
45 * Delete (do not break) single call back for fid.
47 * DeleteFileCallBacks(fid)
48 * Delete (do not break) all call backs for fid.
50 * DeleteAllCallBacks(host)
51 * Delete (do not break) all call backs for host.
53 * CleanupTimedOutCallBacks()
54 * Delete all timed out call back entries
55 * Must be called periodically by file server.
57 * BreakDelayedCallBacks(host)
58 * Break all delayed call backs for host.
59 * Returns 1: one or more failed, 0: success.
61 * PrintCallBackStats()
62 * Print statistics about call backs to stdout.
64 * DumpCallBacks() ---wishful thinking---
65 * Dump call back state to /tmp/callback.state.
66 * This is separately interpretable by the program pcb.
68 * Notes: In general, if a call back to a host doesn't get through,
69 * then HostDown, supplied elsewhere, is called. BreakDelayedCallBacks,
70 * however, does not call HostDown, but instead returns an indication of
71 * success if all delayed call backs were finally broken.
73 * BreakDelayedCallBacks MUST be called at the first sign of activity
74 * from the host after HostDown has been called (or a previous
75 * BreakDelayedCallBacks failed). The BreakDelayedCallBacks must be
76 * allowed to complete before any requests from that host are handled.
77 * If BreakDelayedCallBacks fails, then the host should remain
78 * down (and the request should be failed).
80 * CleanupTimedOutCallBacks MUST be called periodically by the file server for
81 * this package to work correctly. Every 5 minutes is suggested.
84 #include <afsconfig.h>
85 #include <afs/param.h>
90 #ifdef HAVE_SYS_FILE_H
96 #include <afs/nfs.h> /* yuck. This is an abomination. */
98 #include <rx/rx_queue.h>
99 #include <afs/afscbint.h>
100 #include <afs/afsutil.h>
101 #include <afs/ihandle.h>
102 #include <afs/partition.h>
103 #include <afs/vnode.h>
104 #include <afs/volume.h>
105 #include "viced_prototypes.h"
108 #include <afs/ptclient.h> /* need definition of prlist for host.h */
110 #include "callback.h"
111 #ifdef AFS_DEMAND_ATTACH_FS
112 #include "serialize_state.h"
113 #endif /* AFS_DEMAND_ATTACH_FS */
116 extern afsUUID FS_HostUUID;
117 extern int hostCount;
119 #ifndef INTERPRET_DUMP
120 static int ShowProblems = 1;
123 struct cbcounters cbstuff;
125 static struct FileEntry * FE = NULL; /* don't use FE[0] */
126 static struct CallBack * CB = NULL; /* don't use CB[0] */
128 static struct CallBack * CBfree = NULL;
129 static struct FileEntry * FEfree = NULL;
132 /* Time to live for call backs depends upon number of users of the file.
133 * TimeOuts is indexed by this number/8 (using TimeOut macro). Times
134 * in this table are for the workstation; server timeouts, add
137 static int TimeOuts[] = {
138 /* Note: don't make the first entry larger than 4 hours (see above) */
139 4 * 60 * 60, /* 0-7 users */
140 1 * 60 * 60, /* 8-15 users */
141 30 * 60, /* 16-23 users */
142 15 * 60, /* 24-31 users */
143 15 * 60, /* 32-39 users */
144 10 * 60, /* 40-47 users */
145 10 * 60, /* 48-55 users */
146 10 * 60, /* 56-63 users */
147 }; /* Anything more: MinTimeOut */
149 /* minimum time given for a call back */
150 #ifndef INTERPRET_DUMP
151 static int MinTimeOut = (7 * 60);
154 /* Heads of CB queues; a timeout index is 1+index into this array */
155 static afs_uint32 timeout[CB_NUM_TIMEOUT_QUEUES];
157 static afs_int32 tfirst; /* cbtime of oldest unexpired call back time queue */
160 /* 16 byte object get/free routines */
165 /* Prototypes for static routines */
166 static struct FileEntry *FindFE(AFSFid * fid);
168 #ifndef INTERPRET_DUMP
169 static struct CallBack *iGetCB(int *nused);
170 static int iFreeCB(struct CallBack *cb, int *nused);
171 static struct FileEntry *iGetFE(int *nused);
172 static int iFreeFE(struct FileEntry *fe, int *nused);
173 static int TAdd(struct CallBack *cb, afs_uint32 * thead);
174 static int TDel(struct CallBack *cb);
175 static int HAdd(struct CallBack *cb, struct host *host);
176 static int HDel(struct CallBack *cb);
177 static int CDel(struct CallBack *cb, int deletefe);
178 static int CDelPtr(struct FileEntry *fe, afs_uint32 * cbp,
180 static afs_uint32 *FindCBPtr(struct FileEntry *fe, struct host *host);
181 static int FDel(struct FileEntry *fe);
182 static int AddCallBack1_r(struct host *host, AFSFid * fid, afs_uint32 * thead,
183 int type, int locked);
184 static void MultiBreakCallBack_r(struct cbstruct cba[], int ncbas,
185 struct AFSCBFids *afidp);
186 static int MultiBreakVolumeCallBack_r(struct host *host,
187 struct VCBParams *parms, int deletefe);
188 static int MultiBreakVolumeLaterCallBack(struct host *host, void *rock);
189 static int GetSomeSpace_r(struct host *hostp, int locked);
190 static int ClearHostCallbacks_r(struct host *hp, int locked);
191 static int DumpCallBackState_r(void);
194 #define GetCB() ((struct CallBack *)iGetCB(&cbstuff.nCBs))
195 #define GetFE() ((struct FileEntry *)iGetFE(&cbstuff.nFEs))
196 #define FreeCB(cb) iFreeCB((struct CallBack *)cb, &cbstuff.nCBs)
197 #define FreeFE(fe) iFreeFE((struct FileEntry *)fe, &cbstuff.nFEs)
200 /* Other protos - move out sometime */
201 void PrintCB(struct CallBack *cb, afs_uint32 now);
203 static afs_uint32 HashTable[FEHASH_SIZE]; /* File entry hash table */
205 static struct FileEntry *
210 struct FileEntry *fe;
212 hash = FEHash(fid->Volume, fid->Unique);
213 for (fei = HashTable[hash]; fei; fei = fe->fnext) {
215 if (fe->volid == fid->Volume && fe->unique == fid->Unique
216 && fe->vnode == fid->Vnode && (fe->status & FE_LATER) != FE_LATER)
222 #ifndef INTERPRET_DUMP
224 static struct CallBack *
227 struct CallBack *ret;
229 if ((ret = CBfree)) {
230 CBfree = (struct CallBack *)(((struct object *)ret)->next);
237 iFreeCB(struct CallBack *cb, int *nused)
239 ((struct object *)cb)->next = (struct object *)CBfree;
245 static struct FileEntry *
248 struct FileEntry *ret;
250 if ((ret = FEfree)) {
251 FEfree = (struct FileEntry *)(((struct object *)ret)->next);
258 iFreeFE(struct FileEntry *fe, int *nused)
260 ((struct object *)fe)->next = (struct object *)FEfree;
266 /* Add cb to end of specified timeout list */
268 TAdd(struct CallBack *cb, afs_uint32 * thead)
271 (*thead) = cb->tnext = cb->tprev = cbtoi(cb);
273 struct CallBack *thp = itocb(*thead);
275 cb->tprev = thp->tprev;
279 thp->tprev = (itocb(thp->tprev)->tnext = cbtoi(cb));
281 thp->tprev = cbtoi(cb);
284 cb->thead = ttoi(thead);
288 /* Delete call back entry from timeout list */
290 TDel(struct CallBack *cb)
292 afs_uint32 *thead = itot(cb->thead);
294 if (*thead == cbtoi(cb))
295 *thead = (*thead == cb->tnext ? 0 : cb->tnext);
296 if (itocb(cb->tprev))
297 itocb(cb->tprev)->tnext = cb->tnext;
298 if (itocb(cb->tnext))
299 itocb(cb->tnext)->tprev = cb->tprev;
303 /* Add cb to end of specified host list */
305 HAdd(struct CallBack *cb, struct host *host)
307 cb->hhead = h_htoi(host);
309 host->cblist = cb->hnext = cb->hprev = cbtoi(cb);
311 struct CallBack *fcb = itocb(host->cblist);
313 cb->hprev = fcb->hprev;
314 cb->hnext = cbtoi(fcb);
315 fcb->hprev = (itocb(fcb->hprev)->hnext = cbtoi(cb));
320 /* Delete call back entry from host list */
322 HDel(struct CallBack *cb)
324 afs_uint32 *hhead = &h_itoh(cb->hhead)->cblist;
326 if (*hhead == cbtoi(cb))
327 *hhead = (*hhead == cb->hnext ? 0 : cb->hnext);
328 itocb(cb->hprev)->hnext = cb->hnext;
329 itocb(cb->hnext)->hprev = cb->hprev;
333 /* Delete call back entry from fid's chain of cb's */
334 /* N.B. This one also deletes the CB, and also possibly parent FE, so
335 * make sure that it is not on any other list before calling this
338 CDel(struct CallBack *cb, int deletefe)
341 struct FileEntry *fe = itofe(cb->fhead);
345 for (safety = 0, cbp = &fe->firstcb; *cbp && *cbp != cbi;
346 cbp = &itocb(*cbp)->cnext, safety++) {
347 if (safety > cbstuff.nblks + 10) {
348 ViceLogThenPanic(0, ("CDel: Internal Error -- shutting down: "
349 "wanted %d from %d, now at %d\n",
350 cbi, fe->firstcb, *cbp));
351 DumpCallBackState_r();
352 ShutDownAndCore(PANIC);
355 CDelPtr(fe, cbp, deletefe);
359 /* Same as CDel, but pointer to parent pointer to CB entry is passed,
360 * as well as file entry */
361 /* N.B. This one also deletes the CB, and also possibly parent FE, so
362 * make sure that it is not on any other list before calling this
364 static int Ccdelpt = 0, CcdelB = 0;
367 CDelPtr(struct FileEntry *fe, afs_uint32 * cbp,
380 if ((--fe->ncbs == 0) && deletefe)
386 FindCBPtr(struct FileEntry *fe, struct host *host)
388 afs_uint32 hostindex = h_htoi(host);
393 for (safety = 0, cbp = &fe->firstcb; *cbp; cbp = &cb->cnext, safety++) {
394 if (safety > cbstuff.nblks) {
395 ViceLog(0, ("FindCBPtr: Internal Error -- shutting down.\n"));
396 DumpCallBackState_r();
397 ShutDownAndCore(PANIC);
400 if (cb->hhead == hostindex)
406 /* Delete file entry from hash table */
408 FDel(struct FileEntry *fe)
411 afs_uint32 *p = &HashTable[FEHash(fe->volid, fe->unique)];
413 while (*p && *p != fei)
414 p = &itofe(*p)->fnext;
421 /* initialize the callback package */
423 InitCallBack(int nblks)
425 opr_Assert(nblks > 0);
428 tfirst = CBtime(time(NULL));
429 /* N.B. The "-1", below, is because
430 * FE[0] and CB[0] are not used--and not allocated */
431 FE = calloc(nblks, sizeof(struct FileEntry));
433 ViceLogThenPanic(0, ("Failed malloc in InitCallBack\n"));
435 FE--; /* FE[0] is supposed to point to junk */
436 cbstuff.nFEs = nblks;
438 FreeFE(&FE[cbstuff.nFEs]); /* This is correct */
439 CB = calloc(nblks, sizeof(struct CallBack));
441 ViceLogThenPanic(0, ("Failed malloc in InitCallBack\n"));
443 CB--; /* CB[0] is supposed to point to junk */
444 cbstuff.nCBs = nblks;
446 FreeCB(&CB[cbstuff.nCBs]); /* This is correct */
447 cbstuff.nblks = nblks;
448 cbstuff.nbreakers = 0;
454 XCallBackBulk_r(struct host * ahost, struct AFSFid * fids, afs_int32 nfids)
456 struct AFSCallBack tcbs[AFSCBMAX];
462 struct rx_connection *cb_conn = NULL;
464 rx_SetConnDeadTime(ahost->callback_rxcon, 4);
465 rx_SetConnHardDeadTime(ahost->callback_rxcon, AFS_HARDDEADTIME);
471 for (i = 0; i < nfids && i < AFSCBMAX; i++) {
472 tcbs[i].CallBackVersion = CALLBACK_VERSION;
473 tcbs[i].ExpirationTime = 0;
474 tcbs[i].CallBackType = CB_DROPPED;
476 tf.AFSCBFids_len = i;
477 tf.AFSCBFids_val = &(fids[j]);
481 tc.AFSCBs_val = tcbs;
483 cb_conn = ahost->callback_rxcon;
484 rx_GetConnection(cb_conn);
486 code |= RXAFSCB_CallBack(cb_conn, &tf, &tc);
487 rx_PutConnection(cb_conn);
495 /* the locked flag tells us if the host entry has already been locked
496 * by our parent. I don't think anybody actually calls us with the
497 * host locked, but here's how to make that work: GetSomeSpace has to
498 * change so that it doesn't attempt to lock any hosts < "host". That
499 * means that it might be unable to free any objects, so it has to
500 * return an exit status. If it fails, then AddCallBack1 might fail,
501 * as well. If so, the host->ResetDone should probably be set to 0,
502 * and we probably don't want to return a callback promise to the
503 * cache manager, either. */
505 AddCallBack1(struct host *host, AFSFid * fid, afs_uint32 * thead, int type,
513 if (!(host->hostFlags & HOSTDELETED))
514 retVal = AddCallBack1_r(host, fid, thead, type, 1);
524 AddCallBack1_r(struct host *host, AFSFid * fid, afs_uint32 * thead, int type,
527 struct FileEntry *fe;
528 struct CallBack *cb = 0, *lastcb = 0;
529 struct FileEntry *newfe = 0;
530 afs_uint32 time_out = 0;
531 afs_uint32 *Thead = thead;
532 struct CallBack *newcb = 0;
535 cbstuff.AddCallBacks++;
539 /* allocate these guys first, since we can't call the allocator with
540 * the host structure locked -- or we might deadlock. However, we have
541 * to avoid races with FindFE... */
542 while (!(newcb = GetCB())) {
543 GetSomeSpace_r(host, locked);
545 while (!(newfe = GetFE())) { /* Get it now, so we don't have to call */
546 /* GetSomeSpace with the host locked, later. This might turn out to */
547 /* have been unneccessary, but that's actually kind of unlikely, since */
548 /* most files are not shared. */
549 GetSomeSpace_r(host, locked);
553 h_Lock_r(host); /* this can yield, so do it before we get any */
555 if (host->hostFlags & HOSTDELETED) {
563 if (type == CB_NORMAL) {
565 TimeCeiling(time(NULL) + TimeOut(fe ? fe->ncbs : 0) +
567 Thead = THead(CBtime(time_out));
568 } else if (type == CB_VOLUME) {
569 time_out = TimeCeiling((60 * 120 + time(NULL)) + ServerBias);
570 Thead = THead(CBtime(time_out));
571 } else if (type == CB_BULK) {
572 /* bulk status can get so many callbacks all at once, and most of them
573 * are probably not for things that will be used for long.
576 TimeCeiling(time(NULL) + ServerBias +
577 TimeOut(22 + (fe ? fe->ncbs : 0)));
578 Thead = THead(CBtime(time_out));
589 fe->volid = fid->Volume;
590 fe->vnode = fid->Vnode;
591 fe->unique = fid->Unique;
594 hash = FEHash(fid->Volume, fid->Unique);
595 fe->fnext = HashTable[hash];
596 HashTable[hash] = fetoi(fe);
598 for (safety = 0, lastcb = cb = itocb(fe->firstcb); cb;
599 lastcb = cb, cb = itocb(cb->cnext), safety++) {
600 if (safety > cbstuff.nblks) {
601 ViceLog(0, ("AddCallBack1: Internal Error -- shutting down.\n"));
602 DumpCallBackState_r();
603 ShutDownAndCore(PANIC);
605 if (cb->hhead == h_htoi(host))
608 if (cb) { /* Already have call back: move to new timeout list */
609 /* don't change delayed callbacks back to normal ones */
610 if (cb->status != CB_DELAYED)
612 /* Only move if new timeout is longer */
613 if (TNorm(ttoi(Thead)) > TNorm(cb->thead)) {
617 if (newfe == NULL) { /* we are using the new FE */
618 fe->firstcb = cbtoi(cb);
620 cb->fhead = fetoi(fe);
625 *(lastcb ? &lastcb->cnext : &fe->firstcb) = cbtoi(cb);
628 cb->fhead = fetoi(fe);
634 /* now free any still-unused callback or host entries */
640 if (!locked) /* freecb and freefe might(?) yield */
643 if (type == CB_NORMAL || type == CB_VOLUME || type == CB_BULK)
644 return time_out - ServerBias; /* Expires sooner at workstation */
650 CompareCBA(const void *e1, const void *e2)
652 const struct cbstruct *cba1 = (const struct cbstruct *)e1;
653 const struct cbstruct *cba2 = (const struct cbstruct *)e2;
654 return ((cba1->hp)->index - (cba2->hp)->index);
657 /* Take an array full of hosts, all held. Break callbacks to them, and
658 * release the holds once you're done.
659 * Currently only works for a single Fid in afidp array.
660 * If you want to make this work with multiple fids, you need to fix
661 * the error handling. One approach would be to force a reset if a
662 * multi-fid call fails, or you could add delayed callbacks for each
663 * fid. You probably also need to sort and remove duplicate hosts.
664 * When this is called from the BreakVolumeCallBacks path, it does NOT
665 * force a reset if the RPC fails, it just marks the host down and tries
666 * to create a delayed callback. */
667 /* N.B. be sure that code works when ncbas == 0 */
668 /* N.B. requires all the cba[*].hp pointers to be valid... */
669 /* This routine does not hold a lock on the host for the duration of
670 * the BreakCallBack RPC, which is a significant deviation from tradition.
671 * It _does_ get a lock on the host before setting VenusDown = 1,
672 * which is sufficient only if VenusDown = 0 only happens when the
673 * lock is held over the RPC and the subsequent VenusDown == 0
674 * wherever that is done. */
676 MultiBreakCallBack_r(struct cbstruct cba[], int ncbas,
677 struct AFSCBFids *afidp)
680 struct rx_connection *conns[MAX_CB_HOSTS];
681 static struct AFSCBs tc = { 0, 0 };
682 int multi_to_cba_map[MAX_CB_HOSTS];
684 opr_Assert(ncbas <= MAX_CB_HOSTS);
687 * When we issue a multi_Rx callback break, we must rx_NewCall a call for
688 * each host before we do anything. If there are no call channels
689 * available on the conn, we must wait for one of the existing calls to
690 * finish. If another thread is breaking callbacks at the same time, it is
691 * possible for us to be waiting on NewCall for one of their multi_Rx
692 * CallBack calls to finish, but they are waiting on NewCall for one of
693 * our calls to finish. So we deadlock.
695 * This can be thought of as similar to obtaining multiple locks at the
696 * same time. So if we establish an ordering, the possibility of deadlock
697 * goes away. Here we provide such an ordering, by sorting our CBAs
698 * according to CompareCBA.
700 qsort(cba, ncbas, sizeof(struct cbstruct), CompareCBA);
702 /* set up conns for multi-call */
703 for (i = 0, j = 0; i < ncbas; i++) {
704 struct host *thishost = cba[i].hp;
705 if (!thishost || (thishost->hostFlags & HOSTDELETED)) {
708 rx_GetConnection(thishost->callback_rxcon);
709 multi_to_cba_map[j] = i;
710 conns[j++] = thishost->callback_rxcon;
712 rx_SetConnDeadTime(thishost->callback_rxcon, 4);
713 rx_SetConnHardDeadTime(thishost->callback_rxcon, AFS_HARDDEADTIME);
716 if (j) { /* who knows what multi would do with 0 conns? */
720 multi_RXAFSCB_CallBack(afidp, &tc);
726 i = multi_to_cba_map[multi_i];
732 ("BCB: INTERNAL ERROR: hp=%p, cba=%p, thead=%u\n",
736 ** try breaking callbacks on alternate interface addresses
738 if (MultiBreakCallBackAlternateAddress(hp, afidp)) {
741 ("BCB: Failed on file %u.%u.%u, "
742 "Host %p (%s:%d) is down\n",
743 afidp->AFSCBFids_val->Volume,
744 afidp->AFSCBFids_val->Vnode,
745 afidp->AFSCBFids_val->Unique,
747 afs_inet_ntoa_r(hp->host, hoststr),
753 if (!(hp->hostFlags & HOSTDELETED)) {
754 hp->hostFlags |= VENUSDOWN;
756 * We always go into AddCallBack1_r with the host locked
758 AddCallBack1_r(hp, afidp->AFSCBFids_val, itot(idx),
772 for (i = 0; i < ncbas; i++) {
780 /* H_UNLOCK around this so h_FreeConnection does not deadlock.
781 h_FreeConnection should *never* be called on a callback connection,
782 but on 10/27/04 a deadlock occurred where it was, when we know why,
783 this should be reverted. -- shadow */
785 for (i = 0; i < j; i++) {
786 rx_PutConnection(conns[i]);
794 * Break all call backs for fid, except for the specified host (unless flag
795 * is true, in which case all get a callback message. Assumption: the specified
796 * host is h_Held, by the caller; the others aren't.
797 * Specified host may be bogus, that's ok. This used to check to see if the
798 * host was down in two places, once right after the host was h_held, and
799 * again after it was locked. That race condition is incredibly rare and
800 * relatively harmless even when it does occur, so we don't check for it now.
802 /* if flag is true, send a break callback msg to "host", too */
804 BreakCallBack(struct host *xhost, AFSFid * fid, int flag)
806 struct FileEntry *fe;
807 struct CallBack *cb, *nextcb;
808 struct cbstruct cbaDef[MAX_CB_HOSTS], *cba = cbaDef;
809 unsigned int ncbas, cbaAlloc = MAX_CB_HOSTS;
816 ("BCB: BreakCallBack(Host %p all but %s:%d, (%u,%u,%u))\n",
817 xhost, afs_inet_ntoa_r(xhost->host, hoststr), ntohs(xhost->port),
818 fid->Volume, fid->Vnode, fid->Unique));
821 ("BCB: BreakCallBack(No Host, (%u,%u,%u))\n",
822 fid->Volume, fid->Vnode, fid->Unique));
825 cbstuff.BreakCallBacks++;
830 hostindex = xhost ? h_htoi(xhost) : 0;
831 cb = itocb(fe->firstcb);
832 if (!cb || ((fe->ncbs == 1) && (cb->hhead == hostindex) && !flag)) {
833 /* the most common case is what follows the || */
836 tf.AFSCBFids_len = 1;
837 tf.AFSCBFids_val = fid;
839 for (ncbas = 0; cb ; cb = nextcb) {
840 nextcb = itocb(cb->cnext);
841 if ((cb->hhead != hostindex || flag)
842 && (cb->status == CB_BULK || cb->status == CB_NORMAL
843 || cb->status == CB_VOLUME)) {
844 struct host *thishost = h_itoh(cb->hhead);
846 ViceLog(0, ("BCB: BOGUS! cb->hhead is NULL!\n"));
847 } else if (thishost->hostFlags & VENUSDOWN) {
849 ("BCB: %p (%s:%d) is down; delaying break call back\n",
850 thishost, afs_inet_ntoa_r(thishost->host, hoststr),
851 ntohs(thishost->port)));
852 cb->status = CB_DELAYED;
854 if (!(thishost->hostFlags & HOSTDELETED)) {
856 if (ncbas == cbaAlloc) { /* Need more space */
857 int curLen = cbaAlloc*sizeof(cba[0]);
858 struct cbstruct *cbaOld = (cba == cbaDef) ? NULL : cba;
860 /* There are logical contraints elsewhere that the number of hosts
861 (i.e. h_HTSPERBLOCK*h_MAXHOSTTABLES) remains in the realm of a signed "int".
862 cbaAlloc is defined unsigned int hence doubling below cannot overflow
864 cbaAlloc = cbaAlloc<<1; /* double */
865 cba = realloc(cbaOld, cbaAlloc * sizeof(cba[0]));
867 if (cbaOld == NULL) { /* realloc wouldn't have copied from cbaDef */
868 memcpy(cba, cbaDef, curLen);
871 cba[ncbas].hp = thishost;
872 cba[ncbas].thead = cb->thead;
877 CDel(cb, 1); /* Usually first; so this delete
878 * is reasonably inexpensive */
884 struct cbstruct *cba2;
887 for (cba2 = cba, num = ncbas; ncbas > 0; cba2 += num, ncbas -= num) {
888 num = (ncbas > MAX_CB_HOSTS) ? MAX_CB_HOSTS : ncbas;
889 MultiBreakCallBack_r(cba2, num, &tf);
893 if (cba != cbaDef) free(cba);
900 /* Delete (do not break) single call back for fid */
902 DeleteCallBack(struct host *host, AFSFid * fid)
904 struct FileEntry *fe;
909 cbstuff.DeleteCallBacks++;
912 /* do not care if the host has been HOSTDELETED */
918 ("DCB: No call backs for fid (%u, %u, %u)\n", fid->Volume,
919 fid->Vnode, fid->Unique));
922 pcb = FindCBPtr(fe, host);
925 ("DCB: No call back for host %p (%s:%d), (%u, %u, %u)\n",
926 host, afs_inet_ntoa_r(host->host, hoststr), ntohs(host->port),
927 fid->Volume, fid->Vnode, fid->Unique));
941 * Delete (do not break) all call backs for fid. This call doesn't
942 * set all of the various host locks, but it shouldn't really matter
943 * since we're not adding callbacks, but deleting them. I'm not sure
944 * why it doesn't set the lock, however; perhaps it should.
947 DeleteFileCallBacks(AFSFid * fid)
949 struct FileEntry *fe;
955 cbstuff.DeleteFiles++;
960 ("DF: No fid (%u,%u,%u) to delete\n", fid->Volume, fid->Vnode,
964 for (n = 0, cbi = fe->firstcb; cbi; n++) {
977 /* Delete (do not break) all call backs for host. The host should be
980 DeleteAllCallBacks_r(struct host *host, int deletefe)
985 cbstuff.DeleteAllCallBacks++;
986 cbi = first = host->cblist;
988 ViceLog(8, ("DV: no call backs\n"));
996 } while (cbi != first);
1002 * Break all delayed call backs for host. Returns 1 if all call backs
1003 * successfully broken; 0 otherwise. Assumes host is h_Held and h_Locked.
1004 * Must be called with VenusDown set for this host
1007 BreakDelayedCallBacks(struct host *host)
1011 retVal = BreakDelayedCallBacks_r(host);
1017 BreakDelayedCallBacks_r(struct host *host)
1019 struct AFSFid fids[AFSCBMAX];
1020 int cbi, first, nfids;
1021 struct CallBack *cb;
1024 struct rx_connection *cb_conn;
1026 cbstuff.nbreakers++;
1027 if (!(host->hostFlags & RESETDONE) && !(host->hostFlags & HOSTDELETED)) {
1028 host->hostFlags &= ~ALTADDR; /* alternate addresses are invalid */
1029 cb_conn = host->callback_rxcon;
1030 rx_GetConnection(cb_conn);
1031 if (host->interface) {
1034 RXAFSCB_InitCallBackState3(cb_conn, &FS_HostUUID);
1037 code = RXAFSCB_InitCallBackState(cb_conn);
1039 rx_PutConnection(cb_conn);
1042 host->hostFlags |= ALTADDR; /* alternate addresses are valid */
1046 ("CB: Call back connect back failed (in break delayed) "
1047 "for Host %p (%s:%d)\n",
1048 host, afs_inet_ntoa_r(host->host, hoststr),
1049 ntohs(host->port)));
1051 host->hostFlags |= VENUSDOWN;
1054 ("InitCallBackState success on %p (%s:%d)\n",
1055 host, afs_inet_ntoa_r(host->host, hoststr),
1056 ntohs(host->port)));
1057 /* reset was done successfully */
1058 host->hostFlags |= RESETDONE;
1059 host->hostFlags &= ~VENUSDOWN;
1062 while (!(host->hostFlags & HOSTDELETED)) {
1064 host->hostFlags &= ~VENUSDOWN; /* presume up */
1065 cbi = first = host->cblist;
1069 first = host->cblist;
1072 if (cb->status == CB_DELAYED) {
1073 struct FileEntry *fe = itofe(cb->fhead);
1074 fids[nfids].Volume = fe->volid;
1075 fids[nfids].Vnode = fe->vnode;
1076 fids[nfids].Unique = fe->unique;
1082 } while (cbi && cbi != first && nfids < AFSCBMAX);
1088 if (XCallBackBulk_r(host, fids, nfids)) {
1089 /* Failed, again: put them back, probably with old
1094 ("CB: XCallBackBulk failed, Host %p (%s:%d); "
1095 "callback list follows:\n",
1096 host, afs_inet_ntoa_r(host->host, hoststr),
1097 ntohs(host->port)));
1099 for (i = 0; i < nfids; i++) {
1102 ("CB: Host %p (%s:%d), file %u.%u.%u "
1103 "(part of bulk callback)\n",
1104 host, afs_inet_ntoa_r(host->host, hoststr),
1105 ntohs(host->port), fids[i].Volume,
1106 fids[i].Vnode, fids[i].Unique));
1109 * AddCallBack1_r(host, &fids[i], itot(thead[i]), CB_DELAYED, 1);
1110 * * but it turns out to cause too many tricky locking problems.
1111 * * now, if break delayed fails, screw it. */
1113 host->hostFlags |= VENUSDOWN; /* Failed */
1114 ClearHostCallbacks_r(host, 1 /* locked */ );
1118 if (nfids < AFSCBMAX)
1122 cbstuff.nbreakers--;
1123 /* If we succeeded it's always ok to unset HFE_LATER */
1124 if (!(host->hostFlags & VENUSDOWN))
1125 host->hostFlags &= ~HFE_LATER;
1126 return (host->hostFlags & VENUSDOWN);
1130 MultiBreakVolumeCallBack_r(struct host *host,
1131 struct VCBParams *parms, int deletefe)
1135 if (host->hostFlags & HOSTDELETED)
1138 if (!(host->hostFlags & HCBREAK))
1139 return 0; /* host is not flagged to notify */
1141 if (host->hostFlags & VENUSDOWN) {
1143 /* Do not care if the host is now HOSTDELETED */
1146 ("BVCB: volume callback for Host %p (%s:%d) failed\n",
1147 host, afs_inet_ntoa_r(host->host, hoststr),
1148 ntohs(host->port)));
1150 DeleteAllCallBacks_r(host, deletefe); /* Delete all callback state
1151 * rather than attempting to
1152 * selectively remember to
1153 * delete the volume callbacks
1155 host->hostFlags &= ~(RESETDONE|HCBREAK); /* Do InitCallBackState when host returns */
1159 opr_Assert(parms->ncbas <= MAX_CB_HOSTS);
1161 /* Do not call MultiBreakCallBack on the current host structure
1162 ** because it would prematurely release the hold on the host
1164 if (parms->ncbas == MAX_CB_HOSTS) {
1165 struct AFSCBFids tf;
1167 tf.AFSCBFids_len = 1;
1168 tf.AFSCBFids_val = parms->fid;
1170 /* this releases all the hosts */
1171 MultiBreakCallBack_r(parms->cba, parms->ncbas, &tf);
1175 parms->cba[parms->ncbas].hp = host;
1176 parms->cba[(parms->ncbas)++].thead = parms->thead;
1177 host->hostFlags &= ~HCBREAK;
1179 /* we have more work to do on this host, so make sure we keep a reference
1187 MultiBreakVolumeLaterCallBack(struct host *host, void *rock)
1189 struct VCBParams *parms = (struct VCBParams *)rock;
1192 retval = MultiBreakVolumeCallBack_r(host, parms, 0);
1198 * Break all call backs on a single volume. Don't call this with any
1199 * hosts h_held. Note that this routine clears the callbacks before
1200 * actually breaking them, and that the vnode isn't locked during this
1201 * operation, so that people might see temporary callback loss while
1202 * this function is executing. It is just a temporary state, however,
1203 * since the callback will be broken later by this same function.
1205 * Now uses multi-RX for CallBack RPC in a different thread,
1206 * only marking them here.
1208 extern pthread_cond_t fsync_cond;
1211 BreakVolumeCallBacksLater(VolumeId volume)
1215 struct FileEntry *fe;
1216 struct CallBack *cb;
1220 ViceLog(25, ("Setting later on volume %" AFS_VOLID_FMT "\n",
1221 afs_printable_VolumeId_lu(volume)));
1223 for (hash = 0; hash < FEHASH_SIZE; hash++) {
1224 for (feip = &HashTable[hash]; (fe = itofe(*feip)) != NULL; ) {
1225 if (fe->volid == volume) {
1226 struct CallBack *cbnext;
1227 for (cb = itocb(fe->firstcb); cb; cb = cbnext) {
1228 host = h_itoh(cb->hhead);
1229 host->hostFlags |= HFE_LATER;
1230 cb->status = CB_DELAYED;
1231 cbnext = itocb(cb->cnext);
1234 fe->status |= FE_LATER;
1243 /* didn't find any callbacks, so return right away. */
1247 ViceLog(25, ("Fsync thread wakeup\n"));
1249 opr_cv_broadcast(&fsync_cond);
1255 BreakLaterCallBacks(void)
1260 struct CallBack *cb;
1261 struct FileEntry *fe = NULL;
1262 struct FileEntry *myfe = NULL;
1264 struct VCBParams henumParms;
1265 unsigned short tthead = 0; /* zero is illegal value */
1269 ViceLog(25, ("Looking for FileEntries to unchain\n"));
1272 /* Pick the first volume we see to clean up */
1273 fid.Volume = fid.Vnode = fid.Unique = 0;
1275 for (hash = 0; hash < FEHASH_SIZE; hash++) {
1276 for (feip = &HashTable[hash]; (fe = itofe(*feip)) != NULL; ) {
1277 if (fe && (fe->status & FE_LATER)
1278 && (fid.Volume == 0 || fid.Volume == fe->volid)) {
1279 /* Ugly, but used to avoid left side casting */
1280 struct object *tmpfe;
1282 ("Unchaining for %u:%u:%" AFS_VOLID_FMT "\n", fe->vnode,
1283 fe->unique, afs_printable_VolumeId_lu(fe->volid)));
1284 fid.Volume = fe->volid;
1286 fe->status &= ~FE_LATER; /* not strictly needed */
1287 /* Works since volid is deeper than the largest pointer */
1288 tmpfe = (struct object *)fe;
1289 tmpfe->next = (struct object *)myfe;
1302 /* loop over FEs from myfe and free/break */
1304 for (fe = myfe; fe;) {
1305 struct CallBack *cbnext;
1306 for (cb = itocb(fe->firstcb); cb; cb = cbnext) {
1307 cbnext = itocb(cb->cnext);
1308 host = h_itoh(cb->hhead);
1309 if (cb->status == CB_DELAYED) {
1310 if (!(host->hostFlags & HOSTDELETED)) {
1311 /* mark this host for notification */
1312 host->hostFlags |= HCBREAK;
1313 if (!tthead || (TNorm(tthead) < TNorm(cb->thead))) {
1319 CDel(cb, 0); /* Don't let CDel clean up the fe */
1320 /* leave flag for MultiBreakVolumeCallBack to clear */
1323 ("Found host %p (%s:%d) non-DELAYED cb for %u:%u:%" AFS_VOLID_FMT "\n",
1324 host, afs_inet_ntoa_r(host->host, hoststr),
1325 ntohs(host->port), fe->vnode, fe->unique,
1326 afs_printable_VolumeId_lu(fe->volid)));
1330 fe = (struct FileEntry *)((struct object *)fe)->next;
1335 ViceLog(125, ("Breaking volume %u\n", fid.Volume));
1336 henumParms.ncbas = 0;
1337 henumParms.fid = &fid;
1338 henumParms.thead = tthead;
1340 h_Enumerate(MultiBreakVolumeLaterCallBack, (char *)&henumParms);
1342 if (henumParms.ncbas) { /* do left-overs */
1343 struct AFSCBFids tf;
1344 tf.AFSCBFids_len = 1;
1345 tf.AFSCBFids_val = &fid;
1347 MultiBreakCallBack_r(henumParms.cba, henumParms.ncbas, &tf);
1348 henumParms.ncbas = 0;
1353 /* Arrange to be called again */
1358 * Delete all timed-out call back entries (to be called periodically by file
1362 CleanupTimedOutCallBacks(void)
1365 CleanupTimedOutCallBacks_r();
/* Locked worker: walk the timeout ring from tfirst up to the current
 * callback-package time and delete every expired callback entry.
 * Returns nonzero iff at least one entry was deleted.
 * NOTE(review): this is the _r (caller-holds-lock) variant; the lock
 * acquisition is not visible here -- confirm against the full source. */
1371 CleanupTimedOutCallBacks_r(void)
1373     afs_uint32 now = CBtime(time(NULL));
1375     struct CallBack *cb;
1379     while (tfirst <= now) {
1381 	    cbi = *(thead = THead(tfirst));
1387 		    ("CCB: deleting timed out call back %x (%s:%d), (%" AFS_VOLID_FMT ",%u,%u)\n",
1388 		     h_itoh(cb->hhead)->host,
1389 		     afs_inet_ntoa_r(h_itoh(cb->hhead)->host, hoststr),
1390 		     h_itoh(cb->hhead)->port,
1391 		     afs_printable_VolumeId_lu(itofe(cb->fhead)->volid),
1392 		     itofe(cb->fhead)->vnode, itofe(cb->fhead)->unique));
	    /* Sanity check: deleting more CBs than blocks exist implies a
	     * corrupted list; dump state for post-mortem and panic. */
1396 	    if (ntimedout > cbstuff.nblks) {
1397 		ViceLog(0, ("CCB: Internal Error -- shutting down...\n"));
1398 		DumpCallBackState_r();
1399 		ShutDownAndCore(PANIC);
1401 	} while (cbi != *thead);
1406     cbstuff.CBsTimedOut += ntimedout;
1407     ViceLog(7, ("CCB: deleted %d timed out callbacks\n", ntimedout));
1408     return (ntimedout > 0);
/* Partial view of struct lih_params: the rock passed to the lih*_r
 * enumeration callbacks below (the 'lih' member's declaration is elided
 * from this excerpt). */
1412  * parameters to pass to lih*_r from h_Enumerate_r when trying to find a host
1413  * from which to clear callbacks.
1417  * Points to the least interesting host found; try to clear callbacks on
1418  * this host after h_Enumerate_r(lih*_r)'ing.
1423  * The last host we got from lih*_r, but we couldn't clear its callbacks
1424  * for some reason. Choose the next-best host after this one (with the
1425  * current lih*_r, this means to only select hosts that have an ActiveCall
1426  * newer than lastlih).
1428     struct host *lastlih;
1431 /* Value of host->refCount that allows us to reliably infer that
1432  * host may be held by some other thread */
1433 #define OTHER_MUSTHOLD_LIH 2
1435 /* This version does not allow 'host' to be selected unless its ActiveCall
1436  * is newer than 'params->lastlih' which is the host with the oldest
1437  * ActiveCall from the last pass (if it is provided). We filter out any hosts
1438  * that are held by other threads.
1440  * There is a small problem here, but it may not be easily fixable. Say we
1441  * select some host A, and give it back to GetSomeSpace_r. GSS_r for some
1442  * reason cannot clear the callbacks on A, and so calls us again with
1443  * lastlih = A. Suppose there is another host B that has the same ActiveCall
1444  * time as A. We will now skip over host B, since
1445  * 'hostB->ActiveCall > hostA->ActiveCall' is not true. This could result in
1446  * us prematurely going to the GSS_r 2nd or 3rd pass, and making us a little
1447  * inefficient. This should be pretty rare, though, except perhaps in cases
1448  * with very small numbers of hosts.
1450  * Also filter out any hosts with HOSTDELETED set. h_Enumerate_r should in
1451  * theory not give these to us anyway, but be paranoid.
/* h_Enumerate_r callback, pass 1: remember (in params->lih, with a held
 * reference) the not-held, not-deleted host with the oldest ActiveCall
 * newer than params->lastlih's. */
1454 lih0_r(struct host *host, void *rock)
1456     struct lih_params *params = (struct lih_params *)rock;
1458     /* OTHER_MUSTHOLD_LIH is because the h_Enum loop holds us once */
1460 	&& (!(host->hostFlags & HOSTDELETED))
1461 	&& (host->refCount < OTHER_MUSTHOLD_LIH)
1462 	&& (!params->lih || host->ActiveCall < params->lih->ActiveCall)
1463 	&& (!params->lastlih || host->ActiveCall > params->lastlih->ActiveCall)) {
1466 	    h_Release_r(params->lih);	/* release prev host */
1475 /* same as lih0_r, except we do not prevent held hosts from being selected. */
/* h_Enumerate_r callback, pass 2: identical selection to lih0_r minus the
 * refCount (held-by-others) filter. */
1477 lih1_r(struct host *host, void *rock)
1479     struct lih_params *params = (struct lih_params *)rock;
1482 	&& (!(host->hostFlags & HOSTDELETED))
1483 	&& (!params->lih || host->ActiveCall < params->lih->ActiveCall)
1484 	&& (!params->lastlih || host->ActiveCall > params->lastlih->ActiveCall)) {
1487 	    h_Release_r(params->lih);	/* release prev host */
1496 /* This could be upgraded to get more space each time */
1497 /* first pass: sequentially find the oldest host which isn't held by
1498    anyone for which we can clear callbacks;
1500 /* second pass: sequentially find the oldest host regardless of
1501    whether or not the host is held; skipping 'hostp' */
1502 /* third pass: attempt to clear callbacks from 'hostp' */
1503 /* always called with hostp unlocked */
1505 /* Note: hostlist is ordered most recently created host first and
1506  * its order has no relationship to the most recently used. */
1507 extern struct host *hostList;
/* Reclaim FE/CB space when the callback arena is exhausted: first reap
 * timed-out callbacks, then revoke callbacks from least-active hosts via
 * the lih0_r/lih1_r passes above, finally fall back to clearing hostp's
 * own callbacks.  Logs a one-time operator warning about the -cb setting. */
1509 GetSomeSpace_r(struct host *hostp, int locked)
1512     struct lih_params params;
1515     if (cbstuff.GotSomeSpaces == 0) {
1516 	/* only log this once; if GSS is getting called constantly, that's not
1517 	 * good but don't make things worse by spamming the log. */
1518 	ViceLog(0, ("We have run out of callback space; forcing callback revocation. "
1519 	            "This suggests the fileserver is configured with insufficient "
1520 	            "callbacks; you probably want to increase the -cb fileserver "
1521 	            "parameter (current setting: %u). The fileserver will continue "
1522 	            "to operate, but this may indicate a severe performance problem\n",
1524 	ViceLog(0, ("This message is logged at most once; for more information "
1525 	            "see the OpenAFS documentation and fileserver xstat collection 3\n"));
1528     cbstuff.GotSomeSpaces++;
1530 	    ("GSS: First looking for timed out call backs via CleanupCallBacks\n"));
1531     if (CleanupTimedOutCallBacks_r()) {
1537     params.lastlih = NULL;
1542 	    h_Enumerate_r(i == 0 ? lih0_r : lih1_r, hostList, &params);
1545 	    if (params.lastlih) {
1546 		h_Release_r(params.lastlih);
1547 		params.lastlih = NULL;
1551 		/* note that 'hp' was held by lih*_r; we will need to release it */
1553 		if ((hp != hostp) && !ClearHostCallbacks_r(hp, 0 /* not locked or held */ )) {
1558 		params.lastlih = hp;
1559 		/* params.lastlih will be released on the next iteration, after
	/* Pass 2 rationale: accept even held hosts rather than starve. */
1564 	 * Next time try getting callbacks from any host even if
1565 	 * it's held, since the only other option is starvation for
1566 	 * the file server (i.e. until the callback timeout arrives).
1569 	params.lastlih = NULL;
1572 		("GSS: Try harder for longest inactive host cnt= %d\n",
1577     /* Could not obtain space from other hosts, clear hostp's callback state */
1582     ClearHostCallbacks_r(hostp, 1 /*already locked */ );
1589 /* locked - set if caller has already locked the host */
/* Delete all of hp's callbacks and, if the host is reachable, tell the
 * client its callback state was reset via RXAFSCB_InitCallBackState[3].
 * Returns nonzero when the host lock could not be obtained (non-blocking
 * lock) or the host is being deleted -- per visible early-exit paths. */
1591 ClearHostCallbacks_r(struct host *hp, int locked)
1595     struct rx_connection *cb_conn = NULL;
1598 	    ("GSS: Delete longest inactive host %p (%s:%d)\n",
1599 	     hp, afs_inet_ntoa_r(hp->host, hoststr), ntohs(hp->port)));
1601     if ((hp->hostFlags & HOSTDELETED)) {
1602 	/* hp could go away after reacquiring H_LOCK in h_NBLock_r, so we can't
1603 	 * really use it; its callbacks will get cleared anyway when
1604 	 * h_TossStuff_r gets its hands on it */
1610     /** Try a non-blocking lock. If the lock is already held return
1611      *  after releasing hold on hp
1614 	if (h_NBLock_r(hp)) {
1619     if (hp->Console & 2) {
1621 	 * If the special console field is set it means that a thread
1622 	 * is waiting in AddCallBack1 after it set pointers to the
1623 	 * file entry and/or callback entry. Because of the bogus
1624 	 * usage of h_hold it won't prevent from another thread, this
1625 	 * one, to remove all the callbacks so just to be safe we keep
1626 	 * a reference. NOTE, on the last phase we'll free the calling
1627 	 * host's callbacks but that's ok...
1631     DeleteAllCallBacks_r(hp, 1);
1632     if (hp->hostFlags & VENUSDOWN) {
1633 	hp->hostFlags &= ~RESETDONE;	/* remember that we must do a reset */
1634     } else if (!(hp->hostFlags & HOSTDELETED)) {
1635 	/* host is up, try a call */
1636 	hp->hostFlags &= ~ALTADDR;	/* alternate addresses are invalid */
1637 	cb_conn = hp->callback_rxcon;
1638 	rx_GetConnection(hp->callback_rxcon);
	/* UUID-capable clients get the InitCallBackState3 variant. */
1639 	if (hp->interface) {
1642 	    code = RXAFSCB_InitCallBackState3(cb_conn, &FS_HostUUID);
1645 	    code = RXAFSCB_InitCallBackState(cb_conn);
1647 	rx_PutConnection(cb_conn);
1650 	    hp->hostFlags |= ALTADDR;	/* alternate addresses are valid */
1652 	    /* failed, mark host down and need reset */
1653 	    hp->hostFlags |= VENUSDOWN;
1654 	    hp->hostFlags &= ~RESETDONE;
1656 	    /* reset succeeded, we're done */
1657 	    hp->hostFlags |= RESETDONE;
1666 #endif /* INTERPRET_DUMP */
/* Dump the cbstuff counters (adds, breaks, deletes, timeouts, space
 * reclaims) and CB/FE utilization to stderr, for operator diagnostics. */
1670 PrintCallBackStats(void)
1673 	    "%d add CB, %d break CB, %d del CB, %d del FE, %d CB's timed out, %d space reclaim, %d del host\n",
1674 	    cbstuff.AddCallBacks, cbstuff.BreakCallBacks,
1675 	    cbstuff.DeleteCallBacks, cbstuff.DeleteFiles, cbstuff.CBsTimedOut,
1676 	    cbstuff.GotSomeSpaces, cbstuff.DeleteAllCallBacks);
1677     fprintf(stderr, "%d CBs, %d FEs, (%d of total of %d 16-byte blocks)\n",
1678 	    cbstuff.nCBs, cbstuff.nFEs, cbstuff.nCBs + cbstuff.nFEs,
1680     fprintf(stderr, "%d GSS1, %d GSS2, %d GSS3, %d GSS4, %d GSS5 (internal counters)\n",
1681 	    cbstuff.GSS1, cbstuff.GSS2, cbstuff.GSS3, cbstuff.GSS4, cbstuff.GSS5);
1686 #define MAGIC 0x12345678 /* To check byte ordering of dump when it is read in */
1687 #define MAGICV2 0x12345679 /* To check byte ordering & version of dump when it is read in */
1690 #ifndef INTERPRET_DUMP
1692 #ifdef AFS_DEMAND_ATTACH_FS
1695 * callback state serialization
1697 static int cb_stateSaveTimeouts(struct fs_dump_state * state);
1698 static int cb_stateSaveFEHash(struct fs_dump_state * state);
1699 static int cb_stateSaveFEs(struct fs_dump_state * state);
1700 static int cb_stateSaveFE(struct fs_dump_state * state, struct FileEntry * fe);
1701 static int cb_stateRestoreTimeouts(struct fs_dump_state * state);
1702 static int cb_stateRestoreFEHash(struct fs_dump_state * state);
1703 static int cb_stateRestoreFEs(struct fs_dump_state * state);
1704 static int cb_stateRestoreFE(struct fs_dump_state * state);
1705 static int cb_stateRestoreCBs(struct fs_dump_state * state, struct FileEntry * fe,
1706 struct iovec * iov, int niovecs);
1708 static int cb_stateVerifyFEHash(struct fs_dump_state * state);
1709 static int cb_stateVerifyFE(struct fs_dump_state * state, struct FileEntry * fe);
1710 static int cb_stateVerifyFCBList(struct fs_dump_state * state, struct FileEntry * fe);
1711 static int cb_stateVerifyTimeoutQueues(struct fs_dump_state * state);
1713 static int cb_stateFEToDiskEntry(struct FileEntry *, struct FEDiskEntry *);
1714 static int cb_stateDiskEntryToFE(struct fs_dump_state * state,
1715 struct FEDiskEntry *, struct FileEntry *);
1717 static int cb_stateCBToDiskEntry(struct CallBack *, struct CBDiskEntry *);
1718 static int cb_stateDiskEntryToCB(struct fs_dump_state * state,
1719 struct CBDiskEntry *, struct CallBack *);
1721 static int cb_stateFillHeader(struct callback_state_header * hdr);
1722 static int cb_stateCheckHeader(struct callback_state_header * hdr);
1724 static int cb_stateAllocMap(struct fs_dump_state * state);
/* DAFS: serialize the whole callback package to the fs state dump file:
 * write a placeholder header, then timeouts, FE hash table and FE/CB
 * records, then rewrite the now-complete header in place. */
1727 cb_stateSave(struct fs_dump_state * state)
1731     AssignInt64(state->eof_offset, &state->hdr->cb_offset);
1733     /* invalidate callback state header */
1734     memset(state->cb_hdr, 0, sizeof(struct callback_state_header));
1735     if (fs_stateWriteHeader(state, &state->hdr->cb_offset, state->cb_hdr,
1736 			    sizeof(struct callback_state_header))) {
1741     fs_stateIncEOF(state, sizeof(struct callback_state_header));
1743     /* dump timeout state */
1744     if (cb_stateSaveTimeouts(state)) {
1749     /* dump fe hashtable state */
1750     if (cb_stateSaveFEHash(state)) {
1755     /* dump callback state */
1756     if (cb_stateSaveFEs(state)) {
1761     /* write the callback state header to disk */
1762     cb_stateFillHeader(state->cb_hdr);
1763     if (fs_stateWriteHeader(state, &state->hdr->cb_offset, state->cb_hdr,
1764 			    sizeof(struct callback_state_header))) {
/* DAFS: inverse of cb_stateSave -- read and validate the callback header,
 * allocate the old->new index maps, then restore timeouts, FE hash heads
 * and FE/CB records, finally reinstating the tfirst timeout cursor. */
1774 cb_stateRestore(struct fs_dump_state * state)
1778     if (fs_stateReadHeader(state, &state->hdr->cb_offset, state->cb_hdr,
1779 			   sizeof(struct callback_state_header))) {
1784     if (cb_stateCheckHeader(state->cb_hdr)) {
1789     if (cb_stateAllocMap(state)) {
1794     if (cb_stateRestoreTimeouts(state)) {
1799     if (cb_stateRestoreFEHash(state)) {
1804     /* restore FEs and CBs from disk */
1805     if (cb_stateRestoreFEs(state)) {
1810     /* restore the timeout queue heads */
1811     tfirst = state->cb_hdr->tfirst;
/* DAFS: after all records are loaded, translate every stored FE/CB/host
 * index from its pre-dump value to its post-restore value using the
 * fe_map/cb_map/h_map tables (see *_OldToNew below). */
1818 cb_stateRestoreIndices(struct fs_dump_state * state)
1821     struct FileEntry * fe;
1822     struct CallBack * cb;
1824     /* restore indices in the FileEntry structures */
1825     for (i = 1; i < state->fe_map.len; i++) {
1826 	if (state->fe_map.entries[i].new_idx) {
1827 	    fe = itofe(state->fe_map.entries[i].new_idx);
1829 	    /* restore the fe->fnext entry */
1830 	    if (fe_OldToNew(state, fe->fnext, &fe->fnext)) {
1835 	    /* restore the fe->firstcb entry */
1836 	    if (cb_OldToNew(state, fe->firstcb, &fe->firstcb)) {
1843     /* restore indices in the CallBack structures */
1844     for (i = 1; i < state->cb_map.len; i++) {
1845 	if (state->cb_map.entries[i].new_idx) {
1846 	    cb = itocb(state->cb_map.entries[i].new_idx);
1848 	    /* restore the cb->cnext entry */
1849 	    if (cb_OldToNew(state, cb->cnext, &cb->cnext)) {
1854 	    /* restore the cb->fhead entry */
1855 	    if (fe_OldToNew(state, cb->fhead, &cb->fhead)) {
1860 	    /* restore the cb->hhead entry */
1861 	    if (h_OldToNew(state, cb->hhead, &cb->hhead)) {
1866 	    /* restore the cb->tprev entry */
1867 	    if (cb_OldToNew(state, cb->tprev, &cb->tprev)) {
1872 	    /* restore the cb->tnext entry */
1873 	    if (cb_OldToNew(state, cb->tnext, &cb->tnext)) {
1878 	    /* restore the cb->hprev entry */
1879 	    if (cb_OldToNew(state, cb->hprev, &cb->hprev)) {
1884 	    /* restore the cb->hnext entry */
1885 	    if (cb_OldToNew(state, cb->hnext, &cb->hnext)) {
1892     /* restore the timeout queue head indices */
1893     for (i = 0; i < state->cb_timeout_hdr->records; i++) {
1894 	if (cb_OldToNew(state, timeout[i], &timeout[i])) {
1900     /* restore the FE hash table queue heads */
1901     for (i = 0; i < state->cb_fehash_hdr->records; i++) {
1902 	if (fe_OldToNew(state, HashTable[i], &HashTable[i])) {
/* DAFS: top-level consistency check of restored callback state -- verify
 * the FE hash chains and the timeout queues. */
1913 cb_stateVerify(struct fs_dump_state * state)
1917     if (cb_stateVerifyFEHash(state)) {
1921     if (cb_stateVerifyTimeoutQueues(state)) {
/* DAFS: walk every FE hash bucket, bounds-check each FE index, verify
 * each FE, and cap chain length to detect cycles. */
1929 cb_stateVerifyFEHash(struct fs_dump_state * state)
1932     struct FileEntry * fe;
1933     afs_uint32 fei, chain_len;
1935     for (i = 0; i < FEHASH_SIZE; i++) {
1937 	for (fei = HashTable[i], fe = itofe(fei);
1939 	     fei = fe->fnext, fe = itofe(fei)) {
1940 	    if (fei > cbstuff.nblks) {
1941 		ViceLog(0, ("cb_stateVerifyFEHash: error: index out of range (fei=%d)\n", fei));
1945 	    if (cb_stateVerifyFE(state, fe)) {
	    /* chain longer than the configured maximum => assume a loop */
1948 	    if (chain_len > FS_STATE_FE_MAX_HASH_CHAIN_LEN) {
1949 		ViceLog(0, ("cb_stateVerifyFEHash: error: hash chain %d length exceeds %d; assuming there's a loop\n",
1950 			    i, FS_STATE_FE_MAX_HASH_CHAIN_LEN));
/* DAFS: verify a single FileEntry -- fe->firstcb and fe->ncbs must agree
 * (both zero or both nonzero), and its callback list must verify. */
1962 cb_stateVerifyFE(struct fs_dump_state * state, struct FileEntry * fe)
1966     if ((fe->firstcb && !fe->ncbs) ||
1967 	(!fe->firstcb && fe->ncbs)) {
1968 	ViceLog(0, ("cb_stateVerifyFE: error: fe->firstcb does not agree with fe->ncbs (fei=%lu, fe->firstcb=%lu, fe->ncbs=%lu)\n",
1969 		    afs_printable_uint32_lu(fetoi(fe)),
1970 		    afs_printable_uint32_lu(fe->firstcb),
1971 		    afs_printable_uint32_lu(fe->ncbs)));
1974     if (cb_stateVerifyFCBList(state, fe)) {
1975 	ViceLog(0, ("cb_stateVerifyFE: error: FCBList failed verification (fei=%lu)\n",
1976 		    afs_printable_uint32_lu(fetoi(fe))));
/* DAFS: verify an FE's singly-linked callback list -- indices in range,
 * every cb->fhead points back at this FE, list length bounded and equal
 * to fe->ncbs. */
1984 cb_stateVerifyFCBList(struct fs_dump_state * state, struct FileEntry * fe)
1987     afs_uint32 cbi, fei, chain_len = 0;
1988     struct CallBack * cb;
1992     for (cbi = fe->firstcb, cb = itocb(cbi);
1994 	 cbi = cb->cnext, cb = itocb(cbi)) {
1995 	if (cbi > cbstuff.nblks) {
1996 	    ViceLog(0, ("cb_stateVerifyFCBList: error: list index out of range (cbi=%d, ncbs=%d)\n",
1997 			cbi, cbstuff.nblks));
2001 	if (cb->fhead != fei) {
2002 	    ViceLog(0, ("cb_stateVerifyFCBList: error: cb->fhead != fei (fei=%d, cb->fhead=%d)\n",
2006 	if (chain_len > FS_STATE_FCB_MAX_LIST_LEN) {
2007 	    ViceLog(0, ("cb_stateVerifyFCBList: error: list length exceeds %d (fei=%d); assuming there's a loop\n",
2008 			FS_STATE_FCB_MAX_LIST_LEN, fei));
	/* cross-check computed length against the FE's cached count */
2015     if (fe->ncbs != chain_len) {
2016 	ViceLog(0, ("cb_stateVerifyFCBList: error: list length mismatch (len=%d, fe->ncbs=%d)\n",
2017 		    chain_len, fe->ncbs));
/* DAFS: verify a host's circular doubly-linked callback list (hprev/hnext)
 * -- ownership (cb->hhead == host index), non-null and in-range links,
 * back-pointer consistency, and bounded length to catch loops. */
2026 cb_stateVerifyHCBList(struct fs_dump_state * state, struct host * host)
2029     afs_uint32 hi, chain_len, cbi;
2030     struct CallBack *cb, *ncb;
2035     for (cbi = host->cblist, cb = itocb(cbi);
2037 	 cbi = cb->hnext, cb = ncb) {
2038 	if (chain_len && (host->cblist == cbi)) {
2039 	    /* we've wrapped around the circular list, and everything looks ok */
2042 	if (cb->hhead != hi) {
2043 	    ViceLog(0, ("cb_stateVerifyHCBList: error: incorrect cb->hhead (cbi=%d, h->index=%d, cb->hhead=%d)\n",
2044 			cbi, hi, cb->hhead));
2047 	if (!cb->hprev || !cb->hnext) {
2048 	    ViceLog(0, ("cb_stateVerifyHCBList: error: null index in circular list (cbi=%d, h->index=%d)\n",
2053 	if ((cb->hprev > cbstuff.nblks) ||
2054 	    (cb->hnext > cbstuff.nblks)) {
2055 	    ViceLog(0, ("cb_stateVerifyHCBList: error: list index out of range (cbi=%d, h->index=%d, cb->hprev=%d, cb->hnext=%d, nCBs=%d)\n",
2056 			cbi, hi, cb->hprev, cb->hnext, cbstuff.nblks));
2060 	ncb = itocb(cb->hnext);
2061 	if (cbi != ncb->hprev) {
2062 	    ViceLog(0, ("cb_stateVerifyHCBList: error: corrupt linked list (cbi=%d, h->index=%d)\n",
	/* NOTE(review): the message below says "cb_stateVerifyFCBList" but
	 * this is cb_stateVerifyHCBList -- copy/paste in the log string. */
2067 	if (chain_len > FS_STATE_HCB_MAX_LIST_LEN) {
2068 	    ViceLog(0, ("cb_stateVerifyFCBList: error: list length exceeds %d (h->index=%d); assuming there's a loop\n",
2069 			FS_STATE_HCB_MAX_LIST_LEN, hi));
/* DAFS: verify every circular timeout queue (tprev/tnext) -- each CB must
 * belong to the queue it is on (cb->thead), links non-null, in range and
 * back-consistent, with bounded length to catch loops. */
2081 cb_stateVerifyTimeoutQueues(struct fs_dump_state * state)
2084     afs_uint32 cbi, chain_len;
2085     struct CallBack *cb, *ncb;
2087     for (i = 0; i < CB_NUM_TIMEOUT_QUEUES; i++) {
2089 	for (cbi = timeout[i], cb = itocb(cbi);
2091 	     cbi = cb->tnext, cb = ncb) {
2092 	    if (chain_len && (cbi == timeout[i])) {
2093 		/* we've wrapped around the circular list, and everything looks ok */
2096 	    if (cbi > cbstuff.nblks) {
2097 		ViceLog(0, ("cb_stateVerifyTimeoutQueues: error: list index out of range (cbi=%d, tindex=%d)\n",
2102 	    if (itot(cb->thead) != &timeout[i]) {
2103 		ViceLog(0, ("cb_stateVerifyTimeoutQueues: error: cb->thead points to wrong timeout queue (tindex=%d, cbi=%d, cb->thead=%d)\n",
2104 			    i, cbi, cb->thead));
2107 	    if (!cb->tprev || !cb->tnext) {
2108 		ViceLog(0, ("cb_stateVerifyTimeoutQueues: null index in circular list (cbi=%d, tindex=%d)\n",
2113 	    if ((cb->tprev > cbstuff.nblks) ||
2114 		(cb->tnext > cbstuff.nblks)) {
2115 		ViceLog(0, ("cb_stateVerifyTimeoutQueues: list index out of range (cbi=%d, tindex=%d, cb->tprev=%d, cb->tnext=%d, nCBs=%d)\n",
2116 			    cbi, i, cb->tprev, cb->tnext, cbstuff.nblks));
2120 	    ncb = itocb(cb->tnext);
2121 	    if (cbi != ncb->tprev) {
2122 		ViceLog(0, ("cb_stateVerifyTimeoutQueues: corrupt linked list (cbi=%d, tindex=%d)\n",
2127 	    if (chain_len > FS_STATE_TCB_MAX_LIST_LEN) {
2128 		ViceLog(0, ("cb_stateVerifyTimeoutQueues: list length exceeds %d (tindex=%d); assuming there's a loop\n",
2129 			    FS_STATE_TCB_MAX_LIST_LEN, i));
2141 cb_stateSaveTimeouts(struct fs_dump_state * state)
2144 struct iovec iov[2];
2146 AssignInt64(state->eof_offset, &state->cb_hdr->timeout_offset);
2148 memset(state->cb_timeout_hdr, 0, sizeof(struct callback_state_fehash_header));
2149 state->cb_timeout_hdr->magic = CALLBACK_STATE_TIMEOUT_MAGIC;
2150 state->cb_timeout_hdr->records = CB_NUM_TIMEOUT_QUEUES;
2151 state->cb_timeout_hdr->len = sizeof(struct callback_state_timeout_header) +
2152 (state->cb_timeout_hdr->records * sizeof(afs_uint32));
2154 iov[0].iov_base = (char *)state->cb_timeout_hdr;
2155 iov[0].iov_len = sizeof(struct callback_state_timeout_header);
2156 iov[1].iov_base = (char *)timeout;
2157 iov[1].iov_len = sizeof(timeout);
2159 if (fs_stateSeek(state, &state->cb_hdr->timeout_offset)) {
2164 if (fs_stateWriteV(state, iov, 2)) {
2169 fs_stateIncEOF(state, state->cb_timeout_hdr->len);
/* DAFS: read back the timeout header and queue-head array written by
 * cb_stateSaveTimeouts, validating magic, record count and length. */
2176 cb_stateRestoreTimeouts(struct fs_dump_state * state)
2180     if (fs_stateReadHeader(state, &state->cb_hdr->timeout_offset,
2181 			   state->cb_timeout_hdr,
2182 			   sizeof(struct callback_state_timeout_header))) {
2187     if (state->cb_timeout_hdr->magic != CALLBACK_STATE_TIMEOUT_MAGIC) {
2191     if (state->cb_timeout_hdr->records != CB_NUM_TIMEOUT_QUEUES) {
2196     len = state->cb_timeout_hdr->records * sizeof(afs_uint32);
2198     if (state->cb_timeout_hdr->len !=
2199 	(sizeof(struct callback_state_timeout_header) + len)) {
2204     if (fs_stateRead(state, timeout, len)) {
/* DAFS: serialize the FE hash table heads -- a callback_state_fehash_header
 * followed by HashTable[], written at end-of-file with the offset recorded
 * in the callback state header. */
2214 cb_stateSaveFEHash(struct fs_dump_state * state)
2217     struct iovec iov[2];
2219     AssignInt64(state->eof_offset, &state->cb_hdr->fehash_offset);
2221     memset(state->cb_fehash_hdr, 0, sizeof(struct callback_state_fehash_header));
2222     state->cb_fehash_hdr->magic = CALLBACK_STATE_FEHASH_MAGIC;
2223     state->cb_fehash_hdr->records = FEHASH_SIZE;
2224     state->cb_fehash_hdr->len = sizeof(struct callback_state_fehash_header) +
2225 	(state->cb_fehash_hdr->records * sizeof(afs_uint32));
2227     iov[0].iov_base = (char *)state->cb_fehash_hdr;
2228     iov[0].iov_len = sizeof(struct callback_state_fehash_header);
2229     iov[1].iov_base = (char *)HashTable;
2230     iov[1].iov_len = sizeof(HashTable);
2232     if (fs_stateSeek(state, &state->cb_hdr->fehash_offset)) {
2237     if (fs_stateWriteV(state, iov, 2)) {
2242     fs_stateIncEOF(state, state->cb_fehash_hdr->len);
/* DAFS: read back the FE hash header and HashTable[] written by
 * cb_stateSaveFEHash, validating magic, record count and length. */
2249 cb_stateRestoreFEHash(struct fs_dump_state * state)
2253     if (fs_stateReadHeader(state, &state->cb_hdr->fehash_offset,
2254 			   state->cb_fehash_hdr,
2255 			   sizeof(struct callback_state_fehash_header))) {
2260     if (state->cb_fehash_hdr->magic != CALLBACK_STATE_FEHASH_MAGIC) {
2264     if (state->cb_fehash_hdr->records != FEHASH_SIZE) {
2269     len = state->cb_fehash_hdr->records * sizeof(afs_uint32);
2271     if (state->cb_fehash_hdr->len !=
2272 	(sizeof(struct callback_state_fehash_header) + len)) {
2277     if (fs_stateRead(state, HashTable, len)) {
/* DAFS: walk every FE hash chain and serialize each FileEntry (and its
 * callbacks) via cb_stateSaveFE. */
2287 cb_stateSaveFEs(struct fs_dump_state * state)
2291     struct FileEntry *fe;
2293     AssignInt64(state->eof_offset, &state->cb_hdr->fe_offset);
2295     for (hash = 0; hash < FEHASH_SIZE ; hash++) {
2296 	for (fei = HashTable[hash]; fei; fei = fe->fnext) {
2298 	    if (cb_stateSaveFE(state, fe)) {
/* DAFS: restore all FileEntry records -- the saved header tells us how
 * many FE records to read back via cb_stateRestoreFE. */
2310 cb_stateRestoreFEs(struct fs_dump_state * state)
2312     int count, nFEs, ret = 0;
2314     nFEs = state->cb_hdr->nFEs;
2316     for (count = 0; count < nFEs; count++) {
2317 	if (cb_stateRestoreFE(state)) {
/* DAFS: serialize one FileEntry plus its callback list.  Record layout:
 * entry header, FEDiskEntry, then CBDiskEntries batched through a 16-slot
 * iovec; the header is back-patched once the CB count is known.  Also
 * tracks fe_max/cb_max high-water marks for the index maps. */
2328 cb_stateSaveFE(struct fs_dump_state * state, struct FileEntry * fe)
2330     int ret = 0, iovcnt, cbi, written = 0;
2332     struct callback_state_entry_header hdr;
2333     struct FEDiskEntry fedsk;
2334     struct CBDiskEntry cbdsk[16];
2335     struct iovec iov[16];
2336     struct CallBack *cb;
2339     if (fei > state->cb_hdr->fe_max) {
2340 	state->cb_hdr->fe_max = fei;
2343     memset(&hdr, 0, sizeof(struct callback_state_entry_header));
2345     if (cb_stateFEToDiskEntry(fe, &fedsk)) {
2350     iov[0].iov_base = (char *)&hdr;
2351     iov[0].iov_len = sizeof(hdr);
2352     iov[1].iov_base = (char *)&fedsk;
2353     iov[1].iov_len = sizeof(struct FEDiskEntry);
2356     for (cbi = fe->firstcb, cb = itocb(cbi);
2358 	 cbi = cb->cnext, cb = itocb(cbi), hdr.nCBs++) {
2359 	if (cbi > state->cb_hdr->cb_max) {
2360 	    state->cb_hdr->cb_max = cbi;
2362 	if (cb_stateCBToDiskEntry(cb, &cbdsk[iovcnt])) {
2366 	cbdsk[iovcnt].index = cbi;
2367 	iov[iovcnt].iov_base = (char *)&cbdsk[iovcnt];
2368 	iov[iovcnt].iov_len = sizeof(struct CBDiskEntry);
	/* flush the iovec batch when full, or at the end of the CB list */
2370 	if ((iovcnt == 16) || (!cb->cnext)) {
2371 	    if (fs_stateWriteV(state, iov, iovcnt)) {
2380     hdr.magic = CALLBACK_STATE_ENTRY_MAGIC;
2381     hdr.len = sizeof(hdr) + sizeof(struct FEDiskEntry) +
2382 	(hdr.nCBs * sizeof(struct CBDiskEntry));
2385 	if (fs_stateWriteV(state, iov, iovcnt)) {
	/* rewrite the entry header now that hdr.nCBs/hdr.len are final */
2390     if (fs_stateWriteHeader(state, &state->eof_offset, &hdr, sizeof(hdr))) {
2396     fs_stateIncEOF(state, hdr.len);
2399     if (fs_stateSeek(state, &state->eof_offset)) {
2405     state->cb_hdr->nFEs++;
2406     state->cb_hdr->nCBs += hdr.nCBs;
/* DAFS: read back one FileEntry record -- entry header + FEDiskEntry in
 * one readv, then the CBDiskEntries in batches of up to 16, handing each
 * batch to cb_stateRestoreCBs. */
2413 cb_stateRestoreFE(struct fs_dump_state * state)
2415     int ret = 0, iovcnt, nCBs;
2416     struct callback_state_entry_header hdr;
2417     struct FEDiskEntry fedsk;
2418     struct CBDiskEntry cbdsk[16];
2419     struct iovec iov[16];
2420     struct FileEntry * fe;
2422     iov[0].iov_base = (char *)&hdr;
2423     iov[0].iov_len = sizeof(hdr);
2424     iov[1].iov_base = (char *)&fedsk;
2425     iov[1].iov_len = sizeof(fedsk);
2428     if (fs_stateReadV(state, iov, iovcnt)) {
2433     if (hdr.magic != CALLBACK_STATE_ENTRY_MAGIC) {
2440 	ViceLog(0, ("cb_stateRestoreFE: ran out of free FileEntry structures\n"));
2445     if (cb_stateDiskEntryToFE(state, &fedsk, fe)) {
2451     for (iovcnt = 0, nCBs = 0;
2454 	iov[iovcnt].iov_base = (char *)&cbdsk[iovcnt];
2455 	iov[iovcnt].iov_len = sizeof(struct CBDiskEntry);
	/* read+restore a batch when the iovec fills or the CBs run out */
2457 	if ((iovcnt == 16) || (nCBs == hdr.nCBs - 1)) {
2458 	    if (fs_stateReadV(state, iov, iovcnt)) {
2462 	    if (cb_stateRestoreCBs(state, fe, iov, iovcnt)) {
/* DAFS: materialize a batch of CBDiskEntries (in iov[0..niovecs-1]) as
 * live CallBack structures attached to fe.  Entries whose host was
 * skipped during host restore (FS_STATE_IDX_SKIPPED) are dropped. */
2476 cb_stateRestoreCBs(struct fs_dump_state * state, struct FileEntry * fe,
2477 		   struct iovec * iov, int niovecs)
2480     struct CallBack * cb;
2481     struct CBDiskEntry * cbdsk;
2483     for (idx = 0; idx < niovecs; idx++) {
2484 	cbdsk = (struct CBDiskEntry *) iov[idx].iov_base;
2486 	if (cbdsk->cb.hhead < state->h_map.len &&
2487 	    state->h_map.entries[cbdsk->cb.hhead].valid == FS_STATE_IDX_SKIPPED) {
2491 	if ((cb = GetCB()) == NULL) {
2492 	    ViceLog(0, ("cb_stateRestoreCBs: ran out of free CallBack structures\n"));
2496 	if (cb_stateDiskEntryToCB(state, cbdsk, cb)) {
2497 	    ViceLog(0, ("cb_stateRestoreCBs: corrupt CallBack disk entry\n"));
/* DAFS: stamp the callback state header with its magic/version and the
 * current timeout cursor before the final header write in cb_stateSave. */
2509 cb_stateFillHeader(struct callback_state_header * hdr)
2511     hdr->stamp.magic = CALLBACK_STATE_MAGIC;
2512     hdr->stamp.version = CALLBACK_STATE_VERSION;
2513     hdr->tfirst = tfirst;
/* DAFS: validate a restored callback state header -- magic, version, and
 * that the saved FE/CB counts fit in this server's callback allocation. */
2518 cb_stateCheckHeader(struct callback_state_header * hdr)
2522     if (hdr->stamp.magic != CALLBACK_STATE_MAGIC) {
2524     } else if (hdr->stamp.version != CALLBACK_STATE_VERSION) {
2526     } else if ((hdr->nFEs > cbstuff.nblks) || (hdr->nCBs > cbstuff.nblks)) {
2527 	ViceLog(0, ("cb_stateCheckHeader: saved callback state larger than callback memory allocation\n"));
2533 /* disk entry conversion routines */
/* Copy an in-memory FileEntry into its on-disk form, recording its
 * current (pre-dump) index for later remapping on restore. */
2535 cb_stateFEToDiskEntry(struct FileEntry * in, struct FEDiskEntry * out)
2537     memcpy(&out->fe, in, sizeof(struct FileEntry));
2538     out->index = fetoi(in);
/* Copy an on-disk FEDiskEntry into a live FileEntry and record the
 * old->new index mapping (used by cb_stateRestoreIndices).  Rejects a
 * zero or out-of-range saved index. */
2543 cb_stateDiskEntryToFE(struct fs_dump_state * state,
2544 		      struct FEDiskEntry * in, struct FileEntry * out)
2548     memcpy(out, &in->fe, sizeof(struct FileEntry));
2550     /* setup FE map entry */
2551     if (!in->index || (in->index >= state->fe_map.len)) {
2552 	ViceLog(0, ("cb_stateDiskEntryToFE: index (%d) out of range",
2557     state->fe_map.entries[in->index].valid = FS_STATE_IDX_VALID;
2558     state->fe_map.entries[in->index].old_idx = in->index;
2559     state->fe_map.entries[in->index].new_idx = fetoi(out);
/* Copy an in-memory CallBack into its on-disk form, recording its
 * current (pre-dump) index for later remapping on restore. */
2566 cb_stateCBToDiskEntry(struct CallBack * in, struct CBDiskEntry * out)
2568     memcpy(&out->cb, in, sizeof(struct CallBack));
2569     out->index = cbtoi(in);
/* Copy an on-disk CBDiskEntry into a live CallBack and record the
 * old->new index mapping (used by cb_stateRestoreIndices).  Rejects a
 * zero or out-of-range saved index. */
2574 cb_stateDiskEntryToCB(struct fs_dump_state * state,
2575 		      struct CBDiskEntry * in, struct CallBack * out)
2579     memcpy(out, &in->cb, sizeof(struct CallBack));
2581     /* setup CB map entry */
2582     if (!in->index || (in->index >= state->cb_map.len)) {
2583 	ViceLog(0, ("cb_stateDiskEntryToCB: index (%d) out of range\n",
2588     state->cb_map.entries[in->index].valid = FS_STATE_IDX_VALID;
2589     state->cb_map.entries[in->index].old_idx = in->index;
2590     state->cb_map.entries[in->index].new_idx = cbtoi(out);
2596 /* index map routines */
/* Allocate the FE and CB old->new index maps, sized one past the saved
 * high-water marks (indices are 1-based).  Returns 0 on success, 1 if
 * either calloc failed.  NOTE(review): on partial failure the successful
 * allocation is not freed here -- presumably cleaned up by the caller. */
2598 cb_stateAllocMap(struct fs_dump_state * state)
2600     state->fe_map.len = state->cb_hdr->fe_max + 1;
2601     state->cb_map.len = state->cb_hdr->cb_max + 1;
2602     state->fe_map.entries = (struct idx_map_entry_t *)
2603 	calloc(state->fe_map.len, sizeof(struct idx_map_entry_t));
2604     state->cb_map.entries = (struct idx_map_entry_t *)
2605 	calloc(state->cb_map.len, sizeof(struct idx_map_entry_t));
2606     return ((state->fe_map.entries != NULL) && (state->cb_map.entries != NULL)) ? 0 : 1;
/* Translate a pre-dump FileEntry index to its post-restore index via
 * fe_map.  old==0 maps to no entry; invalid or unmapped indices are
 * logged and rejected. */
2610 fe_OldToNew(struct fs_dump_state * state, afs_uint32 old, afs_uint32 * new)
2614     /* FEs use a one-based indexing system, so old==0 implies no mapping */
2620     if (old >= state->fe_map.len) {
2621 	ViceLog(0, ("fe_OldToNew: index %d is out of range\n", old));
2623     } else if (state->fe_map.entries[old].valid != FS_STATE_IDX_VALID ||
2624 	       state->fe_map.entries[old].old_idx != old) { /* sanity check */
2625 	ViceLog(0, ("fe_OldToNew: index %d points to an invalid FileEntry record\n", old));
2628 	*new = state->fe_map.entries[old].new_idx;
/* Translate a pre-dump CallBack index to its post-restore index via
 * cb_map; mirror image of fe_OldToNew above. */
2636 cb_OldToNew(struct fs_dump_state * state, afs_uint32 old, afs_uint32 * new)
2640     /* CBs use a one-based indexing system, so old==0 implies no mapping */
2646     if (old >= state->cb_map.len) {
2647 	ViceLog(0, ("cb_OldToNew: index %d is out of range\n", old));
2649     } else if (state->cb_map.entries[old].valid != FS_STATE_IDX_VALID ||
2650 	       state->cb_map.entries[old].old_idx != old) { /* sanity check */
2651 	ViceLog(0, ("cb_OldToNew: index %d points to an invalid CallBack record\n", old));
2654 	*new = state->cb_map.entries[old].new_idx;
2660 #endif /* AFS_DEMAND_ATTACH_FS */
/* DumpBytes deliberately discards write(2) errors: this dump is a
 * best-effort debugging aid taken on the way to a panic. */
2662 #define DumpBytes(fd,buf,req) if (write(fd, buf, req) < 0) ; /* don't care */
/* Write the entire callback package (counters, timeout queues, free-list
 * heads, hash table, and the raw CB[]/FE[] arrays) to the callback dump
 * file for offline analysis by the INTERPRET_DUMP tool below.  The format
 * must stay in lock-step with ReadDump(). */
2665 DumpCallBackState_r(void)
2668     afs_uint32 magic = MAGICV2, now = (afs_int32) time(NULL), freelisthead;
2670     oflag = O_WRONLY | O_CREAT | O_TRUNC;
2674     fd = open(AFSDIR_SERVER_CBKDUMP_FILEPATH, oflag, 0666);
2677 		("Couldn't create callback dump file %s\n",
2678 		 AFSDIR_SERVER_CBKDUMP_FILEPATH));
2682      * Collect but ignoring the return value of write(2) here,
2683      * to avoid compiler warnings on some platforms.
2685     DumpBytes(fd, &magic, sizeof(magic));
2686     DumpBytes(fd, &now, sizeof(now));
2687     DumpBytes(fd, &cbstuff, sizeof(cbstuff));
2688     DumpBytes(fd, TimeOuts, sizeof(TimeOuts));
2689     DumpBytes(fd, timeout, sizeof(timeout));
2690     DumpBytes(fd, &tfirst, sizeof(tfirst));
2691     freelisthead = cbtoi((struct CallBack *)CBfree);
2692     DumpBytes(fd, &freelisthead, sizeof(freelisthead));	/* This is a pointer */
2693     freelisthead = fetoi((struct FileEntry *)FEfree);
2694     DumpBytes(fd, &freelisthead, sizeof(freelisthead));	/* This is a pointer */
2695     DumpBytes(fd, HashTable, sizeof(HashTable));
2696     DumpBytes(fd, &CB[1], sizeof(CB[1]) * cbstuff.nblks);	/* CB stuff */
2697     DumpBytes(fd, &FE[1], sizeof(FE[1]) * cbstuff.nblks);	/* FE stuff */
/* Public wrapper around DumpCallBackState_r(); the surrounding locking
 * is elided from this excerpt (TODO: confirm H_LOCK usage in full source). */
2704 DumpCallBackState(void) {
2708     rc = DumpCallBackState_r();
2714 #endif /* !INTERPRET_DUMP */
2716 #ifdef INTERPRET_DUMP
/* Analyzer helper: read exactly req bytes from fd; a short read is
 * reported to stderr as a premature EOF. */
2719 ReadBytes(int fd, void *buf, size_t req)
2723     count = read(fd, buf, req);
2727     } else if (count != req) {
2728 	fprintf(stderr, "read: premature EOF (expected %lu, got %lu)\n",
2729 		(unsigned long)req, (unsigned long)count);
2734 /* This is only compiled in for the callback analyzer program */
2735 /* Returns the time of the dump */
/* Load a callback dump produced by DumpCallBackState_r into this
 * process's CB/FE globals.  Accepts MAGIC and MAGICV2 dumps; the
 * timebits argument selects a 32- or 64-bit timestamp field.  The CB/FE
 * arrays are calloc'd and biased by -1 so indexing stays 1-based, matching
 * the fileserver's itocb()/itofe() convention. */
2737 ReadDump(char *file, int timebits)
2740     afs_uint32 magic, freelisthead;
2748     fd = open(file, oflag);
2750 	fprintf(stderr, "Couldn't read dump file %s\n", file);
2753     ReadBytes(fd, &magic, sizeof(magic));
2754     if (magic == MAGICV2) {
2757 	if (magic != MAGIC) {
2759 		    "Magic number of %s is invalid. You might be trying to\n",
2762 		    "run this program on a machine type with a different byte ordering.\n");
2766     if (timebits == 64) {
2767 	ReadBytes(fd, &now64, sizeof(afs_int64));
2768 	now = (afs_int32) now64;
2770 	ReadBytes(fd, &now, sizeof(afs_int32));
2772     ReadBytes(fd, &cbstuff, sizeof(cbstuff));
2773     ReadBytes(fd, TimeOuts, sizeof(TimeOuts));
2774     ReadBytes(fd, timeout, sizeof(timeout));
2775     ReadBytes(fd, &tfirst, sizeof(tfirst));
2776     ReadBytes(fd, &freelisthead, sizeof(freelisthead));
2777     CB = ((struct CallBack
2778 	   *)(calloc(cbstuff.nblks, sizeof(struct CallBack)))) - 1;
2779     FE = ((struct FileEntry
2780 	   *)(calloc(cbstuff.nblks, sizeof(struct FileEntry)))) - 1;
2781     CBfree = (struct CallBack *)itocb(freelisthead);
2782     ReadBytes(fd, &freelisthead, sizeof(freelisthead));
2783     FEfree = (struct FileEntry *)itofe(freelisthead);
2784     ReadBytes(fd, HashTable, sizeof(HashTable));
2785     ReadBytes(fd, &CB[1], sizeof(CB[1]) * cbstuff.nblks);	/* CB stuff */
2786     ReadBytes(fd, &FE[1], sizeof(FE[1]) * cbstuff.nblks);	/* FE stuff */
2788 	perror("Error reading dumpfile");
2795 #include "AFS_component_version_number.h"
2797 #include "AFS_component_version_number.c"
/* cbTrack[cbi] records which FE index claimed callback cbi, so the
 * analyzer can detect a CB entry linked from two FE lists. */
2800 static afs_uint32 *cbTrack;
/* Callback-dump analyzer (cbd) entry point: parse options, load the dump
 * via ReadDump, then print stats and/or walk the FE hash chains checking
 * CB index ranges and double-claimed CB entries.  See the usage string
 * below for the accepted flags. */
2803 main(int argc, char **argv)
2805     int err = 0, cbi = 0, stats = 0, noptions = 0, all = 0, vol = 0, raw = 0;
2807     struct FileEntry *fe;
2808     struct CallBack *cb;
2812     memset(&fid, 0, sizeof(fid));
2815     while (argc && **argv == '-') {
2818 	if (!strcmp(*argv, "-host")) {
2824 	    cbi = atoi(*++argv);
2825 	} else if (!strcmp(*argv, "-fid")) {
2831 	    fid.Volume = atoi(*++argv);
2832 	    fid.Vnode = atoi(*++argv);
2833 	    fid.Unique = atoi(*++argv);
2834 	} else if (!strcmp(*argv, "-time")) {
2835 	    fprintf(stderr, "-time not supported\n");
2837 	} else if (!strcmp(*argv, "-stats")) {
2839 	} else if (!strcmp(*argv, "-all")) {
2841 	} else if (!strcmp(*argv, "-raw")) {
2843 	} else if (!strcmp(*argv, "-timebits")) {
2849 	    timebits = atoi(*++argv);
2850 	    if ((timebits != 32)
2854 	} else if (!strcmp(*argv, "-volume")) {
2860 	    vol = atoi(*++argv);
2865     if (err || argc != 1) {
2867 		"Usage: cbd [-host cbid] [-fid volume vnode] [-stats] [-all] [-timebits 32"
2869 		"] callbackdumpfile\n");
2871 		"[cbid is shown for each host in the hosts.dump file]\n");
2874     now = ReadDump(*argv, timebits);
2875     if (stats || noptions == 0) {
2876 	time_t uxtfirst = UXtime(tfirst), tnow = now;
2877 	printf("The time of the dump was %u %s", (unsigned int) now, ctime(&tnow));
2878 	printf("The last time cleanup ran was %u %s", (unsigned int) uxtfirst,
2880 	PrintCallBackStats();
2883     cbTrack = calloc(cbstuff.nblks, sizeof(cbTrack[0]));
2888 	struct CallBack *cb;
2889 	struct FileEntry *fe;
2891 	for (hash = 0; hash < FEHASH_SIZE; hash++) {
2892 	    for (feip = &HashTable[hash]; (fe = itofe(*feip));) {
2893 		if (!vol || (fe->volid == vol)) {
2894 		    afs_uint32 fe_i = fetoi(fe);
2896 		    for (cb = itocb(fe->firstcb); cb; cb = itocb(cb->cnext)) {
2897 			afs_uint32 cb_i = cbtoi(cb);
			/* consistency checks: index range and double-claim */
2899 			if (cb_i > cbstuff.nblks) {
2900 			    printf("CB index out of range (%u > %d), stopped for this FE\n",
2901 				   cb_i, cbstuff.nblks);
2905 			if (cbTrack[cb_i]) {
2906 			    printf("CB entry already claimed for FE[%u] (this is FE[%u]), stopped\n",
2907 				   cbTrack[cb_i], fe_i);
2910 			cbTrack[cb_i] = fe_i;
2922 	afs_uint32 cfirst = cbi;
2927 	} while (cbi != cfirst);
2932 	    printf("No callback entries for %u.%u\n", fid.Volume, fid.Vnode);
2935 	cb = itocb(fe->firstcb);
2938 	    cb = itocb(cb->cnext);
	/* -raw mode: hex-dump each FE as four 32-bit words */
2943 	for (i = 1; i < cbstuff.nblks; i++) {
2944 	    p = (afs_int32 *) & FE[i];
2945 	    printf("%d:%12x%12x%12x%12x\n", i, p[0], p[1], p[2], p[3]);
/*
 * Print one CallBack entry: the owning FileEntry's volume id, vnode,
 * callback count and status, the CB's host-list head and status, and the
 * expiry both as seconds-from-'now' and as an absolute ctime() string.
 * 'now' is the dump timestamp, so "exp in N secs" is relative to the dump.
 */
2954 PrintCB(struct CallBack *cb, afs_uint32 now)
2956 struct FileEntry *fe = itofe(cb->fhead);
/* thead is a timeout-index; TIndexToTime() maps it to an absolute time_t. */
2957 time_t expires = TIndexToTime(cb->thead);
2962 printf("vol=%" AFS_VOLID_FMT " vn=%u cbs=%d hi=%d st=%d fest=%d, exp in %lu secs at %s",
2963 afs_printable_VolumeId_lu(fe->volid), fe->vnode, fe->ncbs,
2964 cb->hhead, cb->status, fe->status, expires - now, ctime(&expires));
2969 #if !defined(INTERPRET_DUMP)
** try breaking callbacks on afidp from host. Use multi_rx.
** return 0 on success, non-zero on failure
/* Wrapper around MultiBreakCallBackAlternateAddress_r.
 * NOTE(review): the elided lines presumably take/drop H_LOCK around the
 * _r call, matching the usual host-package locking convention — confirm. */
2975 MultiBreakCallBackAlternateAddress(struct host *host, struct AFSCBFids *afidp)
2979 retVal = MultiBreakCallBackAlternateAddress_r(host, afidp);
/*
 * Try to break the callbacks in 'afidp' on 'host' via each of the host's
 * known alternate interface addresses, in parallel, using multi_Rx.
 * The first address that answers RXAFSCB_CallBack successfully is adopted
 * as the host's new primary address/connection.
 *
 * Returns 0 on success (one alternate answered), 1 on failure (no
 * alternate addresses, or none answered).
 *
 * NOTE(review): _r suffix — caller presumably holds H_LOCK; confirm
 * against the callers (lock handling is in elided lines).
 */
2985 MultiBreakCallBackAlternateAddress_r(struct host *host,
2986 struct AFSCBFids *afidp)
2989 struct rx_connection **conns;
2990 struct rx_connection *connSuccess = 0;
2991 struct AddrPort *interfaces;
/* Null (unauthenticated) security object, created once and reused. */
2992 static struct rx_securityClass *sc = 0;
/* Empty callback-times array to pair with the fid list in the RPC. */
2993 static struct AFSCBs tc = { 0, 0 };
2996 /* nothing more can be done */
2997 if (!host->interface)
2998 return 1; /* failure */
3000 /* the only address is the primary interface */
3001 if (host->interface->numberOfInterfaces <= 1)
3002 return 1; /* failure */
3004 /* initialise a security object only once */
3006 sc = rxnull_NewClientSecurityObject();
/* One slot per interface; at most numberOfInterfaces - 1 are used since
 * the current primary is skipped below. */
3008 i = host->interface->numberOfInterfaces;
3009 interfaces = calloc(i, sizeof(struct AddrPort));
3010 conns = calloc(i, sizeof(struct rx_connection *));
3011 if (!interfaces || !conns) {
3012 ViceLogThenPanic(0, ("Failed malloc in "
3013 "MultiBreakCallBackAlternateAddress_r\n"));
3016 /* initialize alternate rx connections */
3017 for (i = 0, j = 0; i < host->interface->numberOfInterfaces; i++) {
3018 /* this is the current primary address */
3019 if (host->host == host->interface->interface[i].addr &&
3020 host->port == host->interface->interface[i].port)
3023 interfaces[j] = host->interface->interface[i];
3025 rx_NewConnection(interfaces[j].addr,
3026 interfaces[j].port, 1, sc, 0);
/* Short (2s) dead time while probing so dead addresses fail fast. */
3027 rx_SetConnDeadTime(conns[j], 2);
3028 rx_SetConnHardDeadTime(conns[j], AFS_HARDDEADTIME);
3032 opr_Assert(j); /* at least one alternate address */
3034 ("Starting multibreakcall back on all addr for host %p (%s:%d)\n",
3035 host, afs_inet_ntoa_r(host->host, hoststr), ntohs(host->port)));
/* Fire the CallBack RPC at all alternates in parallel; the success branch
 * below runs for the first connection that completes without error. */
3037 multi_Rx(conns, j) {
3038 multi_RXAFSCB_CallBack(afidp, &tc);
/* Adopt the responding interface: replace the callback connection, then
 * update the host's address tables and primary addr/port. */
3042 if (host->callback_rxcon)
3043 rx_DestroyConnection(host->callback_rxcon);
3044 host->callback_rxcon = conns[multi_i];
3045 /* add then remove */
3046 addInterfaceAddr_r(host, interfaces[multi_i].addr,
3047 interfaces[multi_i].port);
3048 removeInterfaceAddr_r(host, host->host, host->port);
3049 host->host = interfaces[multi_i].addr;
3050 host->port = interfaces[multi_i].port;
3051 connSuccess = conns[multi_i];
/* Restore a normal (50s) dead time on the adopted connection. */
3052 rx_SetConnDeadTime(host->callback_rxcon, 50);
3053 rx_SetConnHardDeadTime(host->callback_rxcon, AFS_HARDDEADTIME);
3055 ("multibreakcall success with addr %s:%d\n",
3056 afs_inet_ntoa_r(interfaces[multi_i].addr, hoststr),
3057 ntohs(interfaces[multi_i].port)));
3064 /* Destroy all connections except the one on which we succeeded */
3065 for (i = 0; i < j; i++)
3066 if (conns[i] != connSuccess)
3067 rx_DestroyConnection(conns[i]);
3073 return 0; /* success */
3075 return 1; /* failure */
** try multi_RX probes to host.
** return 0 on success, non-0 on failure
/*
 * Probe each of the host's known alternate interface addresses in parallel
 * with RXAFSCB_ProbeUuid.  The first address that answers and matches the
 * host's UUID is adopted as the new primary address/connection; an address
 * whose cache manager reports a UUID mismatch (error 1) is removed from
 * the host's interface list.
 *
 * Returns 0 on success (an alternate answered), 1 on failure.
 *
 * NOTE(review): _r suffix — caller presumably holds H_LOCK; confirm
 * against the callers (lock handling is in elided lines).
 */
3084 MultiProbeAlternateAddress_r(struct host *host)
3087 struct rx_connection **conns;
3088 struct rx_connection *connSuccess = 0;
3089 struct AddrPort *interfaces;
/* Null (unauthenticated) security object, created once and reused. */
3090 static struct rx_securityClass *sc = 0;
3093 /* nothing more can be done */
3094 if (!host->interface)
3095 return 1; /* failure */
3097 /* the only address is the primary interface */
3098 if (host->interface->numberOfInterfaces <= 1)
3099 return 1; /* failure */
3101 /* initialise a security object only once */
3103 sc = rxnull_NewClientSecurityObject();
/* One slot per interface; the current primary is skipped below. */
3105 i = host->interface->numberOfInterfaces;
3106 interfaces = calloc(i, sizeof(struct AddrPort));
3107 conns = calloc(i, sizeof(struct rx_connection *));
3108 if (!interfaces || !conns) {
3109 ViceLogThenPanic(0, ("Failed malloc in "
3110 "MultiProbeAlternateAddress_r\n"));
3113 /* initialize alternate rx connections */
3114 for (i = 0, j = 0; i < host->interface->numberOfInterfaces; i++) {
3115 /* this is the current primary address */
3116 if (host->host == host->interface->interface[i].addr &&
3117 host->port == host->interface->interface[i].port)
3120 interfaces[j] = host->interface->interface[i];
3122 rx_NewConnection(interfaces[j].addr,
3123 interfaces[j].port, 1, sc, 0);
/* Short (2s) dead time while probing so dead addresses fail fast. */
3124 rx_SetConnDeadTime(conns[j], 2);
3125 rx_SetConnHardDeadTime(conns[j], AFS_HARDDEADTIME);
3129 opr_Assert(j); /* at least one alternate address */
3131 ("Starting multiprobe on all addr for host %p (%s:%d)\n",
3132 host, afs_inet_ntoa_r(host->host, hoststr),
3133 ntohs(host->port)));
/* Probe all alternates in parallel; the success branch below runs for a
 * connection whose ProbeUuid completed without error. */
3135 multi_Rx(conns, j) {
3136 multi_RXAFSCB_ProbeUuid(&host->interface->uuid);
/* Adopt the responding interface as the host's primary. */
3140 if (host->callback_rxcon)
3141 rx_DestroyConnection(host->callback_rxcon);
3142 host->callback_rxcon = conns[multi_i];
3143 /* add then remove */
3144 addInterfaceAddr_r(host, interfaces[multi_i].addr,
3145 interfaces[multi_i].port);
3146 removeInterfaceAddr_r(host, host->host, host->port);
3147 host->host = interfaces[multi_i].addr;
3148 host->port = interfaces[multi_i].port;
3149 connSuccess = conns[multi_i];
/* Restore a normal (50s) dead time on the adopted connection. */
3150 rx_SetConnDeadTime(host->callback_rxcon, 50);
3151 rx_SetConnHardDeadTime(host->callback_rxcon, AFS_HARDDEADTIME);
3153 ("multiprobe success with addr %s:%d\n",
3154 afs_inet_ntoa_r(interfaces[multi_i].addr, hoststr),
3155 ntohs(interfaces[multi_i].port)));
3160 ("multiprobe failure with addr %s:%d\n",
3161 afs_inet_ntoa_r(interfaces[multi_i].addr, hoststr),
3162 ntohs(interfaces[multi_i].port)));
3164 /* This is less than desirable but it's the best we can do.
3165 * The AFS Cache Manager will return either 0 for a Uuid
3166 * match and a 1 for a non-match. If the error is 1 we
3167 * therefore know that our mapping of IP address to Uuid
3168 * is wrong. We should attempt to find the correct
3169 * Uuid and fix the host tables.
3171 if (multi_error == 1) {
3172 /* remove the current alternate address from this host */
3174 removeInterfaceAddr_r(host, interfaces[multi_i].addr, interfaces[multi_i].port);
3178 #ifdef AFS_DEMAND_ATTACH_FS
3179 /* try to bail ASAP if the fileserver is shutting down */
3181 if (fs_state.mode == FS_MODE_SHUTDOWN) {
3190 /* Destroy all connections except the one on which we succeeded */
3191 for (i = 0; i < j; i++)
3192 if (conns[i] != connSuccess)
3193 rx_DestroyConnection(conns[i]);
3199 return 0; /* success */
3201 return 1; /* failure */
3204 #endif /* !defined(INTERPRET_DUMP) */