2 * Copyright 2000, International Business Machines Corporation and others.
5 * This software has been released under the terms of the IBM Public
6 * License. For details, see the LICENSE file in the top-level source
7 * directory or online at http://www.openafs.org/dl/license10.html
9 * Portions Copyright (c) 2006 Sine Nomine Associates
13 * NEW callback package callback.c (replaces vicecb.c)
14 * Updated call back routines, NOW with:
16 * Faster DeleteVenus (Now called DeleteAllCallBacks)
17 * Call back breaking for volumes
18 * Adaptive timeouts on call backs
19 * Architected for Multi RPC
20 * No locks (currently implicit vnode locks--these will go, too)
21 * Delayed call back when rpc connection down.
22 * Bulk break of delayed call backs when rpc connection is reestablished.
24 * Strict limit on number of call backs.
26 * InitCallBack(nblocks)
27 * Initialize: nblocks is the maximum number of file entries + callback entries
28 * nblocks must be < 65536
29 * Space used is nblocks*16 bytes
30 * Note that space will be reclaimed by breaking callbacks of old hosts
32 * time = AddCallBack(host, fid)
34 * Returns the expiration time at the workstation.
36 * BreakCallBack(host, fid)
37 * Break all call backs for fid, except for the specified host.
40 * BreakVolumeCallBacks(volume)
41 * Break all call backs on volume, using a single call to each host
42 * Delete all the call backs.
44 * DeleteCallBack(host,fid)
45 * Delete (do not break) single call back for fid.
47 * DeleteFileCallBacks(fid)
48 * Delete (do not break) all call backs for fid.
50 * DeleteAllCallBacks(host)
51 * Delete (do not break) all call backs for host.
53 * CleanupTimedOutCallBacks()
54 * Delete all timed out call back entries
55 * Must be called periodically by file server.
57 * BreakDelayedCallBacks(host)
58 * Break all delayed call backs for host.
59 * Returns 1: one or more failed, 0: success.
61 * PrintCallBackStats()
62 * Print statistics about call backs to stdout.
64 * DumpCallBacks() ---wishful thinking---
65 * Dump call back state to /tmp/callback.state.
66 * This is separately interpretable by the program pcb.
68 * Notes: In general, if a call back to a host doesn't get through,
69 * then HostDown, supplied elsewhere, is called. BreakDelayedCallBacks,
70 * however, does not call HostDown, but instead returns an indication of
71 * success if all delayed call backs were finally broken.
73 * BreakDelayedCallBacks MUST be called at the first sign of activity
74 * from the host after HostDown has been called (or a previous
75 * BreakDelayedCallBacks failed). The BreakDelayedCallBacks call must be
76 * allowed to complete before any requests from that host are handled.
77 * If BreakDelayedCallBacks fails, then the host should remain
78 * down (and the request should be failed).
80 * CleanupTimedOutCallBacks MUST be called periodically by the file server for
81 * this package to work correctly. Every 5 minutes is suggested.
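 *
 * Example: a rough sketch of how a file server is expected to drive this
 * package, using the interfaces as documented above (the nblocks value and
 * the timing are illustrative only):
 *
 *     InitCallBack(60000);               /* once at startup; nblocks < 65536 */
 *     ...
 *     time = AddCallBack(host, fid);     /* grant a promise on a fetch       */
 *     ...
 *     BreakCallBack(host, fid);          /* on a store, notify everyone else */
 *     ...
 *     CleanupTimedOutCallBacks();        /* periodically, e.g. every 5 mins  */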
84 #include <afsconfig.h>
85 #include <afs/param.h>
89 #include <stdlib.h> /* for malloc() */
90 #include <time.h> /* ANSI standard location for time stuff */
100 #include <afs/assert.h>
102 #include <afs/stds.h>
104 #include <afs/nfs.h> /* yuck. This is an abomination. */
107 #include <afscbint.h>
108 #include <afs/afsutil.h>
110 #include <afs/ihandle.h>
111 #include <afs/vnode.h>
112 #include <afs/volume.h>
113 #include "viced_prototypes.h"
116 #include <afs/ptclient.h> /* need definition of prlist for host.h */
118 #include "callback.h"
119 #ifdef AFS_DEMAND_ATTACH_FS
120 #include "../tviced/serialize_state.h"
121 #endif /* AFS_DEMAND_ATTACH_FS */
124 extern afsUUID FS_HostUUID;
125 extern int hostCount;
127 #ifndef INTERPRET_DUMP
128 static int ShowProblems = 1;
131 struct cbcounters cbstuff;
133 static struct FileEntry * FE = NULL; /* don't use FE[0] */
134 static struct CallBack * CB = NULL; /* don't use CB[0] */
136 static struct CallBack * CBfree = NULL;
137 static struct FileEntry * FEfree = NULL;
140 /* Time to live for call backs depends upon number of users of the file.
141 * TimeOuts is indexed by this number/8 (using TimeOut macro). Times
142 * in this table are for the workstation; for server timeouts, add ServerBias.
145 static int TimeOuts[] = {
146 /* Note: don't make the first entry larger than 4 hours (see above) */
147 4 * 60 * 60, /* 0-7 users */
148 1 * 60 * 60, /* 8-15 users */
149 30 * 60, /* 16-23 users */
150 15 * 60, /* 24-31 users */
151 15 * 60, /* 32-39 users */
152 10 * 60, /* 40-47 users */
153 10 * 60, /* 48-55 users */
154 10 * 60, /* 56-63 users */
155 }; /* Anything more: MinTimeOut */
157 /* minimum time given for a call back */
158 #ifndef INTERPRET_DUMP
159 static int MinTimeOut = (7 * 60);
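
/* Illustrative only -- this is not the actual TimeOut macro (defined
 * elsewhere in this file), just a sketch of how the table above is meant to
 * be consulted: index by nusers/8 and fall back to MinTimeOut once the table
 * runs out.
 *
 *     static int ExampleTimeOut(int nusers)
 *     {
 *         if (nusers >= 64)              // beyond the last table entry
 *             return MinTimeOut;         // 7 minutes
 *         return TimeOuts[nusers >> 3];  // e.g. 20 users -> 30 * 60
 *     }
 */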
162 /* Heads of CB queues; a timeout index is 1+index into this array */
163 static afs_uint32 timeout[CB_NUM_TIMEOUT_QUEUES];
165 static afs_int32 tfirst; /* cbtime of oldest unexpired call back time queue */
168 /* 16 byte object get/free routines */
173 /* Prototypes for static routines */
174 static struct FileEntry *FindFE(register AFSFid * fid);
176 #ifndef INTERPRET_DUMP
177 static struct CallBack *iGetCB(register int *nused);
178 static int iFreeCB(register struct CallBack *cb, register int *nused);
179 static struct FileEntry *iGetFE(register int *nused);
180 static int iFreeFE(register struct FileEntry *fe, register int *nused);
181 static int TAdd(register struct CallBack *cb, register afs_uint32 * thead);
182 static int TDel(register struct CallBack *cb);
183 static int HAdd(register struct CallBack *cb, register struct host *host);
184 static int HDel(register struct CallBack *cb);
185 static int CDel(struct CallBack *cb, int deletefe);
186 static int CDelPtr(register struct FileEntry *fe, register afs_uint32 * cbp,
188 static afs_uint32 *FindCBPtr(struct FileEntry *fe, struct host *host);
189 static int FDel(register struct FileEntry *fe);
190 static int AddCallBack1_r(struct host *host, AFSFid * fid, afs_uint32 * thead,
191 int type, int locked);
192 static void MultiBreakCallBack_r(struct cbstruct cba[], int ncbas,
193 struct AFSCBFids *afidp, struct host *xhost);
194 static int MultiBreakVolumeCallBack_r(struct host *host, int isheld,
195 struct VCBParams *parms, int deletefe);
196 static int MultiBreakVolumeCallBack(struct host *host, int isheld,
198 static int MultiBreakVolumeLaterCallBack(struct host *host, int isheld,
200 static int GetSomeSpace_r(struct host *hostp, int locked);
201 static int ClearHostCallbacks_r(struct host *hp, int locked);
204 #define GetCB() ((struct CallBack *)iGetCB(&cbstuff.nCBs))
205 #define GetFE() ((struct FileEntry *)iGetFE(&cbstuff.nFEs))
206 #define FreeCB(cb) iFreeCB((struct CallBack *)cb, &cbstuff.nCBs)
207 #define FreeFE(fe) iFreeFE((struct FileEntry *)fe, &cbstuff.nFEs)
210 /* Other protos - move out sometime */
211 void PrintCB(register struct CallBack *cb, afs_uint32 now);
213 static afs_uint32 HashTable[FEHASH_SIZE]; /* File entry hash table */
215 static struct FileEntry *
216 FindFE(register AFSFid * fid)
220 register struct FileEntry *fe;
222 hash = FEHash(fid->Volume, fid->Unique);
223 for (fei = HashTable[hash]; fei; fei = fe->fnext) {
225 if (fe->volid == fid->Volume && fe->unique == fid->Unique
226 && fe->vnode == fid->Vnode && (fe->status & FE_LATER) != FE_LATER)
232 #ifndef INTERPRET_DUMP
234 static struct CallBack *
235 iGetCB(register int *nused)
237 register struct CallBack *ret;
239 if ((ret = CBfree)) {
240 CBfree = (struct CallBack *)(((struct object *)ret)->next);
247 iFreeCB(register struct CallBack *cb, register int *nused)
249 ((struct object *)cb)->next = (struct object *)CBfree;
255 static struct FileEntry *
256 iGetFE(register int *nused)
258 register struct FileEntry *ret;
260 if ((ret = FEfree)) {
261 FEfree = (struct FileEntry *)(((struct object *)ret)->next);
268 iFreeFE(register struct FileEntry *fe, register int *nused)
270 ((struct object *)fe)->next = (struct object *)FEfree;
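
/* The get/free routines above implement a LIFO free list over one pool of
 * 16-byte objects: iGetCB/iGetFE pop the head of CBfree/FEfree (updating the
 * in-use counters), while iFreeCB/iFreeFE push an object back.  A minimal
 * sketch of the underlying pattern, using the same "struct object" overlay
 * (pool_get/pool_put are illustrative names, not part of this file):
 *
 *     static struct object *pool_get(struct object **freelist)
 *     {
 *         struct object *o = *freelist;
 *         if (o)
 *             *freelist = o->next;
 *         return o;
 *     }
 *
 *     static void pool_put(struct object **freelist, struct object *o)
 *     {
 *         o->next = *freelist;
 *         *freelist = o;
 *     }
 */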
276 /* Add cb to end of specified timeout list */
278 TAdd(register struct CallBack *cb, register afs_uint32 * thead)
281 (*thead) = cb->tnext = cb->tprev = cbtoi(cb);
283 register struct CallBack *thp = itocb(*thead);
285 cb->tprev = thp->tprev;
289 thp->tprev = (itocb(thp->tprev)->tnext = cbtoi(cb));
291 thp->tprev = cbtoi(cb);
294 cb->thead = ttoi(thead);
298 /* Delete call back entry from timeout list */
300 TDel(register struct CallBack *cb)
302 register afs_uint32 *thead = itot(cb->thead);
304 if (*thead == cbtoi(cb))
305 *thead = (*thead == cb->tnext ? 0 : cb->tnext);
306 if (itocb(cb->tprev))
307 itocb(cb->tprev)->tnext = cb->tnext;
308 if (itocb(cb->tnext))
309 itocb(cb->tnext)->tprev = cb->tprev;
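
/* TAdd/TDel maintain each timeout queue as a circular, doubly linked list of
 * CallBack indices: *thead holds the index of the first entry, tnext/tprev
 * are indices (converted with cbtoi/itocb), and an empty queue is *thead == 0.
 * For example, appending cb to a queue that currently holds only the single
 * entry x should leave roughly this state (a sketch, not code from this file):
 *
 *     *thead    == cbtoi(x);
 *     x->tnext  == cbtoi(cb);   x->tprev  == cbtoi(cb);
 *     cb->tnext == cbtoi(x);    cb->tprev == cbtoi(x);
 */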
313 /* Add cb to end of specified host list */
315 HAdd(register struct CallBack *cb, register struct host *host)
317 cb->hhead = h_htoi(host);
319 host->cblist = cb->hnext = cb->hprev = cbtoi(cb);
321 register struct CallBack *fcb = itocb(host->cblist);
323 cb->hprev = fcb->hprev;
324 cb->hnext = cbtoi(fcb);
325 fcb->hprev = (itocb(fcb->hprev)->hnext = cbtoi(cb));
330 /* Delete call back entry from host list */
332 HDel(register struct CallBack *cb)
334 register afs_uint32 *hhead = &h_itoh(cb->hhead)->cblist;
336 if (*hhead == cbtoi(cb))
337 *hhead = (*hhead == cb->hnext ? 0 : cb->hnext);
338 itocb(cb->hprev)->hnext = cb->hnext;
339 itocb(cb->hnext)->hprev = cb->hprev;
343 /* Delete call back entry from fid's chain of cb's */
344 /* N.B. This one also deletes the CB, and also possibly parent FE, so
345 * make sure that it is not on any other list before calling this routine.
348 CDel(struct CallBack *cb, int deletefe)
351 struct FileEntry *fe = itofe(cb->fhead);
352 register afs_uint32 *cbp;
355 for (safety = 0, cbp = &fe->firstcb; *cbp && *cbp != cbi;
356 cbp = &itocb(*cbp)->cnext, safety++) {
357 if (safety > cbstuff.nblks + 10) {
360 ("CDel: Internal Error -- shutting down: wanted %d from %d, now at %d\n",
361 cbi, fe->firstcb, *cbp));
363 ShutDownAndCore(PANIC);
366 CDelPtr(fe, cbp, deletefe);
370 /* Same as CDel, but pointer to parent pointer to CB entry is passed,
371 * as well as file entry */
372 /* N.B. This one also deletes the CB, and also possibly parent FE, so
373 * make sure that it is not on any other list before calling this routine.
375 static int Ccdelpt = 0, CcdelB = 0;
378 CDelPtr(register struct FileEntry *fe, register afs_uint32 * cbp,
381 register struct CallBack *cb;
391 if ((--fe->ncbs == 0) && deletefe)
397 FindCBPtr(struct FileEntry *fe, struct host *host)
399 register afs_uint32 hostindex = h_htoi(host);
400 register struct CallBack *cb;
401 register afs_uint32 *cbp;
404 for (safety = 0, cbp = &fe->firstcb; *cbp; cbp = &cb->cnext, safety++) {
405 if (safety > cbstuff.nblks) {
406 ViceLog(0, ("FindCBPtr: Internal Error -- shutting down.\n"));
408 ShutDownAndCore(PANIC);
411 if (cb->hhead == hostindex)
417 /* Delete file entry from hash table */
419 FDel(register struct FileEntry *fe)
421 register int fei = fetoi(fe);
422 register afs_uint32 *p = &HashTable[FEHash(fe->volid, fe->unique)];
424 while (*p && *p != fei)
425 p = &itofe(*p)->fnext;
432 /* initialize the callback package */
434 InitCallBack(int nblks)
437 tfirst = CBtime(FT_ApproxTime());
438 /* N.B. The "-1", below, is because
439 * FE[0] and CB[0] are not used--and not allocated */
440 FE = ((struct FileEntry *)(calloc(nblks, sizeof(struct FileEntry))));
442 ViceLog(0, ("Failed malloc in InitCallBack\n"));
445 FE--; /* FE[0] is supposed to point to junk */
446 cbstuff.nFEs = nblks;
448 FreeFE(&FE[cbstuff.nFEs]); /* This is correct */
449 CB = ((struct CallBack *)(calloc(nblks, sizeof(struct CallBack))));
451 ViceLog(0, ("Failed malloc in InitCallBack\n"));
454 CB--; /* CB[0] is supposed to point to junk */
455 cbstuff.nCBs = nblks;
457 FreeCB(&CB[cbstuff.nCBs]); /* This is correct */
458 cbstuff.nblks = nblks;
459 cbstuff.nbreakers = 0;
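
/* A note on the FE-- / CB-- adjustments above: the tables use 1-based
 * indices (index 0 means "none"), so the base pointer is backed up one slot,
 * making FE[1]..FE[nblks] and CB[1]..CB[nblks] the valid ranges while
 * element 0 is never touched.  Roughly (an illustrative sketch):
 *
 *     struct FileEntry *base = calloc(nblks, sizeof(struct FileEntry));
 *     FE = base - 1;    // so fetoi(&FE[i]) == i and index 0 can mean "none"
 */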
465 XCallBackBulk_r(struct host * ahost, struct AFSFid * fids, afs_int32 nfids)
467 struct AFSCallBack tcbs[AFSCBMAX];
473 struct rx_connection *cb_conn = NULL;
476 rx_SetConnDeadTime(ahost->callback_rxcon, 4);
477 rx_SetConnHardDeadTime(ahost->callback_rxcon, AFS_HARDDEADTIME);
484 for (i = 0; i < nfids && i < AFSCBMAX; i++) {
485 tcbs[i].CallBackVersion = CALLBACK_VERSION;
486 tcbs[i].ExpirationTime = 0;
487 tcbs[i].CallBackType = CB_DROPPED;
489 tf.AFSCBFids_len = i;
490 tf.AFSCBFids_val = &(fids[j]);
494 tc.AFSCBs_val = tcbs;
496 cb_conn = ahost->callback_rxcon;
497 rx_GetConnection(cb_conn);
499 code |= RXAFSCB_CallBack(cb_conn, &tf, &tc);
500 rx_PutConnection(cb_conn);
508 /* the locked flag tells us if the host entry has already been locked
509 * by our parent. I don't think anybody actually calls us with the
510 * host locked, but here's how to make that work: GetSomeSpace has to
511 * change so that it doesn't attempt to lock any hosts < "host". That
512 * means that it might be unable to free any objects, so it has to
513 * return an exit status. If it fails, then AddCallBack1 might fail,
514 * as well. If so, the host->ResetDone should probably be set to 0,
515 * and we probably don't want to return a callback promise to the
516 * cache manager, either. */
518 AddCallBack1(struct host *host, AFSFid * fid, afs_uint32 * thead, int type,
526 retVal = AddCallBack1_r(host, fid, thead, type, 1);
536 AddCallBack1_r(struct host *host, AFSFid * fid, afs_uint32 * thead, int type,
539 struct FileEntry *fe;
540 struct CallBack *cb = 0, *lastcb = 0;
541 struct FileEntry *newfe = 0;
542 afs_uint32 time_out = 0;
543 afs_uint32 *Thead = thead;
544 struct CallBack *newcb = 0;
547 cbstuff.AddCallBacks++;
551 /* allocate these guys first, since we can't call the allocator with
552 * the host structure locked -- or we might deadlock. However, we have
553 * to avoid races with FindFE... */
554 while (!(newcb = GetCB())) {
555 GetSomeSpace_r(host, locked);
557 while (!(newfe = GetFE())) { /* Get it now, so we don't have to call */
558 /* GetSomeSpace with the host locked, later. This might turn out to */
559 /* have been unnecessary, but that's actually kind of unlikely, since */
560 /* most files are not shared. */
561 GetSomeSpace_r(host, locked);
565 h_Lock_r(host); /* this can yield, so do it before we get any */
570 if (type == CB_NORMAL) {
572 TimeCeiling(FT_ApproxTime() + TimeOut(fe ? fe->ncbs : 0) +
574 Thead = THead(CBtime(time_out));
575 } else if (type == CB_VOLUME) {
576 time_out = TimeCeiling((60 * 120 + FT_ApproxTime()) + ServerBias);
577 Thead = THead(CBtime(time_out));
578 } else if (type == CB_BULK) {
579 /* bulk status can get so many callbacks all at once, and most of them
580 * are probably not for things that will be used for long.
583 TimeCeiling(FT_ApproxTime() + ServerBias +
584 TimeOut(22 + (fe ? fe->ncbs : 0)));
585 Thead = THead(CBtime(time_out));
591 register afs_uint32 hash;
596 fe->volid = fid->Volume;
597 fe->vnode = fid->Vnode;
598 fe->unique = fid->Unique;
601 hash = FEHash(fid->Volume, fid->Unique);
602 fe->fnext = HashTable[hash];
603 HashTable[hash] = fetoi(fe);
605 for (safety = 0, lastcb = cb = itocb(fe->firstcb); cb;
606 lastcb = cb, cb = itocb(cb->cnext), safety++) {
607 if (safety > cbstuff.nblks) {
608 ViceLog(0, ("AddCallBack1: Internal Error -- shutting down.\n"));
610 ShutDownAndCore(PANIC);
612 if (cb->hhead == h_htoi(host))
615 if (cb) { /* Already have call back: move to new timeout list */
616 /* don't change delayed callbacks back to normal ones */
617 if (cb->status != CB_DELAYED)
619 /* Only move if new timeout is longer */
620 if (TNorm(ttoi(Thead)) > TNorm(cb->thead)) {
624 if (newfe == NULL) { /* we are using the new FE */
625 fe->firstcb = cbtoi(cb);
627 cb->fhead = fetoi(fe);
632 *(lastcb ? &lastcb->cnext : &fe->firstcb) = cbtoi(cb);
635 cb->fhead = fetoi(fe);
641 /* now free any still-unused callback or file entries */
647 if (!locked) /* freecb and freefe might(?) yield */
650 if (type == CB_NORMAL || type == CB_VOLUME || type == CB_BULK)
651 return time_out - ServerBias; /* Expires sooner at workstation */
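
/* The value handed back to the cache manager is ServerBias seconds earlier
 * than what the server records, so the client's promise always lapses before
 * the server forgets it.  Illustrative arithmetic, ignoring the TimeCeiling
 * rounding (ServerBias is a small constant defined elsewhere in this file):
 *
 *     server keeps:     time_out = now + TimeOut(ncbs) + ServerBias
 *     client is told:   time_out - ServerBias == now + TimeOut(ncbs)
 */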
657 CompareCBA(const void *e1, const void *e2)
659 const struct cbstruct *cba1 = (const struct cbstruct *)e1;
660 const struct cbstruct *cba2 = (const struct cbstruct *)e2;
661 return ((cba1->hp)->index - (cba2->hp)->index);
664 /* Take an array full of hosts, all held. Break callbacks to them, and
665 * release the holds once you're done, except don't release xhost. xhost
666 * may be NULL. Currently only works for a single Fid in afidp array.
667 * If you want to make this work with multiple fids, you need to fix
668 * the error handling. One approach would be to force a reset if a
669 * multi-fid call fails, or you could add delayed callbacks for each
670 * fid. You probably also need to sort and remove duplicate hosts.
671 * When this is called from the BreakVolumeCallBacks path, it does NOT
672 * force a reset if the RPC fails, it just marks the host down and tries
673 * to create a delayed callback. */
674 /* N.B. be sure that code works when ncbas == 0 */
675 /* N.B. requires all the cba[*].hp pointers to be valid... */
676 /* This routine does not hold a lock on the host for the duration of
677 * the BreakCallBack RPC, which is a significant deviation from tradition.
678 * It _does_ get a lock on the host before setting VenusDown = 1,
679 * which is sufficient only if VenusDown is never cleared (set back to 0)
680 * except while the lock is held over the RPC, and the same holds
681 * wherever else VenusDown is cleared. */
683 MultiBreakCallBack_r(struct cbstruct cba[], int ncbas,
684 struct AFSCBFids *afidp, struct host *xhost)
687 struct rx_connection *conns[MAX_CB_HOSTS];
688 static struct AFSCBs tc = { 0, 0 };
689 int multi_to_cba_map[MAX_CB_HOSTS];
691 assert(ncbas <= MAX_CB_HOSTS);
693 /* sort cba list to avoid makecall issues */
694 qsort(cba, ncbas, sizeof(struct cbstruct), CompareCBA);
696 /* set up conns for multi-call */
697 for (i = 0, j = 0; i < ncbas; i++) {
698 struct host *thishost = cba[i].hp;
699 if (!thishost || (thishost->hostFlags & HOSTDELETED)) {
702 rx_GetConnection(thishost->callback_rxcon);
703 multi_to_cba_map[j] = i;
704 conns[j++] = thishost->callback_rxcon;
707 rx_SetConnDeadTime(thishost->callback_rxcon, 4);
708 rx_SetConnHardDeadTime(thishost->callback_rxcon, AFS_HARDDEADTIME);
712 if (j) { /* who knows what multi would do with 0 conns? */
716 multi_RXAFSCB_CallBack(afidp, &tc);
722 i = multi_to_cba_map[multi_i];
728 ("BCB: INTERNAL ERROR: hp=%x, cba=%x, thead=%u\n",
732 ** try breaking callbacks on alternate interface addresses
734 if (MultiBreakCallBackAlternateAddress(hp, afidp)) {
737 ("BCB: Failed on file %u.%u.%u, Host %x (%s:%d) is down\n",
738 afidp->AFSCBFids_val->Volume,
739 afidp->AFSCBFids_val->Vnode,
740 afidp->AFSCBFids_val->Unique,
742 afs_inet_ntoa_r(hp->host, hoststr),
748 hp->hostFlags |= VENUSDOWN;
750 * We always go into AddCallBack1_r with the host locked
752 AddCallBack1_r(hp, afidp->AFSCBFids_val, itot(idx),
765 for (i = 0; i < ncbas; i++) {
768 if (hp && xhost != hp) {
773 /* H_UNLOCK around this so h_FreeConnection does not deadlock.
774 h_FreeConnection should *never* be called on a callback connection,
775 but on 10/27/04 a deadlock occurred where it was; once we know why,
776 this should be reverted. -- shadow */
778 for (i = 0; i < j; i++) {
779 rx_PutConnection(conns[i]);
787 * Break all call backs for fid, except for the specified host (unless flag
788 * is true, in which case all get a callback message). Assumption: the specified
789 * host is h_Held by the caller; the others aren't.
790 * Specified host may be bogus, that's ok. This used to check to see if the
791 * host was down in two places, once right after the host was h_held, and
792 * again after it was locked. That race condition is incredibly rare and
793 * relatively harmless even when it does occur, so we don't check for it now.
795 /* if flag is true, send a break callback msg to "host", too */
797 BreakCallBack(struct host *xhost, AFSFid * fid, int flag)
799 struct FileEntry *fe;
800 struct CallBack *cb, *nextcb;
801 struct cbstruct cba[MAX_CB_HOSTS];
808 ("BCB: BreakCallBack(Host %x all but %s:%d, (%u,%u,%u))\n",
809 xhost, afs_inet_ntoa_r(xhost->host, hoststr), ntohs(xhost->port),
810 fid->Volume, fid->Vnode, fid->Unique));
813 cbstuff.BreakCallBacks++;
818 hostindex = h_htoi(xhost);
819 cb = itocb(fe->firstcb);
820 if (!cb || ((fe->ncbs == 1) && (cb->hhead == hostindex) && !flag)) {
821 /* the most common case is what follows the || */
824 tf.AFSCBFids_len = 1;
825 tf.AFSCBFids_val = fid;
828 for (ncbas = 0; cb && ncbas < MAX_CB_HOSTS; cb = nextcb) {
829 nextcb = itocb(cb->cnext);
830 if ((cb->hhead != hostindex || flag)
831 && (cb->status == CB_BULK || cb->status == CB_NORMAL
832 || cb->status == CB_VOLUME)) {
833 struct host *thishost = h_itoh(cb->hhead);
835 ViceLog(0, ("BCB: BOGUS! cb->hhead is NULL!\n"));
836 } else if (thishost->hostFlags & VENUSDOWN) {
838 ("BCB: %x (%s:%d) is down; delaying break call back\n",
839 thishost, afs_inet_ntoa_r(thishost->host, hoststr),
840 ntohs(thishost->port)));
841 cb->status = CB_DELAYED;
844 cba[ncbas].hp = thishost;
845 cba[ncbas].thead = cb->thead;
849 CDel(cb, 1); /* Usually first; so this delete
850 * is reasonably inexpensive */
856 MultiBreakCallBack_r(cba, ncbas, &tf, xhost);
858 /* we need to do all these initializations again because MultiBreakCallBack may block */
863 cb = itocb(fe->firstcb);
864 if (!cb || ((fe->ncbs == 1) && (cb->hhead == hostindex) && !flag)) {
865 /* the most common case is what follows the || */
876 /* Delete (do not break) single call back for fid */
878 DeleteCallBack(struct host *host, AFSFid * fid)
880 register struct FileEntry *fe;
881 register afs_uint32 *pcb;
885 cbstuff.DeleteCallBacks++;
893 ("DCB: No call backs for fid (%u, %u, %u)\n", fid->Volume,
894 fid->Vnode, fid->Unique));
897 pcb = FindCBPtr(fe, host);
900 ("DCB: No call back for host %x (%s:%d), (%u, %u, %u)\n",
901 host, afs_inet_ntoa_r(host->host, hoststr), ntohs(host->port),
902 fid->Volume, fid->Vnode, fid->Unique));
916 * Delete (do not break) all call backs for fid. This call doesn't
917 * set all of the various host locks, but it shouldn't really matter
918 * since we're not adding callbacks, but deleting them. I'm not sure
919 * why it doesn't set the lock, however; perhaps it should.
922 DeleteFileCallBacks(AFSFid * fid)
924 register struct FileEntry *fe;
925 register struct CallBack *cb;
926 register afs_uint32 cbi;
930 cbstuff.DeleteFiles++;
935 ("DF: No fid (%u,%u,%u) to delete\n", fid->Volume, fid->Vnode,
939 for (n = 0, cbi = fe->firstcb; cbi; n++) {
952 /* Delete (do not break) all call backs for host. The host should be
955 DeleteAllCallBacks_r(struct host *host, int deletefe)
957 register struct CallBack *cb;
958 register int cbi, first;
960 cbstuff.DeleteAllCallBacks++;
961 cbi = first = host->cblist;
963 ViceLog(8, ("DV: no call backs\n"));
971 } while (cbi != first);
977 * Break all delayed call backs for host. Returns 1 if all call backs
978 * successfully broken; 0 otherwise. Assumes host is h_Held and h_Locked.
979 * Must be called with VenusDown set for this host
982 BreakDelayedCallBacks(struct host *host)
986 retVal = BreakDelayedCallBacks_r(host);
992 BreakDelayedCallBacks_r(struct host *host)
994 struct AFSFid fids[AFSCBMAX];
995 u_byte thead[AFSCBMAX]; /* This should match thead in struct CallBack */
996 int cbi, first, nfids;
1000 struct rx_connection *cb_conn;
1002 cbstuff.nbreakers++;
1003 if (!(host->hostFlags & RESETDONE) && !(host->hostFlags & HOSTDELETED)) {
1004 host->hostFlags &= ~ALTADDR; /* alternate addresses are invalid */
1005 cb_conn = host->callback_rxcon;
1006 rx_GetConnection(cb_conn);
1007 if (host->interface) {
1010 RXAFSCB_InitCallBackState3(cb_conn, &FS_HostUUID);
1013 code = RXAFSCB_InitCallBackState(cb_conn);
1015 rx_PutConnection(cb_conn);
1018 host->hostFlags |= ALTADDR; /* alternate addresses are valid */
1022 ("CB: Call back connect back failed (in break delayed) for Host %x (%s:%d)\n",
1023 host, afs_inet_ntoa_r(host->host, hoststr),
1024 ntohs(host->port)));
1026 host->hostFlags |= VENUSDOWN;
1029 ("InitCallBackState success on %x (%s:%d)\n",
1030 host, afs_inet_ntoa_r(host->host, hoststr), ntohs(host->port)));
1031 /* reset was done successfully */
1032 host->hostFlags |= RESETDONE;
1033 host->hostFlags &= ~VENUSDOWN;
1036 while (!(host->hostFlags & HOSTDELETED)) {
1038 host->hostFlags &= ~VENUSDOWN; /* presume up */
1039 cbi = first = host->cblist;
1043 first = host->cblist;
1046 if (cb->status == CB_DELAYED) {
1047 register struct FileEntry *fe = itofe(cb->fhead);
1048 thead[nfids] = cb->thead;
1049 fids[nfids].Volume = fe->volid;
1050 fids[nfids].Vnode = fe->vnode;
1051 fids[nfids].Unique = fe->unique;
1057 } while (cbi && cbi != first && nfids < AFSCBMAX);
1063 if (XCallBackBulk_r(host, fids, nfids)) {
1064 /* Failed, again: put them back, probably with old timeout values */
1069 ("CB: XCallBackBulk failed, Host %x (%s:%d); callback list follows:\n",
1070 host, afs_inet_ntoa_r(host->host, hoststr),
1071 ntohs(host->port)));
1073 for (i = 0; i < nfids; i++) {
1076 ("CB: Host %x (%s:%d), file %u.%u.%u (part of bulk callback)\n",
1077 host, afs_inet_ntoa_r(host->host, hoststr),
1078 ntohs(host->port), fids[i].Volume,
1079 fids[i].Vnode, fids[i].Unique));
1082 * AddCallBack1_r(host, &fids[i], itot(thead[i]), CB_DELAYED, 1);
1083 * but it turns out to cause too many tricky locking problems.
1084 * now, if break delayed fails, screw it. */
1086 host->hostFlags |= VENUSDOWN; /* Failed */
1087 ClearHostCallbacks_r(host, 1 /* locked */ );
1091 if (nfids < AFSCBMAX)
1095 cbstuff.nbreakers--;
1096 /* If we succeeded it's always ok to unset HFE_LATER */
1097 if (!(host->hostFlags & VENUSDOWN))
1098 host->hostFlags &= ~HFE_LATER;
1099 return (host->hostFlags & VENUSDOWN);
1103 ** isheld is 0 if the host is held in h_Enumerate
1104 ** isheld is 1 if the host is held in BreakVolumeCallBacks
1107 MultiBreakVolumeCallBack_r(struct host *host, int isheld,
1108 struct VCBParams *parms, int deletefe)
1113 return isheld; /* host is held only by h_Enumerate, do nothing */
1114 if (host->hostFlags & HOSTDELETED)
1115 return 0; /* host is deleted, release hold */
1117 if (host->hostFlags & VENUSDOWN) {
1119 if (host->hostFlags & HOSTDELETED) {
1121 return 0; /* Release hold */
1124 ("BVCB: volume call back for Host %x (%s:%d) failed\n",
1125 host, afs_inet_ntoa_r(host->host, hoststr), ntohs(host->port)));
1128 ("CB: volume callback for Host %x (%s:%d) failed\n",
1129 host, afs_inet_ntoa_r(host->host, hoststr),
1130 ntohs(host->port)));
1132 DeleteAllCallBacks_r(host, deletefe); /* Delete all callback state
1133 * rather than attempting to
1134 * selectively remember to
1135 * delete the volume callbacks
1137 host->hostFlags &= ~RESETDONE; /* Do InitCallBackState when host returns */
1139 return 0; /* release hold */
1141 assert(parms->ncbas <= MAX_CB_HOSTS);
1143 /* Do not call MultiBreakCallBack on the current host structure
1144 ** because it would prematurely release the hold on the host
1146 if (parms->ncbas == MAX_CB_HOSTS) {
1147 struct AFSCBFids tf;
1149 tf.AFSCBFids_len = 1;
1150 tf.AFSCBFids_val = parms->fid;
1152 /* this releases all the hosts */
1153 MultiBreakCallBack_r(parms->cba, parms->ncbas, &tf, 0 /* xhost */ );
1157 parms->cba[parms->ncbas].hp = host;
1158 parms->cba[(parms->ncbas)++].thead = parms->thead;
1159 return 1; /* DON'T release hold, because we still need it. */
1163 ** isheld is 0 if the host is held in h_Enumerate
1164 ** isheld is 1 if the host is held in BreakVolumeCallBacks
1167 MultiBreakVolumeCallBack(struct host *host, int isheld, void *rock)
1169 struct VCBParams *parms = (struct VCBParams *) rock;
1173 retval = MultiBreakVolumeCallBack_r(host, isheld, parms, 1);
1179 ** isheld is 0 if the host is held in h_Enumerate
1180 ** isheld is 1 if the host is held in BreakVolumeCallBacks
1183 MultiBreakVolumeLaterCallBack(struct host *host, int isheld, void *rock)
1185 struct VCBParams *parms = (struct VCBParams *)rock;
1188 retval = MultiBreakVolumeCallBack_r(host, isheld, parms, 0);
1194 * Break all call backs on a single volume. Don't call this with any
1195 * hosts h_held. Note that this routine clears the callbacks before
1196 * actually breaking them, and that the vnode isn't locked during this
1197 * operation, so that people might see temporary callback loss while
1198 * this function is executing. It is just a temporary state, however,
1199 * since the callback will be broken later by this same function.
1201 * Now uses multi-RX for CallBack RPC. Note that the
1202 * multiBreakCallBacks routine does not force a reset if the RPC
1203 * fails, unlike the previous version of this routine, but does create
1204 * a delayed callback. Resets will be forced if the host is
1205 * determined to be down before the RPC is executed.
1208 BreakVolumeCallBacks(afs_uint32 volume)
1213 struct CallBack *cb;
1214 struct FileEntry *fe;
1216 struct VCBParams henumParms;
1217 afs_uint32 tthead = 0; /* zero is illegal value */
1220 fid.Volume = volume, fid.Vnode = fid.Unique = 0;
1221 for (hash = 0; hash < FEHASH_SIZE; hash++) {
1222 for (feip = &HashTable[hash]; (fe = itofe(*feip));) {
1223 if (fe->volid == volume) {
1224 register struct CallBack *cbnext;
1225 for (cb = itocb(fe->firstcb); cb; cb = cbnext) {
1226 host = h_itoh(cb->hhead);
1228 cbnext = itocb(cb->cnext);
1229 if (!tthead || (TNorm(tthead) < TNorm(cb->thead))) {
1235 /* leave hold for MultiBreakVolumeCallBack to clear */
1246 /* didn't find any callbacks, so return right away. */
1250 henumParms.ncbas = 0;
1251 henumParms.fid = &fid;
1252 henumParms.thead = tthead;
1254 h_Enumerate(MultiBreakVolumeCallBack, &henumParms);
1256 if (henumParms.ncbas) { /* do left-overs */
1257 struct AFSCBFids tf;
1258 tf.AFSCBFids_len = 1;
1259 tf.AFSCBFids_val = &fid;
1261 MultiBreakCallBack_r(henumParms.cba, henumParms.ncbas, &tf, 0);
1263 henumParms.ncbas = 0;
1269 #ifdef AFS_PTHREAD_ENV
1270 extern pthread_cond_t fsync_cond;
1272 extern char fsync_wait[];
1276 BreakVolumeCallBacksLater(afs_uint32 volume)
1280 struct FileEntry *fe;
1281 struct CallBack *cb;
1285 ViceLog(25, ("Setting later on volume %u\n", volume));
1287 for (hash = 0; hash < FEHASH_SIZE; hash++) {
1288 for (feip = &HashTable[hash]; (fe = itofe(*feip)) != NULL; ) {
1289 if (fe->volid == volume) {
1290 register struct CallBack *cbnext;
1291 for (cb = itocb(fe->firstcb); cb; cb = cbnext) {
1292 host = h_itoh(cb->hhead);
1293 host->hostFlags |= HFE_LATER;
1294 cb->status = CB_DELAYED;
1295 cbnext = itocb(cb->cnext);
1298 fe->status |= FE_LATER;
1307 /* didn't find any callbacks, so return right away. */
1311 ViceLog(25, ("Fsync thread wakeup\n"));
1312 #ifdef AFS_PTHREAD_ENV
1314 assert(pthread_cond_broadcast(&fsync_cond) == 0);
1317 LWP_NoYieldSignal(fsync_wait);
1323 BreakLaterCallBacks(void)
1328 struct CallBack *cb;
1329 struct FileEntry *fe = NULL;
1330 struct FileEntry *myfe = NULL;
1332 struct VCBParams henumParms;
1333 unsigned short tthead = 0; /* zero is illegal value */
1337 ViceLog(25, ("Looking for FileEntries to unchain\n"));
1340 /* Pick the first volume we see to clean up */
1341 fid.Volume = fid.Vnode = fid.Unique = 0;
1343 for (hash = 0; hash < FEHASH_SIZE; hash++) {
1344 for (feip = &HashTable[hash]; (fe = itofe(*feip)) != NULL; ) {
1345 if (fe && (fe->status & FE_LATER)
1346 && (fid.Volume == 0 || fid.Volume == fe->volid)) {
1347 /* Ugly, but used to avoid left side casting */
1348 struct object *tmpfe;
1350 ("Unchaining for %u:%u:%u\n", fe->vnode, fe->unique,
1352 fid.Volume = fe->volid;
1354 fe->status &= ~FE_LATER; /* not strictly needed */
1355 /* Works because volid lies beyond the bytes overwritten by the next pointer */
1356 tmpfe = (struct object *)fe;
1357 tmpfe->next = (struct object *)myfe;
1370 /* loop over FEs from myfe and free/break */
1372 for (fe = myfe; fe;) {
1373 register struct CallBack *cbnext;
1374 for (cb = itocb(fe->firstcb); cb; cb = cbnext) {
1375 cbnext = itocb(cb->cnext);
1376 host = h_itoh(cb->hhead);
1377 if (cb->status == CB_DELAYED) {
1379 if (!tthead || (TNorm(tthead) < TNorm(cb->thead))) {
1384 CDel(cb, 0); /* Don't let CDel clean up the fe */
1385 /* leave hold for MultiBreakVolumeCallBack to clear */
1388 ("Found host %x (%s:%d) non-DELAYED cb for %u:%u:%u\n",
1389 host, afs_inet_ntoa_r(host->host, hoststr),
1390 ntohs(host->port), fe->vnode, fe->unique, fe->volid));
1394 fe = (struct FileEntry *)((struct object *)fe)->next;
1399 ViceLog(125, ("Breaking volume %u\n", fid.Volume));
1400 henumParms.ncbas = 0;
1401 henumParms.fid = &fid;
1402 henumParms.thead = tthead;
1404 h_Enumerate(MultiBreakVolumeLaterCallBack, (char *)&henumParms);
1406 if (henumParms.ncbas) { /* do left-overs */
1407 struct AFSCBFids tf;
1408 tf.AFSCBFids_len = 1;
1409 tf.AFSCBFids_val = &fid;
1411 MultiBreakCallBack_r(henumParms.cba, henumParms.ncbas, &tf, 0);
1412 henumParms.ncbas = 0;
1417 /* Arrange to be called again */
1422 * Delete all timed-out call back entries (to be called periodically by file
1426 CleanupTimedOutCallBacks(void)
1429 CleanupTimedOutCallBacks_r();
1435 CleanupTimedOutCallBacks_r(void)
1437 afs_uint32 now = CBtime(FT_ApproxTime());
1438 register afs_uint32 *thead;
1439 register struct CallBack *cb;
1440 register int ntimedout = 0;
1443 while (tfirst <= now) {
1445 cbi = *(thead = THead(tfirst));
1451 ("CCB: deleting timed out call back %x (%s:%d), (%u,%u,%u)\n",
1452 h_itoh(cb->hhead)->host,
1453 afs_inet_ntoa_r(h_itoh(cb->hhead)->host, hoststr),
1454 h_itoh(cb->hhead)->port, itofe(cb->fhead)->volid,
1455 itofe(cb->fhead)->vnode, itofe(cb->fhead)->unique));
1459 if (ntimedout > cbstuff.nblks) {
1460 ViceLog(0, ("CCB: Internal Error -- shutting down...\n"));
1461 DumpCallBackState();
1462 ShutDownAndCore(PANIC);
1464 } while (cbi != *thead);
1469 cbstuff.CBsTimedOut += ntimedout;
1470 ViceLog(7, ("CCB: deleted %d timed out callbacks\n", ntimedout));
1471 return (ntimedout > 0);
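
/* A rough picture of the loop above: tfirst names the oldest timeout bucket
 * that may still hold live callbacks, "now" is the current time in the same
 * bucket units (CBtime), and each expired bucket's circular list at
 * THead(tfirst) is drained in turn -- every entry is unlinked from its host
 * and fid chains before the bucket head is cleared and tfirst advances.
 */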
1474 static struct host *lih_host;
1475 static int lih_host_held;
1477 /* This version does not allow 'host' to be selected unless its ActiveCall
1478 * is newer than 'hostp' which is the host with the oldest ActiveCall from
1479 * the last pass (if it is provided). We filter out any hosts that are
1480 * held by other threads.
1483 lih0_r(register struct host *host, register int held, void *rock)
1485 struct host *hostp = (struct host *) rock;
1487 && (hostp && host != hostp)
1488 && (!held && !h_OtherHolds_r(host))
1489 && (!lih_host || host->ActiveCall < lih_host->ActiveCall)
1490 && (!hostp || host->ActiveCall > hostp->ActiveCall)) {
1491 if (lih_host != NULL && lih_host_held) {
1492 h_Release_r(lih_host);
1495 lih_host_held = !held;
1501 /* This version does not allow 'host' to be selected unless its ActiveCall
1502 * is newer than 'hostp' which is the host with the oldest ActiveCall from
1503 * the last pass (if it is provided). In this second variant, we do not
1504 * prevent held hosts from being selected.
1507 lih1_r(register struct host *host, register int held, void *rock)
1509 struct host *hostp = (struct host *) rock;
1512 && (hostp && host != hostp)
1513 && (!lih_host || host->ActiveCall < lih_host->ActiveCall)
1514 && (!hostp || host->ActiveCall > hostp->ActiveCall)) {
1515 if (lih_host != NULL && lih_host_held) {
1516 h_Release_r(lih_host);
1519 lih_host_held = !held;
1525 /* This could be upgraded to get more space each time */
1526 /* first pass: sequentially find the oldest host which isn't held by
1527 anyone for which we can clear callbacks;
1529 /* second pass: sequentially find the oldest host regardless of
1530 whether or not the host is held; skipping 'hostp' */
1531 /* third pass: attempt to clear callbacks from 'hostp' */
1532 /* always called with hostp unlocked */
1534 /* Note: hostlist is ordered most recently created host first and
1535 * its order has no relationship to the most recently used. */
1536 extern struct host *hostList;
1538 GetSomeSpace_r(struct host *hostp, int locked)
1540 register struct host *hp, *hp1, *hp2;
1543 cbstuff.GotSomeSpaces++;
1545 ("GSS: First looking for timed out call backs via CleanupCallBacks\n"));
1546 if (CleanupTimedOutCallBacks_r()) {
1556 h_Enumerate_r(i == 0 ? lih0_r : lih1_r, hp2, (char *)hp1);
1559 /* set in lih_r! private copy before giving up H_LOCK */
1560 int lih_host_held2=lih_host_held;
1562 if ((hp != hostp) && !ClearHostCallbacks_r(hp, 0 /* not locked or held */ )) {
1573 * Next time try getting callbacks from any host even if
1574 * it's deleted (that's actually great since we can freely
1575 * remove its callbacks) or it's held since the only other
1576 * option is starvation for the file server (i.e. until the
1577 * callback timeout arrives).
1584 ("GSS: Try harder for longest inactive host cnt= %d\n",
1589 /* Could not obtain space from other hosts, clear hostp's callback state */
1594 ClearHostCallbacks_r(hostp, 1 /*already locked */ );
1601 /* locked - set if caller has already locked the host */
1603 ClearHostCallbacks_r(struct host *hp, int locked)
1608 struct rx_connection *cb_conn = NULL;
1611 ("GSS: Delete longest inactive host %x (%s:%d)\n",
1612 hp, afs_inet_ntoa_r(hp->host, hoststr), ntohs(hp->port)));
1613 if (!(held = h_Held_r(hp)))
1616 /** Try a non-blocking lock. If the lock is already held return
1617 * after releasing hold on hp
1620 if (h_NBLock_r(hp)) {
1626 if (hp->Console & 2) {
1628 * If the special console field is set it means that a thread
1629 * is waiting in AddCallBack1 after it set pointers to the
1630 * file entry and/or callback entry. Because of the bogus
1631 * usage of h_hold it won't prevent another thread (this
1632 * one) from removing all the callbacks, so just to be safe we keep
1633 * a reference. NOTE, on the last phase we'll free the calling
1634 * host's callbacks but that's ok...
1638 DeleteAllCallBacks_r(hp, 1);
1639 if (hp->hostFlags & VENUSDOWN) {
1640 hp->hostFlags &= ~RESETDONE; /* remember that we must do a reset */
1642 /* host is up, try a call */
1643 hp->hostFlags &= ~ALTADDR; /* alternate addresses are invalid */
1644 cb_conn = hp->callback_rxcon;
1645 rx_GetConnection(hp->callback_rxcon);
1646 if (hp->interface) {
1649 RXAFSCB_InitCallBackState3(cb_conn, &FS_HostUUID);
1652 code = RXAFSCB_InitCallBackState(cb_conn);
1654 rx_PutConnection(cb_conn);
1657 hp->hostFlags |= ALTADDR; /* alternate addresses are valid */
1659 /* failed, mark host down and need reset */
1660 hp->hostFlags |= VENUSDOWN;
1661 hp->hostFlags &= ~RESETDONE;
1663 /* reset succeeded, we're done */
1664 hp->hostFlags |= RESETDONE;
1675 #endif /* INTERPRET_DUMP */
1679 PrintCallBackStats(void)
1682 "%d add CB, %d break CB, %d del CB, %d del FE, %d CB's timed out, %d space reclaim, %d del host\n",
1683 cbstuff.AddCallBacks, cbstuff.BreakCallBacks,
1684 cbstuff.DeleteCallBacks, cbstuff.DeleteFiles, cbstuff.CBsTimedOut,
1685 cbstuff.GotSomeSpaces, cbstuff.DeleteAllCallBacks);
1686 fprintf(stderr, "%d CBs, %d FEs, (%d of total of %d 16-byte blocks)\n",
1687 cbstuff.nCBs, cbstuff.nFEs, cbstuff.nCBs + cbstuff.nFEs,
1693 #define MAGIC 0x12345678 /* To check byte ordering of dump when it is read in */
1694 #define MAGICV2 0x12345679 /* To check byte ordering & version of dump when it is read in */
1697 #ifndef INTERPRET_DUMP
1699 #ifdef AFS_DEMAND_ATTACH_FS
1702 * callback state serialization
1704 static int cb_stateSaveTimeouts(struct fs_dump_state * state);
1705 static int cb_stateSaveFEHash(struct fs_dump_state * state);
1706 static int cb_stateSaveFEs(struct fs_dump_state * state);
1707 static int cb_stateSaveFE(struct fs_dump_state * state, struct FileEntry * fe);
1708 static int cb_stateRestoreTimeouts(struct fs_dump_state * state);
1709 static int cb_stateRestoreFEHash(struct fs_dump_state * state);
1710 static int cb_stateRestoreFEs(struct fs_dump_state * state);
1711 static int cb_stateRestoreFE(struct fs_dump_state * state);
1712 static int cb_stateRestoreCBs(struct fs_dump_state * state, struct FileEntry * fe,
1713 struct iovec * iov, int niovecs);
1715 static int cb_stateVerifyFEHash(struct fs_dump_state * state);
1716 static int cb_stateVerifyFE(struct fs_dump_state * state, struct FileEntry * fe);
1717 static int cb_stateVerifyFCBList(struct fs_dump_state * state, struct FileEntry * fe);
1718 static int cb_stateVerifyTimeoutQueues(struct fs_dump_state * state);
1720 static int cb_stateFEToDiskEntry(struct FileEntry *, struct FEDiskEntry *);
1721 static int cb_stateDiskEntryToFE(struct fs_dump_state * state,
1722 struct FEDiskEntry *, struct FileEntry *);
1724 static int cb_stateCBToDiskEntry(struct CallBack *, struct CBDiskEntry *);
1725 static int cb_stateDiskEntryToCB(struct fs_dump_state * state,
1726 struct CBDiskEntry *, struct CallBack *);
1728 static int cb_stateFillHeader(struct callback_state_header * hdr);
1729 static int cb_stateCheckHeader(struct callback_state_header * hdr);
1731 static int cb_stateAllocMap(struct fs_dump_state * state);
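
/* Rough layout of the saved callback state produced by the routines below
 * (a sketch of what cb_stateSave writes, in order; the exact record formats
 * are defined in serialize_state.h):
 *
 *     callback_state_header           (rewritten at the end with final counts)
 *     callback_state_timeout_header + timeout[]     timeout queue heads
 *     callback_state_fehash_header  + HashTable[]   FE hash chain heads
 *     for each FileEntry:
 *         callback_state_entry_header + FEDiskEntry + its CBDiskEntry records
 */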
1734 cb_stateSave(struct fs_dump_state * state)
1738 AssignInt64(state->eof_offset, &state->hdr->cb_offset);
1740 /* invalidate callback state header */
1741 memset(state->cb_hdr, 0, sizeof(struct callback_state_header));
1742 if (fs_stateWriteHeader(state, &state->hdr->cb_offset, state->cb_hdr,
1743 sizeof(struct callback_state_header))) {
1748 fs_stateIncEOF(state, sizeof(struct callback_state_header));
1750 /* dump timeout state */
1751 if (cb_stateSaveTimeouts(state)) {
1756 /* dump fe hashtable state */
1757 if (cb_stateSaveFEHash(state)) {
1762 /* dump callback state */
1763 if (cb_stateSaveFEs(state)) {
1768 /* write the callback state header to disk */
1769 cb_stateFillHeader(state->cb_hdr);
1770 if (fs_stateWriteHeader(state, &state->hdr->cb_offset, state->cb_hdr,
1771 sizeof(struct callback_state_header))) {
1781 cb_stateRestore(struct fs_dump_state * state)
1785 if (fs_stateReadHeader(state, &state->hdr->cb_offset, state->cb_hdr,
1786 sizeof(struct callback_state_header))) {
1791 if (cb_stateCheckHeader(state->cb_hdr)) {
1796 if (cb_stateAllocMap(state)) {
1801 if (cb_stateRestoreTimeouts(state)) {
1806 if (cb_stateRestoreFEHash(state)) {
1811 /* restore FEs and CBs from disk */
1812 if (cb_stateRestoreFEs(state)) {
1817 /* restore the timeout queue heads */
1818 tfirst = state->cb_hdr->tfirst;
1825 cb_stateRestoreIndices(struct fs_dump_state * state)
1828 struct FileEntry * fe;
1829 struct CallBack * cb;
1831 /* restore indices in the FileEntry structures */
1832 for (i = 1; i < state->fe_map.len; i++) {
1833 if (state->fe_map.entries[i].new_idx) {
1834 fe = itofe(state->fe_map.entries[i].new_idx);
1836 /* restore the fe->fnext entry */
1837 if (fe_OldToNew(state, fe->fnext, &fe->fnext)) {
1842 /* restore the fe->firstcb entry */
1843 if (cb_OldToNew(state, fe->firstcb, &fe->firstcb)) {
1850 /* restore indices in the CallBack structures */
1851 for (i = 1; i < state->cb_map.len; i++) {
1852 if (state->cb_map.entries[i].new_idx) {
1853 cb = itocb(state->cb_map.entries[i].new_idx);
1855 /* restore the cb->cnext entry */
1856 if (cb_OldToNew(state, cb->cnext, &cb->cnext)) {
1861 /* restore the cb->fhead entry */
1862 if (fe_OldToNew(state, cb->fhead, &cb->fhead)) {
1867 /* restore the cb->hhead entry */
1868 if (h_OldToNew(state, cb->hhead, &cb->hhead)) {
1873 /* restore the cb->tprev entry */
1874 if (cb_OldToNew(state, cb->tprev, &cb->tprev)) {
1879 /* restore the cb->tnext entry */
1880 if (cb_OldToNew(state, cb->tnext, &cb->tnext)) {
1885 /* restore the cb->hprev entry */
1886 if (cb_OldToNew(state, cb->hprev, &cb->hprev)) {
1891 /* restore the cb->hnext entry */
1892 if (cb_OldToNew(state, cb->hnext, &cb->hnext)) {
1899 /* restore the timeout queue head indices */
1900 for (i = 0; i < state->cb_timeout_hdr->records; i++) {
1901 if (cb_OldToNew(state, timeout[i], &timeout[i])) {
1907 /* restore the FE hash table queue heads */
1908 for (i = 0; i < state->cb_fehash_hdr->records; i++) {
1909 if (fe_OldToNew(state, HashTable[i], &HashTable[i])) {
1920 cb_stateVerify(struct fs_dump_state * state)
1924 if (cb_stateVerifyFEHash(state)) {
1928 if (cb_stateVerifyTimeoutQueues(state)) {
1937 cb_stateVerifyFEHash(struct fs_dump_state * state)
1940 struct FileEntry * fe;
1941 afs_uint32 fei, chain_len;
1943 for (i = 0; i < FEHASH_SIZE; i++) {
1945 for (fei = HashTable[i], fe = itofe(fei);
1947 fei = fe->fnext, fe = itofe(fei)) {
1948 if (fei > cbstuff.nblks) {
1949 ViceLog(0, ("cb_stateVerifyFEHash: error: index out of range (fei=%d)\n", fei));
1953 if (cb_stateVerifyFE(state, fe)) {
1956 if (chain_len > FS_STATE_FE_MAX_HASH_CHAIN_LEN) {
1957 ViceLog(0, ("cb_stateVerifyFEHash: error: hash chain %d length exceeds %d; assuming there's a loop\n",
1958 i, FS_STATE_FE_MAX_HASH_CHAIN_LEN));
1971 cb_stateVerifyFE(struct fs_dump_state * state, struct FileEntry * fe)
1975 if ((fe->firstcb && !fe->ncbs) ||
1976 (!fe->firstcb && fe->ncbs)) {
1977 ViceLog(0, ("cb_stateVerifyFE: error: fe->firstcb does not agree with fe->ncbs (fei=%d, fe->firstcb=%d, fe->ncbs=%d)\n",
1978 fetoi(fe), fe->firstcb, fe->ncbs));
1981 if (cb_stateVerifyFCBList(state, fe)) {
1982 ViceLog(0, ("cb_stateVerifyFE: error: FCBList failed verification (fei=%d)\n", fetoi(fe)));
1991 cb_stateVerifyFCBList(struct fs_dump_state * state, struct FileEntry * fe)
1994 afs_uint32 cbi, fei, chain_len = 0;
1995 struct CallBack * cb;
1999 for (cbi = fe->firstcb, cb = itocb(cbi);
2001 cbi = cb->cnext, cb = itocb(cbi)) {
2002 if (cbi > cbstuff.nblks) {
2003 ViceLog(0, ("cb_stateVerifyFCBList: error: list index out of range (cbi=%d, ncbs=%d)\n",
2004 cbi, cbstuff.nblks));
2008 if (cb->fhead != fei) {
2009 ViceLog(0, ("cb_stateVerifyFCBList: error: cb->fhead != fei (fei=%d, cb->fhead=%d)\n",
2013 if (chain_len > FS_STATE_FCB_MAX_LIST_LEN) {
2014 ViceLog(0, ("cb_stateVerifyFCBList: error: list length exceeds %d (fei=%d); assuming there's a loop\n",
2015 FS_STATE_FCB_MAX_LIST_LEN, fei));
2022 if (fe->ncbs != chain_len) {
2023 ViceLog(0, ("cb_stateVerifyFCBList: error: list length mismatch (len=%d, fe->ncbs=%d)\n",
2024 chain_len, fe->ncbs));
2033 cb_stateVerifyHCBList(struct fs_dump_state * state, struct host * host)
2036 afs_uint32 hi, chain_len, cbi;
2037 struct CallBack *cb, *ncb;
2042 for (cbi = host->cblist, cb = itocb(cbi);
2044 cbi = cb->hnext, cb = ncb) {
2045 if (chain_len && (host->cblist == cbi)) {
2046 /* we've wrapped around the circular list, and everything looks ok */
2049 if (cb->hhead != hi) {
2050 ViceLog(0, ("cb_stateVerifyHCBList: error: incorrect cb->hhead (cbi=%d, h->index=%d, cb->hhead=%d)\n",
2051 cbi, hi, cb->hhead));
2054 if (!cb->hprev || !cb->hnext) {
2055 ViceLog(0, ("cb_stateVerifyHCBList: error: null index in circular list (cbi=%d, h->index=%d)\n",
2060 if ((cb->hprev > cbstuff.nblks) ||
2061 (cb->hnext > cbstuff.nblks)) {
2062 ViceLog(0, ("cb_stateVerifyHCBList: error: list index out of range (cbi=%d, h->index=%d, cb->hprev=%d, cb->hnext=%d, nCBs=%d)\n",
2063 cbi, hi, cb->hprev, cb->hnext, cbstuff.nblks));
2067 ncb = itocb(cb->hnext);
2068 if (cbi != ncb->hprev) {
2069 ViceLog(0, ("cb_stateVerifyHCBList: error: corrupt linked list (cbi=%d, h->index=%d)\n",
2074 if (chain_len > FS_STATE_HCB_MAX_LIST_LEN) {
2075 ViceLog(0, ("cb_stateVerifyFCBList: error: list length exceeds %d (h->index=%d); assuming there's a loop\n",
2076 FS_STATE_HCB_MAX_LIST_LEN, hi));
2088 cb_stateVerifyTimeoutQueues(struct fs_dump_state * state)
2091 afs_uint32 cbi, chain_len;
2092 struct CallBack *cb, *ncb;
2094 for (i = 0; i < CB_NUM_TIMEOUT_QUEUES; i++) {
2096 for (cbi = timeout[i], cb = itocb(cbi);
2098 cbi = cb->tnext, cb = ncb) {
2099 if (chain_len && (cbi == timeout[i])) {
2100 /* we've wrapped around the circular list, and everything looks ok */
2103 if (cbi > cbstuff.nblks) {
2104 ViceLog(0, ("cb_stateVerifyTimeoutQueues: error: list index out of range (cbi=%d, tindex=%d)\n",
2109 if (itot(cb->thead) != &timeout[i]) {
2110 ViceLog(0, ("cb_stateVerifyTimeoutQueues: error: cb->thead points to wrong timeout queue (tindex=%d, cbi=%d, cb->thead=%d)\n",
2111 i, cbi, cb->thead));
2114 if (!cb->tprev || !cb->tnext) {
2115 ViceLog(0, ("cb_stateVerifyTimeoutQueues: null index in circular list (cbi=%d, tindex=%d)\n",
2120 if ((cb->tprev > cbstuff.nblks) ||
2121 (cb->tnext > cbstuff.nblks)) {
2122 ViceLog(0, ("cb_stateVerifyTimeoutQueues: list index out of range (cbi=%d, tindex=%d, cb->tprev=%d, cb->tnext=%d, nCBs=%d)\n",
2123 cbi, i, cb->tprev, cb->tnext, cbstuff.nblks));
2127 ncb = itocb(cb->tnext);
2128 if (cbi != ncb->tprev) {
2129 ViceLog(0, ("cb_stateVerifyTimeoutQueues: corrupt linked list (cbi=%d, tindex=%d)\n",
2134 if (chain_len > FS_STATE_TCB_MAX_LIST_LEN) {
2135 ViceLog(0, ("cb_stateVerifyTimeoutQueues: list length exceeds %d (tindex=%d); assuming there's a loop\n",
2136 FS_STATE_TCB_MAX_LIST_LEN, i));
2149 cb_stateSaveTimeouts(struct fs_dump_state * state)
2152 struct iovec iov[2];
2154 AssignInt64(state->eof_offset, &state->cb_hdr->timeout_offset);
2156 memset(state->cb_timeout_hdr, 0, sizeof(struct callback_state_fehash_header));
2157 state->cb_timeout_hdr->magic = CALLBACK_STATE_TIMEOUT_MAGIC;
2158 state->cb_timeout_hdr->records = CB_NUM_TIMEOUT_QUEUES;
2159 state->cb_timeout_hdr->len = sizeof(struct callback_state_timeout_header) +
2160 (state->cb_timeout_hdr->records * sizeof(afs_uint32));
2162 iov[0].iov_base = (char *)state->cb_timeout_hdr;
2163 iov[0].iov_len = sizeof(struct callback_state_timeout_header);
2164 iov[1].iov_base = (char *)timeout;
2165 iov[1].iov_len = sizeof(timeout);
2167 if (fs_stateSeek(state, &state->cb_hdr->timeout_offset)) {
2172 if (fs_stateWriteV(state, iov, 2)) {
2177 fs_stateIncEOF(state, state->cb_timeout_hdr->len);
2184 cb_stateRestoreTimeouts(struct fs_dump_state * state)
2188 if (fs_stateReadHeader(state, &state->cb_hdr->timeout_offset,
2189 state->cb_timeout_hdr,
2190 sizeof(struct callback_state_timeout_header))) {
2195 if (state->cb_timeout_hdr->magic != CALLBACK_STATE_TIMEOUT_MAGIC) {
2199 if (state->cb_timeout_hdr->records != CB_NUM_TIMEOUT_QUEUES) {
2204 len = state->cb_timeout_hdr->records * sizeof(afs_uint32);
2206 if (state->cb_timeout_hdr->len !=
2207 (sizeof(struct callback_state_timeout_header) + len)) {
2212 if (fs_stateRead(state, timeout, len)) {
2222 cb_stateSaveFEHash(struct fs_dump_state * state)
2225 struct iovec iov[2];
2227 AssignInt64(state->eof_offset, &state->cb_hdr->fehash_offset);
2229 memset(state->cb_fehash_hdr, 0, sizeof(struct callback_state_fehash_header));
2230 state->cb_fehash_hdr->magic = CALLBACK_STATE_FEHASH_MAGIC;
2231 state->cb_fehash_hdr->records = FEHASH_SIZE;
2232 state->cb_fehash_hdr->len = sizeof(struct callback_state_fehash_header) +
2233 (state->cb_fehash_hdr->records * sizeof(afs_uint32));
2235 iov[0].iov_base = (char *)state->cb_fehash_hdr;
2236 iov[0].iov_len = sizeof(struct callback_state_fehash_header);
2237 iov[1].iov_base = (char *)HashTable;
2238 iov[1].iov_len = sizeof(HashTable);
2240 if (fs_stateSeek(state, &state->cb_hdr->fehash_offset)) {
2245 if (fs_stateWriteV(state, iov, 2)) {
2250 fs_stateIncEOF(state, state->cb_fehash_hdr->len);
2257 cb_stateRestoreFEHash(struct fs_dump_state * state)
2261 if (fs_stateReadHeader(state, &state->cb_hdr->fehash_offset,
2262 state->cb_fehash_hdr,
2263 sizeof(struct callback_state_fehash_header))) {
2268 if (state->cb_fehash_hdr->magic != CALLBACK_STATE_FEHASH_MAGIC) {
2272 if (state->cb_fehash_hdr->records != FEHASH_SIZE) {
2277 len = state->cb_fehash_hdr->records * sizeof(afs_uint32);
2279 if (state->cb_fehash_hdr->len !=
2280 (sizeof(struct callback_state_fehash_header) + len)) {
2285 if (fs_stateRead(state, HashTable, len)) {
2295 cb_stateSaveFEs(struct fs_dump_state * state)
2298 register int fei, hash;
2299 register struct FileEntry *fe;
2301 AssignInt64(state->eof_offset, &state->cb_hdr->fe_offset);
2303 for (hash = 0; hash < FEHASH_SIZE ; hash++) {
2304 for (fei = HashTable[hash]; fei; fei = fe->fnext) {
2306 if (cb_stateSaveFE(state, fe)) {
2318 cb_stateRestoreFEs(struct fs_dump_state * state)
2320 int count, nFEs, ret = 0;
2322 nFEs = state->cb_hdr->nFEs;
2324 for (count = 0; count < nFEs; count++) {
2325 if (cb_stateRestoreFE(state)) {
2336 cb_stateSaveFE(struct fs_dump_state * state, struct FileEntry * fe)
2338 int ret = 0, iovcnt, cbi, written = 0;
2340 struct callback_state_entry_header hdr;
2341 struct FEDiskEntry fedsk;
2342 struct CBDiskEntry cbdsk[16];
2343 struct iovec iov[16];
2344 struct CallBack *cb;
2347 if (fei > state->cb_hdr->fe_max) {
2348 state->cb_hdr->fe_max = fei;
2351 memset(&hdr, 0, sizeof(struct callback_state_entry_header));
2353 if (cb_stateFEToDiskEntry(fe, &fedsk)) {
2358 iov[0].iov_base = (char *)&hdr;
2359 iov[0].iov_len = sizeof(hdr);
2360 iov[1].iov_base = (char *)&fedsk;
2361 iov[1].iov_len = sizeof(struct FEDiskEntry);
2364 for (cbi = fe->firstcb, cb = itocb(cbi);
2366 cbi = cb->cnext, cb = itocb(cbi), hdr.nCBs++) {
2367 if (cbi > state->cb_hdr->cb_max) {
2368 state->cb_hdr->cb_max = cbi;
2370 if (cb_stateCBToDiskEntry(cb, &cbdsk[iovcnt])) {
2374 cbdsk[iovcnt].index = cbi;
2375 iov[iovcnt].iov_base = (char *)&cbdsk[iovcnt];
2376 iov[iovcnt].iov_len = sizeof(struct CBDiskEntry);
2378 if ((iovcnt == 16) || (!cb->cnext)) {
2379 if (fs_stateWriteV(state, iov, iovcnt)) {
2388 hdr.magic = CALLBACK_STATE_ENTRY_MAGIC;
2389 hdr.len = sizeof(hdr) + sizeof(struct FEDiskEntry) +
2390 (hdr.nCBs * sizeof(struct CBDiskEntry));
2393 if (fs_stateWriteV(state, iov, iovcnt)) {
2398 if (fs_stateWriteHeader(state, &state->eof_offset, &hdr, sizeof(hdr))) {
2404 fs_stateIncEOF(state, hdr.len);
2407 if (fs_stateSeek(state, &state->eof_offset)) {
2413 state->cb_hdr->nFEs++;
2414 state->cb_hdr->nCBs += hdr.nCBs;
2421 cb_stateRestoreFE(struct fs_dump_state * state)
2423 int ret = 0, iovcnt, nCBs;
2424 struct callback_state_entry_header hdr;
2425 struct FEDiskEntry fedsk;
2426 struct CBDiskEntry cbdsk[16];
2427 struct iovec iov[16];
2428 struct FileEntry * fe;
2429 struct CallBack * cb;
2431 iov[0].iov_base = (char *)&hdr;
2432 iov[0].iov_len = sizeof(hdr);
2433 iov[1].iov_base = (char *)&fedsk;
2434 iov[1].iov_len = sizeof(fedsk);
2437 if (fs_stateReadV(state, iov, iovcnt)) {
2442 if (hdr.magic != CALLBACK_STATE_ENTRY_MAGIC) {
2449 ViceLog(0, ("cb_stateRestoreFE: ran out of free FileEntry structures\n"));
2454 if (cb_stateDiskEntryToFE(state, &fedsk, fe)) {
2460 for (iovcnt = 0, nCBs = 0;
2463 iov[iovcnt].iov_base = (char *)&cbdsk[iovcnt];
2464 iov[iovcnt].iov_len = sizeof(struct CBDiskEntry);
2466 if ((iovcnt == 16) || (nCBs == hdr.nCBs - 1)) {
2467 if (fs_stateReadV(state, iov, iovcnt)) {
2471 if (cb_stateRestoreCBs(state, fe, iov, iovcnt)) {
2485 cb_stateRestoreCBs(struct fs_dump_state * state, struct FileEntry * fe,
2486 struct iovec * iov, int niovecs)
2489 register struct CallBack * cb;
2490 struct CBDiskEntry * cbdsk;
2495 for (idx = 0; idx < niovecs; idx++) {
2496 cbdsk = (struct CBDiskEntry *) iov[idx].iov_base;
2497 if ((cb = GetCB()) == NULL) {
2498 ViceLog(0, ("cb_stateRestoreCBs: ran out of free CallBack structures\n"));
2502 if (cb_stateDiskEntryToCB(state, cbdsk, cb)) {
2503 ViceLog(0, ("cb_stateRestoreCBs: corrupt CallBack disk entry\n"));
2515 cb_stateFillHeader(struct callback_state_header * hdr)
2517 hdr->stamp.magic = CALLBACK_STATE_MAGIC;
2518 hdr->stamp.version = CALLBACK_STATE_VERSION;
2519 hdr->tfirst = tfirst;
2524 cb_stateCheckHeader(struct callback_state_header * hdr)
2528 if (hdr->stamp.magic != CALLBACK_STATE_MAGIC) {
2530 } else if (hdr->stamp.version != CALLBACK_STATE_VERSION) {
2532 } else if ((hdr->nFEs > cbstuff.nblks) || (hdr->nCBs > cbstuff.nblks)) {
2533 ViceLog(0, ("cb_stateCheckHeader: saved callback state larger than callback memory allocation\n"));
2539 /* disk entry conversion routines */
2541 cb_stateFEToDiskEntry(struct FileEntry * in, struct FEDiskEntry * out)
2543 memcpy(&out->fe, in, sizeof(struct FileEntry));
2544 out->index = fetoi(in);
2549 cb_stateDiskEntryToFE(struct fs_dump_state * state,
2550 struct FEDiskEntry * in, struct FileEntry * out)
2554 memcpy(out, &in->fe, sizeof(struct FileEntry));
2556 /* setup FE map entry */
2557 if (!in->index || (in->index >= state->fe_map.len)) {
2558 ViceLog(0, ("cb_stateDiskEntryToFE: index (%d) out of range\n",
2563 state->fe_map.entries[in->index].old_idx = in->index;
2564 state->fe_map.entries[in->index].new_idx = fetoi(out);
2571 cb_stateCBToDiskEntry(struct CallBack * in, struct CBDiskEntry * out)
2573 memcpy(&out->cb, in, sizeof(struct CallBack));
2574 out->index = cbtoi(in);
2579 cb_stateDiskEntryToCB(struct fs_dump_state * state,
2580 struct CBDiskEntry * in, struct CallBack * out)
2584 memcpy(out, &in->cb, sizeof(struct CallBack));
2586 /* setup CB map entry */
2587 if (!in->index || (in->index >= state->cb_map.len)) {
2588 ViceLog(0, ("cb_stateDiskEntryToCB: index (%d) out of range\n",
2593 state->cb_map.entries[in->index].old_idx = in->index;
2594 state->cb_map.entries[in->index].new_idx = cbtoi(out);
2600 /* index map routines */
2602 cb_stateAllocMap(struct fs_dump_state * state)
2604 state->fe_map.len = state->cb_hdr->fe_max + 1;
2605 state->cb_map.len = state->cb_hdr->cb_max + 1;
2606 state->fe_map.entries = (struct idx_map_entry_t *)
2607 calloc(state->fe_map.len, sizeof(struct idx_map_entry_t));
2608 state->cb_map.entries = (struct idx_map_entry_t *)
2609 calloc(state->cb_map.len, sizeof(struct idx_map_entry_t));
2610 return ((state->fe_map.entries != NULL) && (state->cb_map.entries != NULL)) ? 0 : 1;
2614 fe_OldToNew(struct fs_dump_state * state, afs_uint32 old, afs_uint32 * new)
2618 /* FEs use a one-based indexing system, so old==0 implies no mapping (see the standalone sketch after this section) */
2624 if (old >= state->fe_map.len) {
2625 ViceLog(0, ("fe_OldToNew: index %d is out of range\n", old));
2627 } else if (state->fe_map.entries[old].old_idx != old) { /* sanity check */
2628 ViceLog(0, ("fe_OldToNew: index %d points to an invalid FileEntry record\n", old));
2631 *new = state->fe_map.entries[old].new_idx;
2639 cb_OldToNew(struct fs_dump_state * state, afs_uint32 old, afs_uint32 * new)
2643 /* CBs use a one-based indexing system, so old==0 implies no mapping */
2649 if (old >= state->cb_map.len) {
2650 ViceLog(0, ("cb_OldToNew: index %d is out of range\n", old));
2652 } else if (state->cb_map.entries[old].old_idx != old) { /* sanity check */
2653 ViceLog(0, ("cb_OldToNew: index %d points to an invalid CallBack record\n", old));
2656 *new = state->cb_map.entries[old].new_idx;
2662 #endif /* AFS_DEMAND_ATTACH_FS */
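/*
 * Illustrative sketch (not part of the build, hence #if 0): the index maps
 * used by fe_OldToNew/cb_OldToNew above translate the one-based indices
 * recorded in a saved state file into the indices the entries were given
 * when they were re-allocated during restore.  The simplified names below
 * (ex_map, ex_map_entry, ex_map_lookup) are hypothetical and exist only for
 * this example.
 */
#if 0
struct ex_map_entry {
    unsigned int old_idx;	/* index recorded in the dump (1-based, 0 = none) */
    unsigned int new_idx;	/* index assigned when the entry was restored */
};

struct ex_map {
    unsigned int len;		/* allocated as max index + 1 */
    struct ex_map_entry *entries;
};

/* returns 0 on success and stores the translated index in *out */
static int
ex_map_lookup(struct ex_map *map, unsigned int old, unsigned int *out)
{
    if (!old)				/* 0 means "no entry"; nothing to map */
	return 1;
    if (old >= map->len)		/* never recorded in the dump */
	return 1;
    if (map->entries[old].old_idx != old)	/* slot was never filled in */
	return 1;
    *out = map->entries[old].new_idx;
    return 0;
}
#endif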
2665 DumpCallBackState(void)
2668 afs_uint32 magic = MAGICV2, now = (afs_int32) FT_ApproxTime(), freelisthead;
2670 oflag = O_WRONLY | O_CREAT | O_TRUNC;
2674 fd = open(AFSDIR_SERVER_CBKDUMP_FILEPATH, oflag, 0666);
2677 ("Couldn't create callback dump file %s\n",
2678 AFSDIR_SERVER_CBKDUMP_FILEPATH));
2681 (void)write(fd, &magic, sizeof(magic));
2682 (void)write(fd, &now, sizeof(now));
2683 (void)write(fd, &cbstuff, sizeof(cbstuff));
2684 (void)write(fd, TimeOuts, sizeof(TimeOuts));
2685 (void)write(fd, timeout, sizeof(timeout));
2686 (void)write(fd, &tfirst, sizeof(tfirst));
2687 freelisthead = cbtoi((struct CallBack *)CBfree);
2688 (void)write(fd, &freelisthead, sizeof(freelisthead)); /* CBfree is a pointer; dumped as an index */
2689 freelisthead = fetoi((struct FileEntry *)FEfree);
2690 (void)write(fd, &freelisthead, sizeof(freelisthead)); /* FEfree is a pointer; dumped as an index */
2691 (void)write(fd, HashTable, sizeof(HashTable));
2692 (void)write(fd, &CB[1], sizeof(CB[1]) * cbstuff.nblks); /* CB stuff */
2693 (void)write(fd, &FE[1], sizeof(FE[1]) * cbstuff.nblks); /* FE stuff */
2699 #endif /* !INTERPRET_DUMP */
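/*
 * Illustrative sketch (not compiled): the dump written by DumpCallBackState
 * above is a flat sequence of fixed-size records -- magic word, dump time,
 * the cbstuff statistics block, the TimeOuts and timeout tables, tfirst, the
 * CB and FE free-list heads (stored as indices rather than pointers), the
 * hash table, and finally the CB and FE arrays.  The reader below only peeks
 * at the leading magic word and a 32-bit timestamp; peek_dump_header and
 * expected_magic are names invented for this example, and a dump written
 * with -timebits 64 would carry a 64-bit time instead.
 */
#if 0
#include <fcntl.h>
#include <unistd.h>
#include <stdio.h>
#include <stdint.h>

static int
peek_dump_header(const char *path, uint32_t expected_magic)
{
    uint32_t magic = 0, when = 0;
    int fd = open(path, O_RDONLY);

    if (fd < 0) {
	perror(path);
	return -1;
    }
    if (read(fd, &magic, sizeof(magic)) != sizeof(magic)
	|| read(fd, &when, sizeof(when)) != sizeof(when)) {
	fprintf(stderr, "%s: short read\n", path);
	close(fd);
	return -1;
    }
    close(fd);
    if (magic != expected_magic) {
	/* most likely a dump from a machine with a different byte order */
	fprintf(stderr, "%s: unexpected magic 0x%x\n", path, (unsigned int)magic);
	return -1;
    }
    printf("dump taken at %u\n", (unsigned int)when);
    return 0;
}
#endif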
2701 #ifdef INTERPRET_DUMP
2703 /* This is only compiled in for the callback analyzer program */
2704 /* Returns the time of the dump */
2706 ReadDump(char *file, int timebits)
2709 afs_uint32 magic, freelisthead;
2711 #ifdef AFS_64BIT_ENV
2719 fd = open(file, oflag);
2721 fprintf(stderr, "Couldn't read dump file %s\n", file);
2724 read(fd, &magic, sizeof(magic));
2725 if (magic == MAGICV2) {
2728 if (magic != MAGIC) {
2730 "Magic number of %s is invalid. You might be trying to\n",
2733 "run this program on a machine type with a different byte ordering.\n");
2737 #ifdef AFS_64BIT_ENV
2738 if (timebits == 64) {
2739 read(fd, &now64, sizeof(afs_int64));
2740 now = (afs_int32) now64;
2743 read(fd, &now, sizeof(afs_int32));
2744 read(fd, &cbstuff, sizeof(cbstuff));
2745 read(fd, TimeOuts, sizeof(TimeOuts));
2746 read(fd, timeout, sizeof(timeout));
2747 read(fd, &tfirst, sizeof(tfirst));
2748 read(fd, &freelisthead, sizeof(freelisthead));
2749 CB = ((struct CallBack
2750 *)(calloc(cbstuff.nblks, sizeof(struct CallBack)))) - 1;
2751 FE = ((struct FileEntry
2752 *)(calloc(cbstuff.nblks, sizeof(struct FileEntry)))) - 1;
2753 CBfree = (struct CallBack *)itocb(freelisthead);
2754 read(fd, &freelisthead, sizeof(freelisthead));
2755 FEfree = (struct FileEntry *)itofe(freelisthead);
2756 read(fd, HashTable, sizeof(HashTable));
2757 read(fd, &CB[1], sizeof(CB[1]) * cbstuff.nblks); /* CB stuff */
2758 read(fd, &FE[1], sizeof(FE[1]) * cbstuff.nblks); /* FE stuff */
2760 perror("Error reading dumpfile");
2767 #include "AFS_component_version_number.h"
2769 #include "AFS_component_version_number.c"
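/*
 * Example invocations of the analyzer built from this file (a sketch; the
 * dump file name below is illustrative -- the fileserver writes its dump to
 * AFSDIR_SERVER_CBKDUMP_FILEPATH, see DumpCallBackState above):
 *
 *   cbd -stats callback.dump          print summary statistics only
 *   cbd -all callback.dump            walk every FileEntry and its CallBacks
 *   cbd -volume 536870912 -all callback.dump
 *                                     restrict the walk to one volume id
 *                                     (the volume number is only an example)
 */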
2773 main(int argc, char **argv)
2775 int err = 0, cbi = 0, stats = 0, noptions = 0, all = 0, vol = 0, raw = 0;
2777 register struct FileEntry *fe;
2778 register struct CallBack *cb;
2782 memset(&fid, 0, sizeof(fid));
2785 while (argc && **argv == '-') {
2788 if (!strcmp(*argv, "-host")) {
2794 cbi = atoi(*++argv);
2795 } else if (!strcmp(*argv, "-fid")) {
2801 fid.Volume = atoi(*++argv);
2802 fid.Vnode = atoi(*++argv);
2803 fid.Unique = atoi(*++argv);
2804 } else if (!strcmp(*argv, "-time")) {
2805 fprintf(stderr, "-time not supported\n");
2807 } else if (!strcmp(*argv, "-stats")) {
2809 } else if (!strcmp(*argv, "-all")) {
2811 } else if (!strcmp(*argv, "-raw")) {
2813 } else if (!strcmp(*argv, "-timebits")) {
2819 timebits = atoi(*++argv);
2820 if ((timebits != 32)
2821 #ifdef AFS_64BIT_ENV
2826 } else if (!strcmp(*argv, "-volume")) {
2832 vol = atoi(*++argv);
2837 if (err || argc != 1) {
2839 "Usage: cbd [-host cbid] [-fid volume vnode] [-stats] [-all] [-timebits 32"
2840 #ifdef AFS_64BIT_ENV
2843 "] callbackdumpfile\n");
2845 "[cbid is shown for each host in the hosts.dump file]\n");
2848 now = ReadDump(*argv, timebits);
2849 if (stats || noptions == 0) {
2850 time_t uxtfirst = UXtime(tfirst), tnow = now;
2851 printf("The time of the dump was %u %s", (unsigned int) now, ctime(&tnow));
2852 printf("The last time cleanup ran was %u %s", (unsigned int) uxtfirst,
2854 PrintCallBackStats();
2859 struct CallBack *cb;
2860 struct FileEntry *fe;
2862 for (hash = 0; hash < FEHASH_SIZE; hash++) {
2863 for (feip = &HashTable[hash]; (fe = itofe(*feip));) {
2864 if (!vol || (fe->volid == vol)) {
2865 register struct CallBack *cbnext;
2866 for (cb = itocb(fe->firstcb); cb; cb = cbnext) {
2868 cbnext = itocb(cb->cnext);
2878 afs_uint32 cfirst = cbi;
2883 } while (cbi != cfirst);
2888 printf("No callback entries for %u.%u\n", fid.Volume, fid.Vnode);
2891 cb = itocb(fe->firstcb);
2894 cb = itocb(cb->cnext);
2899 for (i = 1; i < cbstuff.nblks; i++) {
2900 p = (afs_int32 *) & FE[i];
2901 printf("%d:%12x%12x%12x%12x\n", i, p[0], p[1], p[2], p[3]);
2908 PrintCB(register struct CallBack *cb, afs_uint32 now)
2910 struct FileEntry *fe = itofe(cb->fhead);
2911 time_t expires = TIndexToTime(cb->thead);
2916 printf("vol=%u vn=%u cbs=%d hi=%d st=%d fest=%d, exp in %lu secs at %s",
2917 fe->volid, fe->vnode, fe->ncbs, cb->hhead, cb->status, fe->status,
2918 expires - now, ctime(&expires));
2923 #if !defined(INTERPRET_DUMP)
2925 ** try breaking callbacks on afidp from host. Uses multi_Rx; a simplified serial sketch of this pattern appears at the end of this file.
2926 ** return 0 on success, non-zero on failure
2929 MultiBreakCallBackAlternateAddress(struct host *host, struct AFSCBFids *afidp)
2933 retVal = MultiBreakCallBackAlternateAddress_r(host, afidp);
2939 MultiBreakCallBackAlternateAddress_r(struct host *host,
2940 struct AFSCBFids *afidp)
2943 struct rx_connection **conns;
2944 struct rx_connection *connSuccess = 0;
2945 struct AddrPort *interfaces;
2946 static struct rx_securityClass *sc = 0;
2947 static struct AFSCBs tc = { 0, 0 };
2950 /* nothing more can be done */
2951 if (!host->interface)
2952 return 1; /* failure */
2954 /* the only address is the primary interface */
2955 if (host->interface->numberOfInterfaces <= 1)
2956 return 1; /* failure */
2958 /* initialize a security object only once */
2960 sc = rxnull_NewClientSecurityObject();
2962 i = host->interface->numberOfInterfaces;
2963 interfaces = calloc(i, sizeof(struct AddrPort));
2964 conns = calloc(i, sizeof(struct rx_connection *));
2965 if (!interfaces || !conns) {
2967 ("Failed malloc in MultiBreakCallBackAlternateAddress_r\n"));
2971 /* initialize alternate rx connections */
2972 for (i = 0, j = 0; i < host->interface->numberOfInterfaces; i++) {
2973 /* this is the current primary address */
2974 if (host->host == host->interface->interface[i].addr &&
2975 host->port == host->interface->interface[i].port)
2978 interfaces[j] = host->interface->interface[i];
2980 rx_NewConnection(interfaces[j].addr,
2981 interfaces[j].port, 1, sc, 0);
2982 rx_SetConnDeadTime(conns[j], 2);
2983 rx_SetConnHardDeadTime(conns[j], AFS_HARDDEADTIME);
2987 assert(j); /* at least one alternate address */
2989 ("Starting multibreakcall back on all addr for host %x (%s:%d)\n",
2990 host, afs_inet_ntoa_r(host->host, hoststr), ntohs(host->port)));
2992 multi_Rx(conns, j) {
2993 multi_RXAFSCB_CallBack(afidp, &tc);
2997 if (host->callback_rxcon)
2998 rx_DestroyConnection(host->callback_rxcon);
2999 host->callback_rxcon = conns[multi_i];
3000 h_DeleteHostFromAddrHashTable_r(host->host, host->port, host);
3001 host->host = interfaces[multi_i].addr;
3002 host->port = interfaces[multi_i].port;
3003 h_AddHostToAddrHashTable_r(host->host, host->port, host);
3004 connSuccess = conns[multi_i];
3005 rx_SetConnDeadTime(host->callback_rxcon, 50);
3006 rx_SetConnHardDeadTime(host->callback_rxcon, AFS_HARDDEADTIME);
3008 ("multibreakcall success with addr %s:%d\n",
3009 afs_inet_ntoa_r(interfaces[multi_i].addr, hoststr),
3010 ntohs(interfaces[multi_i].port)));
3017 /* Destroy all connections except the one on which we succeeded */
3018 for (i = 0; i < j; i++)
3019 if (conns[i] != connSuccess)
3020 rx_DestroyConnection(conns[i]);
3026 return 0; /* success */
3028 return 1; /* failure */
3033 ** try multi_RX probes to host.
3034 ** return 0 on success, non-0 on failure
3037 MultiProbeAlternateAddress_r(struct host *host)
3040 struct rx_connection **conns;
3041 struct rx_connection *connSuccess = 0;
3042 struct AddrPort *interfaces;
3043 static struct rx_securityClass *sc = 0;
3046 /* nothing more can be done */
3047 if (!host->interface)
3048 return 1; /* failure */
3050 /* the only address is the primary interface */
3051 if (host->interface->numberOfInterfaces <= 1)
3052 return 1; /* failure */
3054 /* initialize a security object only once */
3056 sc = rxnull_NewClientSecurityObject();
3058 i = host->interface->numberOfInterfaces;
3059 interfaces = calloc(i, sizeof(struct AddrPort));
3060 conns = calloc(i, sizeof(struct rx_connection *));
3061 if (!interfaces || !conns) {
3062 ViceLog(0, ("Failed malloc in MultiProbeAlternateAddress_r\n"));
3066 /* initialize alternate rx connections */
3067 for (i = 0, j = 0; i < host->interface->numberOfInterfaces; i++) {
3068 /* this is the current primary address */
3069 if (host->host == host->interface->interface[i].addr &&
3070 host->port == host->interface->interface[i].port)
3073 interfaces[j] = host->interface->interface[i];
3075 rx_NewConnection(interfaces[j].addr,
3076 interfaces[j].port, 1, sc, 0);
3077 rx_SetConnDeadTime(conns[j], 2);
3078 rx_SetConnHardDeadTime(conns[j], AFS_HARDDEADTIME);
3082 assert(j); /* at least one alternate address */
3084 ("Starting multiprobe on all addr for host %x (%s:%d)\n",
3085 host, afs_inet_ntoa_r(host->host, hoststr),
3086 ntohs(host->port)));
3088 multi_Rx(conns, j) {
3089 multi_RXAFSCB_ProbeUuid(&host->interface->uuid);
3093 if (host->callback_rxcon)
3094 rx_DestroyConnection(host->callback_rxcon);
3095 host->callback_rxcon = conns[multi_i];
3096 h_DeleteHostFromAddrHashTable_r(host->host, host->port, host);
3097 host->host = interfaces[multi_i].addr;
3098 host->port = interfaces[multi_i].port;
3099 h_AddHostToAddrHashTable_r(host->host, host->port, host);
3100 connSuccess = conns[multi_i];
3101 rx_SetConnDeadTime(host->callback_rxcon, 50);
3102 rx_SetConnHardDeadTime(host->callback_rxcon, AFS_HARDDEADTIME);
3104 ("multiprobe success with addr %s:%d\n",
3105 afs_inet_ntoa_r(interfaces[multi_i].addr, hoststr),
3106 ntohs(interfaces[multi_i].port)));
3111 ("multiprobe failure with addr %s:%d\n",
3112 afs_inet_ntoa_r(interfaces[multi_i].addr, hoststr),
3113 ntohs(interfaces[multi_i].port)));
3115 /* This is less than desirable, but it's the best we can do.
3116 * The AFS Cache Manager returns either 0 for a Uuid
3117 * match or 1 for a non-match. If the error is 1 we
3118 * therefore know that our mapping of IP address to Uuid
3119 * is wrong. We should attempt to find the correct
3120 * Uuid and fix the host tables.
3122 if (multi_error == 1) {
3123 /* remove the current alternate address from this host */
3125 removeInterfaceAddr_r(host, interfaces[multi_i].addr, interfaces[multi_i].port);
3129 #ifdef AFS_DEMAND_ATTACH_FS
3130 /* try to bail ASAP if the fileserver is shutting down */
3132 if (fs_state.mode == FS_MODE_SHUTDOWN) {
3141 /* Destroy all connections except the one on which we succeeded */
3142 for (i = 0; i < j; i++)
3143 if (conns[i] != connSuccess)
3144 rx_DestroyConnection(conns[i]);
3150 return 0; /* success */
3152 return 1; /* failure */
3155 #endif /* !defined(INTERPRET_DUMP) */
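/*
 * Illustrative sketch (not compiled): MultiBreakCallBackAlternateAddress_r
 * and MultiProbeAlternateAddress_r above fan one RPC out to every known
 * alternate address with the Rx "multi" call interface and adopt the first
 * connection that answers, destroying the rest.  The serial equivalent below
 * shows the same bookkeeping without multi_Rx; try_alternates and probe_fn
 * are hypothetical names used only for this example.
 */
#if 0
#include <rx/rx.h>

/* probe_fn issues one RPC over conn and returns 0 on success */
static struct rx_connection *
try_alternates(struct rx_connection **conns, int nconns,
	       int (*probe_fn) (struct rx_connection *))
{
    int i;
    struct rx_connection *winner = NULL;

    for (i = 0; i < nconns; i++) {
	if (!winner && probe_fn(conns[i]) == 0) {
	    winner = conns[i];	/* caller adopts this connection */
	    continue;
	}
	rx_DestroyConnection(conns[i]);	/* everything else is released */
    }
    return winner;
}
#endif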