/*
 * Copyright 2000, International Business Machines Corporation and others.
 * All Rights Reserved.
 *
 * This software has been released under the terms of the IBM Public
 * License.  For details, see the LICENSE file in the top-level source
 * directory or online at http://www.openafs.org/dl/license10.html
 */
#include <afsconfig.h>
#include "../afs/param.h"

#include "../afs/sysincludes.h"	/* Standard vendor system headers */
#include "../afs/afsincludes.h"	/* Afs-based standard headers */
#include "../afs/afs_stats.h"	/* statistics gathering code */
#include "../afs/afs_cbqueue.h"

#include <sys/adspace.h>	/* for vm_att(), vm_det() */
/* background request queue size */
afs_lock_t afs_xbrs;		/* lock for brs */
static int brsInit = 0;
short afs_brsWaiters = 0;	/* number of users waiting for brs buffers */
short afs_brsDaemons = 0;	/* number of daemons waiting for brs requests */
struct brequest afs_brs[NBRS];	/* request structures */
struct afs_osi_WaitHandle AFS_WaitHandler, AFS_CSWaitHandler;

static int rxepoch_checked = 0;
#define afs_CheckRXEpoch() { if (rxepoch_checked == 0 && rxkad_EpochWasSet) { \
	rxepoch_checked = 1; afs_GCUserData(/* force flag */ 1); } }
extern char afs_rootVolumeName[];
extern struct vcache *afs_globalVp;
extern struct VenusFid afs_rootFid;
extern struct osi_dev cacheDev;
extern char *afs_indexFlags;
extern afs_rwlock_t afs_xvcache;
extern struct afs_exporter *afs_nfsexporter;
extern int cacheDiskType;
extern int afs_BumpBase();
extern void afs_CheckCallbacks();
/* PAG garbage collection */
/* We induce a compile error if param.h does not define AFS_GCPAGS */
afs_int32 afs_gcpags = AFS_GCPAGS;
afs_int32 afs_gcpags_procsize;

afs_int32 afs_CheckServerDaemonStarted = 0;
afs_int32 PROBE_INTERVAL = 180;	/* default to 3 min */

#define PROBE_WAIT() (1000 * (PROBE_INTERVAL - ((afs_random() & 0x7fffffff) \
			      % (PROBE_INTERVAL/2))))
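/*
 * Arithmetic note: the random term above lies in [0, PROBE_INTERVAL/2),
 * so PROBE_WAIT() evaluates to a value in (PROBE_INTERVAL/2,
 * PROBE_INTERVAL] seconds, scaled to milliseconds.  With the default
 * PROBE_INTERVAL of 180 that is a wait of just over 90 and at most 180
 * seconds, staggering the probes of clients that booted together.
 */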
afs_CheckServerDaemon()
{
    afs_int32 now, delay, lastCheck, last10MinCheck;

    afs_CheckServerDaemonStarted = 1;

    while (afs_initState < 101) afs_osi_Sleep(&afs_initState);
    afs_osi_Wait(PROBE_WAIT(), &AFS_CSWaitHandler, 0);

    last10MinCheck = lastCheck = osi_Time();
    while (1) {
        if (afs_termState == AFSOP_STOP_CS) {
            afs_termState = AFSOP_STOP_BKG;
            afs_osi_Wakeup(&afs_termState);
            break;
        }

        now = osi_Time();
        if (PROBE_INTERVAL + lastCheck <= now) {
            afs_CheckServers(1, (struct cell *) 0);	/* check down servers */
            lastCheck = now = osi_Time();
        }

        if (600 + last10MinCheck <= now) {
            afs_Trace1(afs_iclSetp, CM_TRACE_PROBEUP, ICL_TYPE_INT32, 600);
            afs_CheckServers(0, (struct cell *) 0);
            last10MinCheck = now = osi_Time();
        }

        if (afs_termState == AFSOP_STOP_CS) {
            afs_termState = AFSOP_STOP_BKG;
            afs_osi_Wakeup(&afs_termState);
            break;
        }

        /* Compute time to next probe. */
        delay = PROBE_INTERVAL + lastCheck;
        if (delay > 600 + last10MinCheck)
            delay = 600 + last10MinCheck;
        delay -= now;
        if (delay < 1)
            delay = 1;
        afs_osi_Wait(delay * 1000, &AFS_CSWaitHandler, 0);
    }
    afs_CheckServerDaemonStarted = 0;
}
void afs_Daemon() {
    afs_int32 code;
    extern struct afs_exporter *root_exported;
    struct afs_exporter *exporter;
    afs_int32 now;
    afs_int32 last3MinCheck, last10MinCheck, last60MinCheck, lastNMinCheck;
    afs_int32 last1MinCheck;
    afs_uint32 lastCBSlotBump;

    AFS_STATCNT(afs_Daemon);
    last1MinCheck = last3MinCheck = last60MinCheck = last10MinCheck = lastNMinCheck = 0;

    afs_rootFid.Fid.Volume = 0;
    while (afs_initState < 101) afs_osi_Sleep(&afs_initState);

    now = osi_Time();
    lastCBSlotBump = now;
    /* when a lot of clients are booted simultaneously, they develop
     * annoying synchronous VL server bashing behaviors.  So we stagger them.
     */
    last1MinCheck = now + ((afs_random() & 0x7fffffff) % 60);	/* an extra 30 */
    last3MinCheck = now - 90 + ((afs_random() & 0x7fffffff) % 180);
    last60MinCheck = now - 1800 + ((afs_random() & 0x7fffffff) % 3600);
    last10MinCheck = now - 300 + ((afs_random() & 0x7fffffff) % 600);
    lastNMinCheck = now - 90 + ((afs_random() & 0x7fffffff) % 180);
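    /*
     * The effect of the randomization above: each check's start time is
     * scattered across one full period (last3MinCheck, for example, lands
     * uniformly in [now - 90, now + 90)), so the periodic checks below
     * first fire at random offsets rather than in lockstep across a lab
     * full of freshly booted clients.
     */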
    /* start off with afs_initState >= 101 (basic init done) */
    while (1) {
        afs_CheckCallbacks(20);	/* unstat anything which will expire soon */

        /* things to do every 20 seconds or less - required by protocol spec */
        if (afs_nfsexporter)
            afs_FlushActiveVcaches(0);	/* flush NFS writes */
        afs_FlushVCBs(1);		/* flush queued callbacks */
        afs_MaybeWakeupTruncateDaemon();	/* free cache space if we have to */
        rx_CheckPackets();		/* Does RX need more packets? */
#if defined(AFS_AIX32_ENV) || defined(AFS_HPUX_ENV)
        /*
         * Hack: we always want plenty of free entries in the small free
         * pool, so that rx (which runs with interrupts disabled) never
         * has to call malloc.  So we make the dummy call below...
         */
        if (((afs_stats_cmperf.SmallBlocksAlloced - afs_stats_cmperf.SmallBlocksActive)
             <= AFS_SALLOC_LOW_WATER))
            osi_FreeSmallSpace(osi_AllocSmallSpace(AFS_SMALLOCSIZ));
        if (((afs_stats_cmperf.MediumBlocksAlloced - afs_stats_cmperf.MediumBlocksActive)
             <= AFS_MALLOC_LOW_WATER+50))
            osi_AllocMoreMSpace(AFS_MALLOC_LOW_WATER * 2);
#endif

        now = osi_Time();
        if (lastCBSlotBump + CBHTSLOTLEN < now) {	/* pretty time-dependent */
            lastCBSlotBump = now;
            if (afs_BumpBase()) {
                afs_CheckCallbacks(20);	/* unstat anything which will expire soon */
            }
        }
        if (last1MinCheck + 60 < now) {
            /* things to do every minute */
            DFlush();			/* write out dir buffers */
            afs_WriteThroughDSlots();	/* write through cacheinfo entries */
            afs_FlushActiveVcaches(1);	/* keep flocks held & flush nfs writes */
            afs_CheckRXEpoch();
            last1MinCheck = now;
        }

        if (last3MinCheck + 180 < now) {
            afs_CheckTokenCache();	/* check for access cache resets due to
					 * expired tickets */
            last3MinCheck = now;
        }
        if (!afs_CheckServerDaemonStarted) {
            /* Do the check here if the correct afsd is not installed. */
            printf("Please install afsd with check server daemon.\n");
            if (lastNMinCheck + PROBE_INTERVAL < now) {
                /* only check down servers */
                afs_CheckServers(1, (struct cell *) 0);
                lastNMinCheck = now;
            }
        }
        if (last10MinCheck + 600 < now) {
#ifdef AFS_USERSPACE_IP_ADDR
            extern int rxi_GetcbiInfo(void);
#endif
            afs_Trace1(afs_iclSetp, CM_TRACE_PROBEUP, ICL_TYPE_INT32, 600);
#ifdef AFS_USERSPACE_IP_ADDR
            if (rxi_GetcbiInfo()) {	/* addresses changed from last time */
                afs_FlushCBs();
            }
#else /* AFS_USERSPACE_IP_ADDR */
            if (rxi_GetIFInfo()) {	/* addresses changed from last time */
                afs_FlushCBs();
            }
#endif /* else AFS_USERSPACE_IP_ADDR */
            if (!afs_CheckServerDaemonStarted)
                afs_CheckServers(0, (struct cell *) 0);
            afs_GCUserData(0);		/* gc old conns */
            /* This is probably the wrong way of doing GC for the various
             * exporters but it will suffice for a while */
            for (exporter = root_exported; exporter; exporter = exporter->exp_next) {
                (void) EXP_GC(exporter, 0);	/* Generalize params */
            }
            {
                static int cnt = 0;
                if (++cnt < 12) {
                    afs_CheckVolumeNames(AFS_VOLCHECK_EXPIRED |
                                         AFS_VOLCHECK_BUSY |
                                         AFS_VOLCHECK_MTPTS);
                } else {
                    cnt = 0;
                    afs_CheckVolumeNames(AFS_VOLCHECK_EXPIRED |
                                         AFS_VOLCHECK_BUSY |
                                         AFS_VOLCHECK_MTPTS |
                                         AFS_VOLCHECK_FORCE);
                }
            }
            last10MinCheck = now;
        }
        if (last60MinCheck + 3600 < now) {
            afs_Trace1(afs_iclSetp, CM_TRACE_PROBEVOLUME, ICL_TYPE_INT32, 3600);
            afs_CheckRootVolume();
            if (afs_gcpags == AFS_GCPAGS_OK) {
                afs_int32 didany;
                afs_GCPAGs(&didany);
            }
            last60MinCheck = now;
        }

        if (afs_initState < 300) {	/* while things ain't rosy */
            code = afs_CheckRootVolume();
            if (code == 0) afs_initState = 300;	/* succeeded */
            if (afs_initState < 200) afs_initState = 200;	/* tried once */
            afs_osi_Wakeup(&afs_initState);
        }
        /* 18285 is because we're trying to divide evenly into 128, that is,
         * CBSlotLen, while staying just under 20 seconds.  If CBSlotLen
         * changes, we should probably change this interval, too.
         * Some of the preceding actions may take quite some time, so we
         * might not want to wait the entire interval. */
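        /* For the record, the arithmetic appears to be: with CBSlotLen at
         * 128 seconds, 128000 ms / 7 = 18285.7 ms, so seven sleeps of
         * 18285 ms cover one slot almost exactly (7 * 18285 = 127995 ms)
         * while each sleep stays just under the 20-second bound. */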
        now = 18285 - (osi_Time() - now);
        if (now > 0) {
            afs_osi_Wait(now, &AFS_WaitHandler, 0);
        }
        if (afs_termState == AFSOP_STOP_AFS) {
            if (afs_CheckServerDaemonStarted)
                afs_termState = AFSOP_STOP_CS;
            else
                afs_termState = AFSOP_STOP_BKG;
            afs_osi_Wakeup(&afs_termState);
            return;
        }
    }
}
afs_CheckRootVolume () {
    char rootVolName[32];
    register struct volume *tvp;
    int usingDynroot = afs_GetDynrootEnable();

    AFS_STATCNT(afs_CheckRootVolume);
    if (*afs_rootVolumeName == 0) {
        strcpy(rootVolName, "root.afs");
    } else {
        strcpy(rootVolName, afs_rootVolumeName);
    }
    if (usingDynroot) {
        afs_GetDynrootFid(&afs_rootFid);
        tvp = afs_GetVolume(&afs_rootFid, (struct vrequest *) 0, READ_LOCK);
    } else {
        tvp = afs_GetVolumeByName(rootVolName, LOCALCELL, 1, (struct vrequest *) 0, READ_LOCK);
    }
    if (!tvp) {
        char buf[128];
        int len = strlen(rootVolName);

        if ((len < 9) || strcmp(&rootVolName[len - 9], ".readonly")) {
            strcpy(buf, rootVolName);
            afs_strcat(buf, ".readonly");
            tvp = afs_GetVolumeByName(buf, LOCALCELL, 1, (struct vrequest *) 0, READ_LOCK);
        }
    }
    if (tvp) {
        if (!usingDynroot) {
            int volid = (tvp->roVol ? tvp->roVol : tvp->volume);
            afs_rootFid.Cell = LOCALCELL;
            if (afs_rootFid.Fid.Volume && afs_rootFid.Fid.Volume != volid
                && afs_globalVp) {
                /* If we had a root fid before and it changed location we reset
                 * the afs_globalVp so that it will be reevaluated.
                 * Just decrement the reference count.  This only occurs during
                 * initial cell setup and can panic the machine if we set the
                 * count to zero and fs checkv is executed when the current
                 * directory is /afs.
                 */
                AFS_FAST_RELE(afs_globalVp);
                afs_globalVp = 0;
            }
            afs_rootFid.Fid.Volume = volid;
            afs_rootFid.Fid.Vnode = 1;
            afs_rootFid.Fid.Unique = 1;
        }
        afs_initState = 300;	/* won */
        afs_osi_Wakeup(&afs_initState);
        afs_PutVolume(tvp, READ_LOCK);
    }
    /* This is to make sure that we update the root gnode */
    /* every time the root volume gets released */
    {
        extern struct vfs *afs_globalVFS;
        extern int afs_root();
        struct gnode *rootgp;
        struct mount *mp;
        int code;

        /* Only do this if afs_globalVFS is properly set (due to race
         * conditions this routine could be called before the gfs_mount is
         * performed!)  Furthermore, afs_root (called below) *waits* until
         * initState >= 200, so we don't try this until we've gotten past
         * that point. */
        if (afs_globalVFS && afs_initState >= 200) {
            if (code = afs_root(afs_globalVFS, &rootgp))
                return code;
            mp = (struct mount *) afs_globalVFS->vfs_data;
            mp->m_rootgp = gget(mp, 0, 0, (char *)rootgp);
            afs_unlock(mp->m_rootgp);	/* unlock basic gnode */
            afs_vrele((struct vcache *) rootgp);	/* zap afs_root's vnode hold */
        }
    }

    if (afs_rootFid.Fid.Volume) return 0;
    else return ENOENT;
}
/* parm 0 is the pathname, parm 1 to the fetch is the chunk number */
static int BPath(ab)
    register struct brequest *ab; {
    register struct dcache *tdc;
    struct vcache *tvc;
    struct vnode *tvn;
#ifdef AFS_LINUX22_ENV
    struct dentry *dp = NULL;
#endif
    afs_int32 offset, len;
    struct vrequest treq;
    afs_int32 code;

    AFS_STATCNT(BPath);
    if (code = afs_InitReq(&treq, ab->cred)) return;
#ifdef AFS_LINUX22_ENV
    code = gop_lookupname((char *)ab->parm[0], AFS_UIOSYS, 1, (struct vnode **) 0, &dp);
    if (dp) tvn = (struct vnode *)dp->d_inode;
#else
    code = gop_lookupname((char *)ab->parm[0], AFS_UIOSYS, 1, (struct vnode **) 0, (struct vnode **)&tvn);
#endif
    osi_FreeLargeSpace((char *)ab->parm[0]);	/* free path name buffer here */
    if (code) return;
    /* now path may not have been in afs, so check that before calling our cache manager */
    if (!tvn || !IsAfsVnode((struct vnode *) tvn)) {
        /* release it and give up */
        if (tvn) {
#ifdef AFS_LINUX22_ENV
            dput(dp);
#else
            AFS_RELE((struct vnode *) tvn);
#endif
        }
        return;
    }
#ifdef AFS_DEC_ENV
    tvc = (struct vcache *) afs_gntovn(tvn);
#else
    tvc = (struct vcache *) tvn;
#endif
    /* here we know it's an afs vnode, so we can get the data for the chunk */
    tdc = afs_GetDCache(tvc, ab->parm[1], &treq, &offset, &len, 1);
    if (tdc) afs_PutDCache(tdc);
#ifdef AFS_LINUX22_ENV
    dput(dp);
#else
    AFS_RELE((struct vnode *) tvn);
#endif
}
/* parm 0 to the fetch is the chunk number; parm 1 is the dcache entry to wakeup;
 * parm 2 is true iff we should release the dcache entry here.
 */
static int BPrefetch(ab)
    register struct brequest *ab; {
    register struct dcache *tdc;
    register struct vcache *tvc;
    afs_int32 offset, len;
    struct vrequest treq;

    AFS_STATCNT(BPrefetch);
    if (len = afs_InitReq(&treq, ab->cred)) return;
    tvc = ab->vnode;
    tdc = afs_GetDCache(tvc, (afs_int32)ab->parm[0], &treq, &offset, &len, 1);
    if (tdc) {
        afs_PutDCache(tdc);
    }
    /* now, dude may be waiting for us to clear DFFetchReq bit; do so.  Can't
     * use tdc from GetDCache since afs_GetDCache may fail, but someone may
     * be waiting for our wakeup anyway.
     */
    tdc = (struct dcache *) (ab->parm[1]);
    tdc->flags &= ~DFFetchReq;
    afs_osi_Wakeup(&tdc->validPos);
    if (ab->parm[2]) {
#ifdef AFS_SUN5_ENVX
        mutex_enter(&tdc->lock);
        tdc->refCount--;
        mutex_exit(&tdc->lock);
#else
        afs_PutDCache(tdc);	/* put this one back, too */
#endif
    }
}
static int BStore(ab)
    register struct brequest *ab; {
    register struct vcache *tvc;
    register afs_int32 code;
    struct vrequest treq;
#if defined(AFS_SGI_ENV)
    struct cred *tmpcred;
#endif

    AFS_STATCNT(BStore);
    if (code = afs_InitReq(&treq, ab->cred)) return;
    code = 0;
    tvc = ab->vnode;
#if defined(AFS_SGI_ENV)
    /*
     * Since StoreOnLastReference can end up calling osi_SyncVM which
     * calls into VM code that assumes that u.u_cred has the
     * correct credentials, we set ours to theirs for this transaction
     */
    tmpcred = OSI_GET_CURRENT_CRED();
    OSI_SET_CURRENT_CRED(ab->cred);

    /*
     * To avoid recursion since the WriteLock may be released during VM
     * operations, we hold the VOP_RWLOCK across this transaction as
     * do the other callers of StoreOnLastReference
     */
    AFS_RWLOCK((vnode_t *)tvc, 1);
#endif
    ObtainWriteLock(&tvc->lock,209);
    code = afs_StoreOnLastReference(tvc, &treq);
    ReleaseWriteLock(&tvc->lock);
#if defined(AFS_SGI_ENV)
    OSI_SET_CURRENT_CRED(tmpcred);
    AFS_RWUNLOCK((vnode_t *)tvc, 1);
#endif
    /* now set final return code, and wakeup anyone waiting */
    if ((ab->flags & BUVALID) == 0) {
        ab->code = afs_CheckCode(code, &treq, 43);	/* set final code, since treq doesn't go across processes */
        ab->flags |= BUVALID;
        if (ab->flags & BUWAIT) {
            ab->flags &= ~BUWAIT;
            afs_osi_Wakeup(ab);
        }
    }
}
/* release a held request buffer */
void afs_BRelease(ab)
    register struct brequest *ab; {

    AFS_STATCNT(afs_BRelease);
    MObtainWriteLock(&afs_xbrs,294);
    if (--ab->refCount <= 0) {
        ab->flags = 0;
    }
    if (afs_brsWaiters) afs_osi_Wakeup(&afs_brsWaiters);
    MReleaseWriteLock(&afs_xbrs);
}
/* return true if bkg fetch daemons are all busy */
int afs_BBusy() {
    AFS_STATCNT(afs_BBusy);
    if (afs_brsDaemons > 0) return 0;
    return 1;
}
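/*
 * For illustration only (this is not a call site in this file): a
 * prefetch of the sort BPrefetch services might be queued roughly as
 * follows, with the chunk number, the dcache entry to wake, and the
 * release flag packed into the parm slots.  The variable names here
 * (tvc, chunk, tdc, acred) are hypothetical.
 *
 *	struct brequest *tb;
 *	tb = afs_BQueue(BOP_FETCH, tvc, 1, 0, acred,
 *			(long)chunk, (long)tdc, 1L, 0L);
 *	if (!tb) {
 *		(no free request buffers and dontwait was set;
 *		 the caller must fall back to doing the work itself)
 *	}
 */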
struct brequest *afs_BQueue(aopcode, avc, dontwait, ause, acred, aparm0, aparm1, aparm2, aparm3)
    register short aopcode;
    afs_int32 ause, dontwait;
    register struct vcache *avc;
    struct AFS_UCRED *acred;
    /* On 64 bit platforms, "long" does the right thing. */
    long aparm0, aparm1, aparm2, aparm3;
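    /* (On 64-bit Unix systems sizeof(long) == sizeof(void *), which is why
     * "long" does the right thing here: pointer arguments such as the
     * dcache entry passed through parm[1] survive the round trip.) */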
{
    register int i;
    register struct brequest *tb;

    AFS_STATCNT(afs_BQueue);
    MObtainWriteLock(&afs_xbrs,296);
    while (1) {
        tb = afs_brs;
        for (i = 0; i < NBRS; i++, tb++) {
            if (tb->refCount == 0) break;
        }
        if (i < NBRS) {
            /* found a free buffer */
            tb->opcode = aopcode;
            tb->vnode = avc;
            tb->cred = acred;
            crhold(tb->cred);
            if (avc) {
                VN_HOLD((struct vnode *)avc);
            }
            tb->refCount = ause+1;
            tb->parm[0] = aparm0;
            tb->parm[1] = aparm1;
            tb->parm[2] = aparm2;
            tb->parm[3] = aparm3;
            /* if daemons are waiting for work, wake them up */
            if (afs_brsDaemons > 0) {
                afs_osi_Wakeup(&afs_brsDaemons);
            }
            MReleaseWriteLock(&afs_xbrs);
            return tb;
        }
        if (dontwait) {
            MReleaseWriteLock(&afs_xbrs);
            return (struct brequest *)0;
        }
        /* no free buffers, sleep a while */
        afs_brsWaiters++;
        MReleaseWriteLock(&afs_xbrs);
        afs_osi_Sleep(&afs_brsWaiters);
        MObtainWriteLock(&afs_xbrs,301);
        afs_brsWaiters--;
    }
}
#ifdef AFS_AIX32_ENV
#ifdef AFS_AIX41_ENV
/* AIX 4.1 has a much different sleep/wakeup mechanism available for use.
 * The modifications here will work for either a UP or MP machine.
 */
struct buf *afs_asyncbuf = (struct buf *)0;
afs_int32 afs_asyncbuf_cv = EVENT_NULL;
afs_int32 afs_biodcnt = 0;
/* in implementing this, I assumed that all external linked lists were
 * null-terminated.
 */
/*
 * Several places in this code traverse a linked list.  The algorithm
 * used here is probably unfamiliar to most people.  Careful examination
 * will show that it eliminates an assignment inside the loop, as compared
 * to the standard algorithm, at the cost of occasionally using an extra
 * variable.
 */
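/*
 * Concretely, as an illustrative sketch rather than the code itself:
 * the standard traversal advances one cursor per node,
 *
 *	for (p = bp; p; p = (struct buf *) p->b_work)
 *		visit(p);
 *
 * while the loops below alternate two cursors, visiting two nodes per
 * pass,
 *
 *	for (t1P = bp;;) {
 *		t2P = (struct buf *) t1P->b_work;
 *		visit(t1P);
 *		if (!t2P) break;
 *		t1P = (struct buf *) t2P->b_work;
 *		visit(t2P);
 *		if (!t1P) break;
 *	}
 *
 * which drops the "p = next" copy-back at the cost of the extra
 * pointer variable.  visit() is a stand-in for the real per-node work.
 */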
/* get_bioreq()
 *
 * This function obtains, and returns, a pointer to a buffer for
 * processing by a daemon.  It sleeps until such a buffer is available.
 * The source of buffers for it is the list afs_asyncbuf (see also
 * naix_vm_strategy).  This function may be invoked concurrently by
 * several processes, that is, several instances of the same daemon.
 * naix_vm_strategy, which adds buffers to the list, runs at interrupt
 * level, while get_bioreq runs at process level.
 *
 * Since AIX 4.1 can wake just one process at a time, the separate sleep
 * addresses have been removed.
 * Note that the kernel_lock is held until the e_sleep_thread() occurs.
 * The afs_asyncbuf_lock is primarily used to serialize access between
 * process and interrupts.
 */
Simple_lock afs_asyncbuf_lock;
/*static*/ struct buf *afs_get_bioreq()
{
    struct buf *bp = (struct buf *) 0;
    struct buf *bestbp;
    struct buf **bestlbpP, **lbpP;
    struct buf *t1P, *t2P;	/* temp pointers for list manipulation */
    int bestage, oldPriority;
    struct afs_bioqueue *s;

    /* ??? Does the forward pointer of the returned buffer need to be NULL?
     */

    /* Disable interrupts from the strategy function, and save the
     * prior priority level and lock access to the afs_asyncbuf.
     */
    oldPriority = disable_lock(INTMAX, &afs_asyncbuf_lock);
    while (1) {
        if (afs_asyncbuf) {
            /* look for oldest buffer */
            bp = bestbp = afs_asyncbuf;
            bestage = (int) bestbp->av_back;
            bestlbpP = &afs_asyncbuf;
            while (bp->av_forw) {
                lbpP = &bp->av_forw;
                bp = *lbpP;
                if ((int) bp->av_back - bestage < 0) {
                    bestbp = bp; bestlbpP = lbpP;
                    bestage = (int) bp->av_back;
                }
            }
            bp = bestbp;
            *bestlbpP = bp->av_forw;
            break;
        } else {
            /* If afs_asyncbuf is null, it is necessary to go to sleep.
             * e_wakeup_one() ensures that only one thread wakes.
             */
            int interrupted;
            /* The LOCK_HANDLER indicates to e_sleep_thread to only drop the
             * lock on an MP machine.
             */
            interrupted = e_sleep_thread(&afs_asyncbuf_cv,
                                         &afs_asyncbuf_lock,
                                         LOCK_HANDLER|INTERRUPTIBLE);
            if (interrupted == THREAD_INTERRUPTED) {
                /* re-enable interrupts from strategy */
                unlock_enable(oldPriority, &afs_asyncbuf_lock);
                return (NULL);
            }
        } /* end of "else asyncbuf is empty" */
    } /* end of "inner loop" */

    unlock_enable(oldPriority, &afs_asyncbuf_lock);
    /* For the convenience of other code, replace the gnodes in
     * the b_vp field of bp and the other buffers on the b_work
     * chain with the corresponding vnodes.
     *
     * ??? what happens to the gnodes?  They're not just cut loose,
     * are they?
     */
    for (t1P = bp;;) {
        t2P = (struct buf *) t1P->b_work;
        t1P->b_vp = ((struct gnode *) t1P->b_vp)->gn_vnode;
        if (!t2P) break;
        t1P = (struct buf *) t2P->b_work;
        t2P->b_vp = ((struct gnode *) t2P->b_vp)->gn_vnode;
        if (!t1P) break;
    }

    /* If the buffer does not specify I/O, it may immediately
     * be returned to the caller.  This condition is detected
     * by examining the buffer's flags (the b_flags field).  If
     * the B_PFPROT bit is set, the buffer represents a protection
     * violation, rather than a request for I/O.  The remainder
     * of the outer loop handles the case where the B_PFPROT bit is clear.
     */
    if (bp->b_flags & B_PFPROT) {
        return (bp);
    }
    return (bp);
} /* end of function get_bioreq() */
/* afs_BioDaemon
 *
 * This function is the daemon.  It is called from the syscall
 * interface.  Ordinarily, a script or an administrator will run a
 * daemon startup utility, specifying the number of I/O daemons to
 * run.  The utility will fork off that number of processes,
 * each making the appropriate syscall, which will cause this
 * function to be invoked.
 */
static int afs_initbiod = 0;	/* this is self-initializing code */

afs_BioDaemon (nbiods)
    afs_int32 nbiods;
{
    afs_int32 code, s, pflg = 0;
    label_t jmpbuf;
    struct buf *bp, *bp1, *tbp1, *tbp2;	/* temp pointers only */
    caddr_t tmpaddr;
    struct vcache *vcp;
    char tmperr;

    if (!afs_initbiod) {
        afs_initbiod = 1;
        /* pin lock, since we'll be using it in an interrupt. */
        lock_alloc(&afs_asyncbuf_lock, LOCK_ALLOC_PIN, 2, 1);
        simple_lock_init(&afs_asyncbuf_lock);
        pin (&afs_asyncbuf, sizeof(struct buf*));
        pin (&afs_asyncbuf_cv, sizeof(afs_int32));
    }
    /* Ignore HUP signals... */
    {
#ifdef AFS_AIX41_ENV
        sigset_t sigbits, osigbits;
        /*
         * add SIGHUP to the set of already masked signals
         */
        SIGFILLSET(sigbits);			/* allow all signals	*/
        SIGDELSET(sigbits, SIGHUP);		/*   except SIGHUP	*/
        limit_sigs(&sigbits, &osigbits);	/*   and already masked */
#else
        SIGDELSET(u.u_procp->p_sig, SIGHUP);
        SIGADDSET(u.u_procp->p_sigignore, SIGHUP);
        SIGDELSET(u.u_procp->p_sigcatch, SIGHUP);
#endif
    }
    /* Main body starts here -- this is an intentional infinite loop, and
     * should NEVER exit.
     *
     * Now, the loop will exit if get_bioreq() returns NULL, indicating
     * that we've been interrupted.
     */
    while (1) {
        bp = afs_get_bioreq();
        if (!bp)
            break;	/* we were interrupted */
        if (code = setjmpx(&jmpbuf)) {
            /* This should not have happened; maybe a lack of resources */
            s = disable_lock(INTMAX, &afs_asyncbuf_lock);
            for (bp1 = bp; bp; bp = bp1) {
                if (bp1)
                    bp1 = (struct buf *) bp1->b_work;
                bp->b_flags |= B_ERROR;
                bp->b_error = code;
                iodone(bp);
            }
            unlock_enable(s, &afs_asyncbuf_lock);
            continue;
        }
        vcp = (struct vcache *)bp->b_vp;
        if (bp->b_flags & B_PFSTORE) {	/* XXXX */
            ObtainWriteLock(&vcp->lock,404);
            if (vcp->v.v_gnode->gn_mwrcnt) {
                if (vcp->m.Length < bp->b_bcount + (u_int)dbtob(bp->b_blkno))
                    vcp->m.Length = bp->b_bcount + (u_int)dbtob(bp->b_blkno);
            }
            ReleaseWriteLock(&vcp->lock);
        }
        /* If the buffer represents a protection violation, rather than
         * an actual request for I/O, no special action need be taken.
         */
        if (bp->b_flags & B_PFPROT) {
            iodone(bp);	/* Notify all users of the buffer that we're done */
            continue;
        }
        ObtainWriteLock(&vcp->pvmlock,211);
        /*
         * First map its data area to a region in the current address space
         * by calling vm_att with the subspace identifier, and a pointer to
         * the data area.  vm_att returns a new data area pointer, but we
         * also want to hang onto the old one.
         */
        tmpaddr = bp->b_baddr;
        bp->b_baddr = vm_att(bp->b_xmemd.subspace_id, tmpaddr);
        tmperr = afs_ustrategy(bp);	/* temp variable saves offset calculation */
        if (tmperr) {	/* in the error case */
            bp->b_flags |= B_ERROR;	/* should other flags remain set ??? */
            bp->b_error = tmperr;
        }

        /* Unmap the buffer's data area by calling vm_det.  Reset data area
         * to the value that we saved above.
         */
        vm_det(bp->b_un.b_addr);
        bp->b_baddr = tmpaddr;
        /*
         * buffer may be linked with other buffers via the b_work field.
         * See also naix_vm_strategy.  For each buffer in the chain (including
         * bp) notify all users of the buffer that the daemon is finished
         * using it by calling iodone.
         * assumes iodone can modify the b_work field.
         */
        for (tbp1 = bp;;) {
            tbp2 = (struct buf *) tbp1->b_work;
            iodone(tbp1);
            if (!tbp2) break;
            tbp1 = (struct buf *) tbp2->b_work;
            iodone(tbp2);
            if (!tbp1) break;
        }

        ReleaseWriteLock(&vcp->pvmlock);	/* Unlock the vnode. */
        clrjmpx(&jmpbuf);
    } /* infinite loop (unless we're interrupted) */
} /* end of afs_BioDaemon() */
#else /* AFS_AIX41_ENV */
struct afs_bioqueue {
    struct squeue lruq;
    int sleeper;
    int cnt;
};
struct afs_bioqueue afs_bioqueue;
struct buf *afs_busyq = NULL;
struct buf *afs_asyncbuf;
afs_int32 afs_biodcnt = 0;
/* in implementing this, I assumed that all external linked lists were
 * null-terminated.
 */
/*
 * Several places in this code traverse a linked list.  The algorithm
 * used here is probably unfamiliar to most people.  Careful examination
 * will show that it eliminates an assignment inside the loop, as compared
 * to the standard algorithm, at the cost of occasionally using an extra
 * variable.  (See the illustrative sketch after the first copy of this
 * comment, above.)
 */
/* get_bioreq()
 *
 * This function obtains, and returns, a pointer to a buffer for
 * processing by a daemon.  It sleeps until such a buffer is available.
 * The source of buffers for it is the list afs_asyncbuf (see also
 * naix_vm_strategy).  This function may be invoked concurrently by
 * several processes, that is, several instances of the same daemon.
 * naix_vm_strategy, which adds buffers to the list, runs at interrupt
 * level, while get_bioreq runs at process level.
 *
 * The common kernel paradigm of sleeping and waking up, in which all the
 * competing processes sleep waiting for wakeups on one address, is not
 * followed here.  Instead, the following paradigm is used: when a daemon
 * goes to sleep, it checks for other sleeping daemons.  If there aren't any,
 * it sleeps on the address of variable afs_asyncbuf.  But if there is
 * already a daemon sleeping on that address, it threads its own unique
 * address onto a list, and sleeps on that address.  This way, every
 * sleeper is sleeping on a different address, and every wakeup wakes up
 * exactly one daemon.  This prevents a whole bunch of daemons from waking
 * up and then immediately having to go back to sleep.  This provides a
 * performance gain and makes the I/O scheduling a bit more deterministic.
 * The list of sleepers is variable afs_bioqueue.  The unique address
 * on which to sleep is passed to get_bioreq as its parameter.
 */
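/*
 * In outline, and purely as an illustration of the paradigm just
 * described ("self" is the per-daemon token passed to get_bioreq):
 *
 *	sleeping side:
 *		if (afs_bioqueue.sleeper) {
 *			QAdd(&afs_bioqueue.lruq, &self->lruq);
 *			sleep((caddr_t) self, PCATCH|(PZERO + 1));
 *		} else {
 *			afs_bioqueue.sleeper = TRUE;
 *			sleep((caddr_t) &afs_asyncbuf, PCATCH|(PZERO + 1));
 *		}
 *	waking side:
 *		if (afs_bioqueue.sleeper)
 *			wakeup(&afs_asyncbuf);
 *		else
 *			wake one queued unique address, if any;
 *
 * so at most one daemon wakes per available buffer.
 */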
/*static*/ struct buf *afs_get_bioreq(self)
    struct afs_bioqueue *self;	/* address on which to sleep */
{
    struct buf *bp = (struct buf *) 0;
    struct buf *bestbp;
    struct buf **bestlbpP, **lbpP;
    struct buf *t1P, *t2P;	/* temp pointers for list manipulation */
    int bestage, oldPriority;
    struct afs_bioqueue *s;

    /* ??? Does the forward pointer of the returned buffer need to be NULL?
     */

    /* Disable interrupts from the strategy function, and save the
     * prior priority level
     */
    oldPriority = i_disable(INTMAX);
    /* Each iteration of following loop either pulls
     * a buffer off afs_asyncbuf, or sleeps.
     */
    while (1) {	/* inner loop */
        if (afs_asyncbuf) {
            /* look for oldest buffer */
            bp = bestbp = afs_asyncbuf;
            bestage = (int) bestbp->av_back;
            bestlbpP = &afs_asyncbuf;
            while (bp->av_forw) {
                lbpP = &bp->av_forw;
                bp = *lbpP;
                if ((int) bp->av_back - bestage < 0) {
                    bestbp = bp; bestlbpP = lbpP;
                    bestage = (int) bp->av_back;
                }
            }
            bp = bestbp;
            *bestlbpP = bp->av_forw;
            break;
        } else {
            /* If afs_asyncbuf is null, it is necessary to go to sleep.
             * There are two possibilities: either there is already a
             * daemon that is sleeping on the address of afs_asyncbuf,
             * or there isn't.
             */
            int interrupted;

            if (afs_bioqueue.sleeper) {
                QAdd (&(afs_bioqueue.lruq), &(self->lruq));	/* enqueue */
                interrupted = sleep ((caddr_t) self, PCATCH|(PZERO + 1));
                if (self->lruq.next != &self->lruq) {	/* XXX ##3 XXX */
                    QRemove (&(self->lruq));	/* dequeue */
                }
                afs_bioqueue.sleeper = FALSE;
                if (interrupted) {
                    /* re-enable interrupts from strategy */
                    i_enable (oldPriority);
                    return (NULL);
                }
            } else {
                afs_bioqueue.sleeper = TRUE;
                interrupted = sleep ((caddr_t) &afs_asyncbuf, PCATCH|(PZERO + 1));
                afs_bioqueue.sleeper = FALSE;
                if (interrupted) {
                    /*
                     * We need to wakeup another daemon if present
                     * since we were waiting on afs_asyncbuf.
                     */
#ifdef notdef /* The following doesn't work as advertised */
                    if (afs_bioqueue.lruq.next != &afs_bioqueue.lruq) {
                        struct squeue *bq = afs_bioqueue.lruq.next;
                        QRemove (bq);
                        wakeup (bq);
                    }
#endif
                    /* re-enable interrupts from strategy */
                    i_enable (oldPriority);
                    return (NULL);
                }
            }
        } /* end of "else asyncbuf is empty" */
    } /* end of "inner loop" */
    i_enable (oldPriority);	/* re-enable interrupts from strategy */

    /* For the convenience of other code, replace the gnodes in
     * the b_vp field of bp and the other buffers on the b_work
     * chain with the corresponding vnodes.
     *
     * ??? what happens to the gnodes?  They're not just cut loose,
     * are they?
     */
    for (t1P = bp;;) {
        t2P = (struct buf *) t1P->b_work;
        t1P->b_vp = ((struct gnode *) t1P->b_vp)->gn_vnode;
        if (!t2P) break;
        t1P = (struct buf *) t2P->b_work;
        t2P->b_vp = ((struct gnode *) t2P->b_vp)->gn_vnode;
        if (!t1P) break;
    }

    /* If the buffer does not specify I/O, it may immediately
     * be returned to the caller.  This condition is detected
     * by examining the buffer's flags (the b_flags field).  If
     * the B_PFPROT bit is set, the buffer represents a protection
     * violation, rather than a request for I/O.  The remainder
     * of the outer loop handles the case where the B_PFPROT bit is clear.
     */
    if (bp->b_flags & B_PFPROT) {
        return (bp);
    }
    /* wake up another process to handle the next buffer, and return
     * to the caller with this buffer.
     */
    oldPriority = i_disable(INTMAX);

    /* determine where to find the sleeping process.
     * There are two cases: either it is sleeping on
     * afs_asyncbuf, or it is sleeping on its own unique
     * address.  These cases are distinguished by examining
     * the sleeper field of afs_bioqueue.
     */
    if (afs_bioqueue.sleeper) {
        wakeup (&afs_asyncbuf);
    } else {
        if (afs_bioqueue.lruq.next == &afs_bioqueue.lruq) {
            /* queue is empty, what now? ??? */
            /* Should this be impossible, or does    */
            /* it just mean that nobody is sleeping? */;
        } else {
            struct squeue *bq = afs_bioqueue.lruq.next;
            QRemove (bq);
            QInit (bq);
            wakeup (bq);
            afs_bioqueue.sleeper = TRUE;	/* XXX ##5 XXX */
        }
    }
    i_enable (oldPriority);	/* re-enable interrupts from strategy */
    return (bp);

} /* end of function get_bioreq() */
/* afs_BioDaemon
 *
 * This function is the daemon.  It is called from the syscall
 * interface.  Ordinarily, a script or an administrator will run a
 * daemon startup utility, specifying the number of I/O daemons to
 * run.  The utility will fork off that number of processes,
 * each making the appropriate syscall, which will cause this
 * function to be invoked.
 */
static int afs_initbiod = 0;	/* this is self-initializing code */

afs_BioDaemon (nbiods)
    afs_int32 nbiods;
{
    struct afs_bioqueue *self;
    afs_int32 code, s, pflg = 0;
    label_t jmpbuf;
    struct buf *bp, *bp1, *tbp1, *tbp2;	/* temp pointers only */
    caddr_t tmpaddr;
    struct vcache *vcp;
    char tmperr;

    if (!afs_initbiod) {
        afs_initbiod = 1;
        /* Initialize the queue of waiting processes, afs_bioqueue. */
        QInit (&(afs_bioqueue.lruq));
    }

    /* establish ourself as a kernel process so shutdown won't kill us */
    /* u.u_procp->p_flag |= SKPROC; */

    /* Initialize a token (self) to use in the queue of sleeping processes. */
    self = (struct afs_bioqueue *) afs_osi_Alloc (sizeof (struct afs_bioqueue));
    pin (self, sizeof (struct afs_bioqueue));	/* fix in memory */
    memset(self, 0, sizeof(*self));
    QInit (&(self->lruq));	/* initialize queue entry pointers */
    /* Ignore HUP signals... */
#ifdef AFS_AIX41_ENV
    {
        sigset_t sigbits, osigbits;
        /*
         * add SIGHUP to the set of already masked signals
         */
        SIGFILLSET(sigbits);			/* allow all signals	*/
        SIGDELSET(sigbits, SIGHUP);		/*   except SIGHUP	*/
        limit_sigs(&sigbits, &osigbits);	/*   and already masked */
    }
#else
    SIGDELSET(u.u_procp->p_sig, SIGHUP);
    SIGADDSET(u.u_procp->p_sigignore, SIGHUP);
    SIGDELSET(u.u_procp->p_sigcatch, SIGHUP);
#endif
    /* Main body starts here -- this is an intentional infinite loop, and
     * should NEVER exit.  Now, the loop will exit if get_bioreq()
     * returns NULL, indicating that we've been interrupted.
     */
    while (1) {
        bp = afs_get_bioreq(self);
        if (!bp) break;	/* we were interrupted */
        if (code = setjmpx(&jmpbuf)) {
            /* This should not have happened; maybe a lack of resources */
            for (bp1 = bp; bp; bp = bp1) {
                if (bp1) bp1 = (struct buf *) bp1->b_work;
                bp->b_flags |= B_ERROR;
                bp->b_error = code;
                iodone(bp);
            }
            continue;
        }
        vcp = (struct vcache *)bp->b_vp;
        if (bp->b_flags & B_PFSTORE) {
            ObtainWriteLock(&vcp->lock,210);
            if (vcp->v.v_gnode->gn_mwrcnt) {
                if (vcp->m.Length < bp->b_bcount + (u_int)dbtob(bp->b_blkno))
                    vcp->m.Length = bp->b_bcount + (u_int)dbtob(bp->b_blkno);
            }
            ReleaseWriteLock(&vcp->lock);
        }
        /* If the buffer represents a protection violation, rather than
         * an actual request for I/O, no special action need be taken.
         */
        if (bp->b_flags & B_PFPROT) {
            iodone (bp);	/* Notify all users of the buffer that we're done */
            continue;
        }
        ObtainWriteLock(&vcp->pvmlock,558);
        /*
         * First map its data area to a region in the current address space
         * by calling vm_att with the subspace identifier, and a pointer to
         * the data area.  vm_att returns a new data area pointer, but we
         * also want to hang onto the old one.
         */
        tmpaddr = bp->b_baddr;
        bp->b_baddr = vm_att (bp->b_xmemd.subspace_id, tmpaddr);
        tmperr = afs_ustrategy(bp);	/* temp variable saves offset calculation */
        if (tmperr) {	/* in the error case */
            bp->b_flags |= B_ERROR;	/* should other flags remain set ??? */
            bp->b_error = tmperr;
        }

        /* Unmap the buffer's data area by calling vm_det.  Reset data area
         * to the value that we saved above.
         */
        vm_det(bp->b_un.b_addr);
        bp->b_baddr = tmpaddr;
        /*
         * buffer may be linked with other buffers via the b_work field.
         * See also naix_vm_strategy.  For each buffer in the chain (including
         * bp) notify all users of the buffer that the daemon is finished
         * using it by calling iodone.
         * assumes iodone can modify the b_work field.
         */
        for (tbp1 = bp;;) {
            tbp2 = (struct buf *) tbp1->b_work;
            iodone(tbp1);
            if (!tbp2) break;
            tbp1 = (struct buf *) tbp2->b_work;
            iodone(tbp2);
            if (!tbp1) break;
        }

        ReleaseWriteLock(&vcp->pvmlock);	/* Unlock the vnode. */
        clrjmpx(&jmpbuf);
    } /* infinite loop (unless we're interrupted) */
    unpin (self, sizeof (struct afs_bioqueue));
    afs_osi_Free (self, sizeof (struct afs_bioqueue));
} /* end of afs_BioDaemon() */
#endif /* AFS_AIX41_ENV */
#endif /* AFS_AIX32_ENV */
void afs_BackgroundDaemon() {
    struct brequest *tb;
    int i, foundAny;

    AFS_STATCNT(afs_BackgroundDaemon);
    /* initialize subsystem */
    if (brsInit == 0) {
        LOCK_INIT(&afs_xbrs, "afs_xbrs");
        memset((char *)afs_brs, 0, sizeof(afs_brs));
        brsInit = 1;
#if defined (AFS_SGI_ENV) && defined(AFS_SGI_SHORTSTACK)
        /*
         * steal the first daemon for doing delayed DSlot flushing
         * (see afs_GetDownDSlot)
         */
        afs_sgidaemon();
        return;
#endif
    }
    afs_nbrs++;

    MObtainWriteLock(&afs_xbrs,302);
    while (1) {
        if (afs_termState == AFSOP_STOP_BKG) {
            if (--afs_nbrs <= 0)
                afs_termState = AFSOP_STOP_TRUNCDAEMON;
            MReleaseWriteLock(&afs_xbrs);
            afs_osi_Wakeup(&afs_termState);
            return;
        }
        /* find a request */
        tb = afs_brs;
        foundAny = 0;
        for (i = 0; i < NBRS; i++, tb++) {
            /* look for request */
            if ((tb->refCount > 0) && !(tb->flags & BSTARTED)) {
                /* new request, not yet picked up */
                tb->flags |= BSTARTED;
                MReleaseWriteLock(&afs_xbrs);
                foundAny = 1;
                afs_Trace1(afs_iclSetp, CM_TRACE_BKG1,
                           ICL_TYPE_INT32, tb->opcode);
                if (tb->opcode == BOP_FETCH)
                    BPrefetch(tb);
                else if (tb->opcode == BOP_STORE)
                    BStore(tb);
                else if (tb->opcode == BOP_PATH)
                    BPath(tb);
                else panic("background bop");
                if (tb->vnode) {
#ifdef AFS_DEC_ENV
                    tb->vnode->vrefCount--;	/* fix up reference count */
#else
                    AFS_RELE((struct vnode *)(tb->vnode));	/* MUST call vnode layer or could lose vnodes */
#endif
                    tb->vnode = (struct vcache *) 0;
                }
                if (tb->cred) {
                    crfree(tb->cred);
                    tb->cred = (struct AFS_UCRED *) 0;
                }
                afs_BRelease(tb);	/* this grabs and releases afs_xbrs lock */
                MObtainWriteLock(&afs_xbrs,305);
            }
        }
        if (!foundAny) {
            /* wait for new request */
            afs_brsDaemons++;
            MReleaseWriteLock(&afs_xbrs);
            afs_osi_Sleep(&afs_brsDaemons);
            MObtainWriteLock(&afs_xbrs,307);
            afs_brsDaemons--;
        }
    }
}
void shutdown_daemons()
{
    extern int afs_cold_shutdown;
    register struct brequest *tb;

    AFS_STATCNT(shutdown_daemons);
    if (afs_cold_shutdown) {
        afs_brsDaemons = brsInit = 0;
        rxepoch_checked = afs_nbrs = 0;
        memset((char *)afs_brs, 0, sizeof(afs_brs));
        memset((char *)&afs_xbrs, 0, sizeof(afs_lock_t));
        afs_brsWaiters = 0;
#ifdef AFS_AIX32_ENV
#ifdef AFS_AIX41_ENV
        lock_free(&afs_asyncbuf_lock);
        unpin(&afs_asyncbuf, sizeof(struct buf*));
        unpin(&afs_asyncbuf_cv, sizeof(afs_int32));	/* undo the pin done at startup */
#else /* AFS_AIX41_ENV */
        memset((char *)&afs_bioqueue, 0, sizeof(struct afs_bioqueue));
#endif /* AFS_AIX41_ENV */
#endif /* AFS_AIX32_ENV */
    }
}
#if defined(AFS_SGI_ENV) && defined(AFS_SGI_SHORTSTACK)
/*
 * sgi - daemon - handles certain operations that otherwise
 * would use up too much kernel stack space
 *
 * This all assumes that since the caller must have the xdcache lock
 * exclusively that the list will never be more than one long
 * and no one else can attempt to add anything until we're done.
 */
SV_TYPE afs_sgibksync;
SV_TYPE afs_sgibkwait;
lock_t afs_sgibklock;
struct dcache *afs_sgibklist;
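/*
 * For context, the producer side of this handoff (afs_GetDownDSlot, per
 * the comment above) is believed to look roughly like the sketch below;
 * details are from memory and elided:
 *
 *	s = SPLOCK(afs_sgibklock);
 *	afs_sgibklist = tdc;			(hand off the one entry)
 *	SV_SIGNAL(&afs_sgibksync);		(wake afs_sgidaemon below)
 *	SP_WAIT(afs_sgibklock, s, &afs_sgibkwait, PINOD);
 *
 * Because the producer holds the xdcache lock and blocks on
 * afs_sgibkwait until the daemon finishes, the list indeed never grows
 * beyond one entry.
 */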
int afs_sgidaemon(void)
{
    int s;
    struct dcache *tdc;

    if (afs_sgibklock == NULL) {
        SV_INIT(&afs_sgibksync, "bksync", 0, 0);
        SV_INIT(&afs_sgibkwait, "bkwait", 0, 0);
        SPINLOCK_INIT(&afs_sgibklock, "bklock");
    }
    s = SPLOCK(afs_sgibklock);
    for (;;) {
        /* wait for something to do */
        SP_WAIT(afs_sgibklock, s, &afs_sgibksync, PINOD);
        osi_Assert(afs_sgibklist);

        /* XX will probably need to generalize to real list someday */
        s = SPLOCK(afs_sgibklock);
        while (afs_sgibklist) {
            tdc = afs_sgibklist;
            afs_sgibklist = NULL;
            SPUNLOCK(afs_sgibklock, s);
            tdc->flags &= ~DFEntryMod;
            afs_WriteDCache(tdc, 1);
            s = SPLOCK(afs_sgibklock);
        }

        /* done all the work - wake everyone up */
        while (SV_SIGNAL(&afs_sgibkwait))
            ;
    }
}
#endif