/*
 * Copyright 2000, International Business Machines Corporation and others.
 * All Rights Reserved.
 *
 * This software has been released under the terms of the IBM Public
 * License.  For details, see the LICENSE file in the top-level source
 * directory or online at http://www.openafs.org/dl/license10.html
 */
#include <afsconfig.h>
#include "afs/param.h"
#ifdef AFS_AIX_ENV
#include <sys/sleep.h>
#endif
20 #include "afs/sysincludes.h" /* Standard vendor system headers */
21 #include "afsincludes.h" /* Afs-based standard headers */
22 #include "afs/afs_stats.h" /* statistics gathering code */
23 #include "afs/afs_cbqueue.h"
#ifdef AFS_AIX_ENV
#include <sys/adspace.h>	/* for vm_att(), vm_det() */
#endif
/* background request queue size */
afs_lock_t afs_xbrs;		/* lock for brs */
static int brsInit = 0;
short afs_brsWaiters = 0;	/* number of users waiting for brs buffers */
short afs_brsDaemons = 0;	/* number of daemons waiting for brs requests */
struct brequest afs_brs[NBRS];	/* request structures */
struct afs_osi_WaitHandle AFS_WaitHandler, AFS_CSWaitHandler;
static int afs_brs_count = 0;	/* request counter, to service reqs in order */

static int rxepoch_checked = 0;
#define afs_CheckRXEpoch() {if (rxepoch_checked == 0 && rxkad_EpochWasSet) { \
	rxepoch_checked = 1; afs_GCUserData(/* force flag */ 1); } }

/* PAG garbage collection */
/* We induce a compile error if param.h does not define AFS_GCPAGS */
afs_int32 afs_gcpags = AFS_GCPAGS;
afs_int32 afs_gcpags_procsize = 0;

afs_int32 afs_CheckServerDaemonStarted = 0;
#ifdef DEFAULT_PROBE_INTERVAL
afs_int32 PROBE_INTERVAL = DEFAULT_PROBE_INTERVAL;	/* overridden at compile time */
#else
afs_int32 PROBE_INTERVAL = 180;	/* default to 3 min */
#endif
#define PROBE_WAIT() (1000 * (PROBE_INTERVAL - ((afs_random() & 0x7fffffff) \
		      % (PROBE_INTERVAL/2))))
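/* E.g. with the default PROBE_INTERVAL of 180, the random term lies in
 * [0, 89], so PROBE_WAIT() evaluates to between 91,000 and 180,000 ms;
 * each client thus begins probing at a different point in the interval. */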
void afs_CheckServerDaemon(void)
{
    afs_int32 now, delay, lastCheck, last10MinCheck;

    afs_CheckServerDaemonStarted = 1;

    while (afs_initState < 101)
	afs_osi_Sleep(&afs_initState);
    afs_osi_Wait(PROBE_WAIT(), &AFS_CSWaitHandler, 0);

    last10MinCheck = lastCheck = osi_Time();
    while (1) {
	if (afs_termState == AFSOP_STOP_CS) {
	    afs_termState = AFSOP_STOP_BKG;
	    afs_osi_Wakeup(&afs_termState);
	    break;
	}

	now = osi_Time();
	if (PROBE_INTERVAL + lastCheck <= now) {
	    afs_CheckServers(1, NULL);	/* check down servers */
	    lastCheck = now = osi_Time();
	}

	if (600 + last10MinCheck <= now) {
	    afs_Trace1(afs_iclSetp, CM_TRACE_PROBEUP, ICL_TYPE_INT32, 600);
	    afs_CheckServers(0, NULL);
	    last10MinCheck = now = osi_Time();
	}

	if (afs_termState == AFSOP_STOP_CS) {
	    afs_termState = AFSOP_STOP_BKG;
	    afs_osi_Wakeup(&afs_termState);
	    break;
	}

	/* Compute time to next probe. */
	delay = PROBE_INTERVAL + lastCheck;
	if (delay > 600 + last10MinCheck)
	    delay = 600 + last10MinCheck;
	delay -= now;
	if (delay < 1)
	    delay = 1;
	afs_osi_Wait(delay * 1000, &AFS_CSWaitHandler, 0);
    }
    afs_CheckServerDaemonStarted = 0;
}
void afs_Daemon(void)
{
    afs_int32 code;
    struct afs_exporter *exporter;
    afs_int32 now;
    afs_int32 last3MinCheck, last10MinCheck, last60MinCheck, lastNMinCheck;
    afs_int32 last1MinCheck;
    afs_uint32 lastCBSlotBump;
    char cs_warned = 0;

    AFS_STATCNT(afs_Daemon);
    last1MinCheck = last3MinCheck = last60MinCheck = last10MinCheck =
	lastNMinCheck = 0;

    afs_rootFid.Fid.Volume = 0;
    while (afs_initState < 101)
	afs_osi_Sleep(&afs_initState);

    now = osi_Time();
    lastCBSlotBump = now;

    /* when a lot of clients are booted simultaneously, they develop
     * annoying synchronous VL server bashing behaviors.  So we stagger them.
     */
    last1MinCheck = now + ((afs_random() & 0x7fffffff) % 60);	/* an extra 30 */
    last3MinCheck = now - 90 + ((afs_random() & 0x7fffffff) % 180);
    last60MinCheck = now - 1800 + ((afs_random() & 0x7fffffff) % 3600);
    last10MinCheck = now - 300 + ((afs_random() & 0x7fffffff) % 600);
    lastNMinCheck = now - 90 + ((afs_random() & 0x7fffffff) % 180);
    /* start off with afs_initState >= 101 (basic init done) */
    while (1) {
	afs_CheckCallbacks(20);	/* unstat anything which will expire soon */

	/* things to do every 20 seconds or less - required by protocol spec */
	if (afs_nfsexporter)
	    afs_FlushActiveVcaches(0);	/* flush NFS writes */
	afs_FlushVCBs(1);	/* flush queued callbacks */
	afs_MaybeWakeupTruncateDaemon();	/* free cache space if we have to */
	rx_CheckPackets();	/* Does RX need more packets? */
#if defined(AFS_AIX32_ENV) || defined(AFS_HPUX_ENV)
	/*
	 * Hack: we always want plenty of free entries in the small free
	 * pool, so that rx (which runs with interrupts disabled) never has
	 * to call malloc.  The dummy allocate/free below tops the pool up.
	 */
	if ((afs_stats_cmperf.SmallBlocksAlloced -
	     afs_stats_cmperf.SmallBlocksActive) <= AFS_SALLOC_LOW_WATER)
	    osi_FreeSmallSpace(osi_AllocSmallSpace(AFS_SMALLOCSIZ));
	if ((afs_stats_cmperf.MediumBlocksAlloced -
	     afs_stats_cmperf.MediumBlocksActive) <= AFS_MALLOC_LOW_WATER + 50)
	    osi_AllocMoreMSpace(AFS_MALLOC_LOW_WATER * 2);
#endif
	now = osi_Time();
	if (lastCBSlotBump + CBHTSLOTLEN < now) {	/* pretty time-dependent */
	    lastCBSlotBump = now;
	    if (afs_BumpBase()) {
		afs_CheckCallbacks(20);	/* unstat anything which will expire soon */
	    }
	}

	if (last1MinCheck + 60 < now) {
	    /* things to do every minute */
	    DFlush();			/* write out dir buffers */
	    afs_WriteThroughDSlots();	/* write through cacheinfo entries */
	    afs_FlushActiveVcaches(1);	/* keep flocks held & flush nfs writes */
#ifdef AFS_DISCON_ENV
	    afs_StoreDirtyVcaches();
#endif
	    afs_CheckRXEpoch();
	    last1MinCheck = now;
	}

	if (last3MinCheck + 180 < now) {
	    afs_CheckTokenCache();	/* check for access cache resets due to
					 * expired tickets */
	    last3MinCheck = now;
	}
	if (!afs_CheckServerDaemonStarted) {
	    /* Do the check here if the correct afsd is not installed. */
	    if (!cs_warned) {
		cs_warned = 1;
		printf("Please install afsd with check server daemon.\n");
	    }
	    if (lastNMinCheck + PROBE_INTERVAL < now) {
		/* only check down servers */
		afs_CheckServers(1, NULL);
		lastNMinCheck = now;
	    }
	}
	if (last10MinCheck + 600 < now) {
#ifdef AFS_USERSPACE_IP_ADDR
	    extern int rxi_GetcbiInfo(void);
#endif
	    afs_Trace1(afs_iclSetp, CM_TRACE_PROBEUP, ICL_TYPE_INT32, 600);
#ifdef AFS_USERSPACE_IP_ADDR
	    if (rxi_GetcbiInfo()) {	/* addresses changed from last time */
		afs_FlushCBs();
	    }
#else /* AFS_USERSPACE_IP_ADDR */
	    if (rxi_GetIFInfo()) {	/* addresses changed from last time */
		afs_FlushCBs();
	    }
#endif /* else AFS_USERSPACE_IP_ADDR */
	    if (!afs_CheckServerDaemonStarted)
		afs_CheckServers(0, NULL);
	    afs_GCUserData(0);	/* gc old conns */
	    /* This is probably the wrong way of doing GC for the various
	     * exporters, but it will suffice for a while */
	    for (exporter = root_exported; exporter;
		 exporter = exporter->exp_next) {
		(void) EXP_GC(exporter, 0);	/* Generalize params */
	    }
	    {
		static int cnt = 0;
		if (++cnt < 12) {
		    afs_CheckVolumeNames(AFS_VOLCHECK_EXPIRED |
					 AFS_VOLCHECK_BUSY);
		} else {
		    cnt = 0;
		    afs_CheckVolumeNames(AFS_VOLCHECK_EXPIRED |
					 AFS_VOLCHECK_BUSY |
					 AFS_VOLCHECK_MTPTS);
		}
	    }
	    last10MinCheck = now;
	}
	if (last60MinCheck + 3600 < now) {
	    afs_Trace1(afs_iclSetp, CM_TRACE_PROBEVOLUME, ICL_TYPE_INT32,
		       3600);
	    afs_CheckRootVolume();
#if AFS_GCPAGS
	    if (afs_gcpags == AFS_GCPAGS_OK) {
		afs_int32 didany;
		afs_GCPAGs(&didany);
	    }
#endif
	    last60MinCheck = now;
	}

	if (afs_initState < 300) {	/* while things ain't rosy */
	    code = afs_CheckRootVolume();
	    if (code == 0)
		afs_initState = 300;	/* succeeded */
	    if (afs_initState < 200)
		afs_initState = 200;	/* tried once */
	    afs_osi_Wakeup(&afs_initState);
	}
	/* 18285 is because we're trying to divide evenly into 128, that is,
	 * CBSlotLen, while staying just under 20 seconds.  If CBSlotLen
	 * changes, we should probably change this interval, too.
	 * Some of the preceding actions may take quite some time, so we
	 * might not want to wait the entire interval. */
	now = 18285 - (osi_Time() - now);
	if (now > 0) {
	    afs_osi_Wait(now, &AFS_WaitHandler, 0);
	}
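	/* E.g. seven such sleeps total 7 * 18285 = 127995 ms, i.e. within
	 * 5 ms of 128 seconds, while each individual sleep stays under the
	 * 20-second protocol bound. */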
	if (afs_termState == AFSOP_STOP_AFS) {
	    if (afs_CheckServerDaemonStarted)
		afs_termState = AFSOP_STOP_CS;
	    else
		afs_termState = AFSOP_STOP_BKG;
	    afs_osi_Wakeup(&afs_termState);
	    return;
	}
    }
}
int afs_CheckRootVolume(void)
{
    char rootVolName[32];
    struct volume *tvp = NULL;
    int usingDynroot = afs_GetDynrootEnable();
    int localcell;

    AFS_STATCNT(afs_CheckRootVolume);
    if (*afs_rootVolumeName == 0) {
	strcpy(rootVolName, "root.afs");
    } else {
	strcpy(rootVolName, afs_rootVolumeName);
    }

    if (!usingDynroot) {
	struct cell *lc = afs_GetPrimaryCell(READ_LOCK);
	if (!lc)
	    return ENOENT;
	localcell = lc->cellNum;
	afs_PutCell(lc, READ_LOCK);
    }

    if (usingDynroot) {
	afs_GetDynrootFid(&afs_rootFid);
	tvp = afs_GetVolume(&afs_rootFid, NULL, READ_LOCK);
    } else {
	tvp = afs_GetVolumeByName(rootVolName, localcell, 1, NULL, READ_LOCK);
    }
    if (!tvp && !usingDynroot) {
	char buf[128];
	int len = strlen(rootVolName);

	if ((len < 9) || strcmp(&rootVolName[len - 9], ".readonly")) {
	    strcpy(buf, rootVolName);
	    afs_strcat(buf, ".readonly");
	    tvp = afs_GetVolumeByName(buf, localcell, 1, NULL, READ_LOCK);
	}
    }
    if (tvp) {
	if (!usingDynroot) {
	    int volid = (tvp->roVol ? tvp->roVol : tvp->volume);
	    afs_rootFid.Cell = localcell;
	    if (afs_rootFid.Fid.Volume && afs_rootFid.Fid.Volume != volid
		&& afs_globalVp) {
		/* If we had a root fid before and it changed location we reset
		 * the afs_globalVp so that it will be reevaluated.
		 * Just decrement the reference count.  This only occurs during
		 * initial cell setup and can panic the machine if we set the
		 * count to zero and fs checkv is executed when the current
		 * directory is /afs.
		 */
		AFS_FAST_RELE(afs_globalVp);
		afs_globalVp = NULL;
	    }
	    afs_rootFid.Fid.Volume = volid;
	    afs_rootFid.Fid.Vnode = 1;
	    afs_rootFid.Fid.Unique = 1;
	}
	afs_initState = 300;	/* won */
	afs_osi_Wakeup(&afs_initState);
	afs_PutVolume(tvp, READ_LOCK);
    }
#ifdef AFS_DEC_ENV
    /* This is to make sure that we update the root gnode
     * every time the root volume gets released */
    {
	struct gnode *rootgp;
	struct mount *mp;
	int code;

	/* Only do this if afs_globalVFS is properly set: due to race
	 * conditions this routine could be called before the gfs_mount is
	 * performed!  Furthermore, afs_root (called below) *waits* until
	 * initState >= 200, so we don't try this until we've gotten
	 * at least that far. */
	if (afs_globalVFS && afs_initState >= 200) {
	    if (code = afs_root(afs_globalVFS, &rootgp))
		return code;
	    mp = (struct mount *) afs_globalVFS->vfs_data;
	    mp->m_rootgp = gget(mp, 0, 0, (char *)rootgp);
	    afs_unlock(mp->m_rootgp);	/* unlock basic gnode */
	    afs_vrele(VTOAFS(rootgp));	/* zap afs_root's vnode hold */
	}
    }
#endif
    if (afs_rootFid.Fid.Volume)
	return 0;
    else
	return ENOENT;
}
/* ptr_parm 0 is the pathname, size_parm 0 to the fetch is the chunk number */
static void BPath(register struct brequest *ab)
{
    register struct dcache *tdc = NULL;
    struct vcache *tvc = NULL;
    struct vnode *tvn = NULL;
#ifdef AFS_LINUX22_ENV
    struct dentry *dp = NULL;
#endif
    afs_size_t offset, len;
    struct vrequest treq;
    afs_int32 code;

    AFS_STATCNT(BPath);
    if ((code = afs_InitReq(&treq, ab->cred)))
	return;
#ifdef AFS_LINUX22_ENV
    code = gop_lookupname((char *)ab->ptr_parm[0], AFS_UIOSYS, 1, NULL, &dp);
    if (dp)
	tvn = (struct vnode *) dp->d_inode;
#else
    code = gop_lookupname((char *)ab->ptr_parm[0], AFS_UIOSYS, 1, NULL, &tvn);
#endif
    osi_FreeLargeSpace((char *)ab->ptr_parm[0]);	/* free path name buffer here */
    if (code)
	return;
    /* now path may not have been in afs, so check that before calling our cache manager */
    if (!tvn || !IsAfsVnode(tvn)) {
	/* release it and give up */
	if (tvn) {
#ifdef AFS_LINUX22_ENV
	    dput(dp);
#else
	    AFS_RELE(tvn);
#endif
	}
	return;
    }

#ifdef AFS_DEC_ENV
    tvc = VTOAFS(afs_gntovn(tvn));
#else
    tvc = VTOAFS(tvn);
#endif
    /* here we know it's an afs vnode, so we can get the data for the chunk */
    tdc = afs_GetDCache(tvc, ab->size_parm[0], &treq, &offset, &len, 1);
    if (tdc)
	afs_PutDCache(tdc);	/* put it back */
#ifdef AFS_LINUX22_ENV
    dput(dp);
#else
    AFS_RELE(tvn);
#endif
}
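/* Usage sketch (illustrative; the argument values are assumptions, not
 * copied from a real caller): a background path evaluation might be queued
 * roughly as
 *
 *	tp = osi_AllocLargeSpace(AFS_LRALLOCSIZ);
 *	strcpy(tp, apath);
 *	afs_BQueue(BOP_PATH, NULL, 1, 0, acred,
 *		   (afs_size_t) achunk, (afs_size_t) 0, tp);
 *
 * BPath itself frees the LargeSpace buffer, as noted above. */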
/* size_parm 0 to the fetch is the chunk number,
 * ptr_parm 0 is the dcache entry to wakeup,
 * size_parm 1 is true iff we should release the dcache entry here.
 */
static void BPrefetch(register struct brequest *ab)
{
    register struct dcache *tdc;
    register struct vcache *tvc;
    afs_size_t offset, len;
    struct vrequest treq;

    AFS_STATCNT(BPrefetch);
    if ((len = afs_InitReq(&treq, ab->cred)))
	return;
    tvc = ab->vc;
    tdc = afs_GetDCache(tvc, ab->size_parm[0], &treq, &offset, &len, 1);
    if (tdc)
	afs_PutDCache(tdc);	/* put it back */
    /* now, dude may be waiting for us to clear DFFetchReq bit; do so.  Can't
     * use tdc from GetDCache since afs_GetDCache may fail, but someone may
     * be waiting for our wakeup anyway.
     */
    tdc = (struct dcache *) (ab->ptr_parm[0]);
    ObtainSharedLock(&tdc->lock, 640);
    if (tdc->mflags & DFFetchReq) {
	UpgradeSToWLock(&tdc->lock, 641);
	tdc->mflags &= ~DFFetchReq;
	ReleaseWriteLock(&tdc->lock);
    } else {
	ReleaseSharedLock(&tdc->lock);
    }
    afs_osi_Wakeup(&tdc->validPos);
    if (ab->size_parm[1]) {
	afs_PutDCache(tdc);	/* put this one back, too */
    }
}
static void BStore(register struct brequest *ab)
{
    register struct vcache *tvc;
    register afs_int32 code;
    struct vrequest treq;
#if defined(AFS_SGI_ENV)
    struct cred *tmpcred;
#endif

    AFS_STATCNT(BStore);
    if ((code = afs_InitReq(&treq, ab->cred)))
	return;
    code = 0;
    tvc = ab->vc;
#if defined(AFS_SGI_ENV)
    /*
     * Since StoreOnLastReference can end up calling osi_SyncVM, which
     * calls into VM code that assumes that u.u_cred has the
     * correct credentials, we set ours to theirs for this transaction.
     */
    tmpcred = OSI_GET_CURRENT_CRED();
    OSI_SET_CURRENT_CRED(ab->cred);

    /*
     * To avoid recursion, since the WriteLock may be released during VM
     * operations, we hold the VOP_RWLOCK across this transaction, as
     * do the other callers of StoreOnLastReference.
     */
    AFS_RWLOCK((vnode_t *) tvc, 1);
#endif
    ObtainWriteLock(&tvc->lock, 209);
    code = afs_StoreOnLastReference(tvc, &treq);
    ReleaseWriteLock(&tvc->lock);
#if defined(AFS_SGI_ENV)
    OSI_SET_CURRENT_CRED(tmpcred);
    AFS_RWUNLOCK((vnode_t *) tvc, 1);
#endif
    /* now set final return code, and wakeup anyone waiting */
    if ((ab->flags & BUVALID) == 0) {
	ab->code = afs_CheckCode(code, &treq, 43);	/* set final code, since treq doesn't go across processes */
	ab->flags |= BUVALID;
	if (ab->flags & BUWAIT) {
	    ab->flags &= ~BUWAIT;
	    afs_osi_Wakeup(ab);
	}
    }
}
/* release a held request buffer */
void afs_BRelease(register struct brequest *ab)
{
    AFS_STATCNT(afs_BRelease);
    MObtainWriteLock(&afs_xbrs, 294);
    if (--ab->refCount <= 0) {
	ab->flags = 0;
    }
    if (afs_brsWaiters)
	afs_osi_Wakeup(&afs_brsWaiters);
    MReleaseWriteLock(&afs_xbrs);
}

/* return true if bkg fetch daemons are all busy */
int afs_BBusy(void)
{
    AFS_STATCNT(afs_BBusy);
    if (afs_brsDaemons > 0)
	return 0;
    return 1;
}
struct brequest *afs_BQueue(register short aopcode, register struct vcache *avc,
			    afs_int32 dontwait, afs_int32 ause,
			    struct AFS_UCRED *acred, afs_size_t asparm0,
			    afs_size_t asparm1, void *apparm0)
{
    register int i;
    register struct brequest *tb;

    AFS_STATCNT(afs_BQueue);
    MObtainWriteLock(&afs_xbrs, 296);
    while (1) {
	tb = afs_brs;
	for (i = 0; i < NBRS; i++, tb++) {
	    if (tb->refCount == 0)
		break;
	}
	if (i < NBRS) {
	    /* found a buffer */
	    tb->opcode = aopcode;
	    tb->vc = avc;
	    tb->cred = acred;
	    crhold(tb->cred);
	    if (avc) {
#if defined(AFS_NETBSD_ENV) || defined(AFS_OBSD_ENV)
		AFS_HOLD(AFSTOV(avc));
#else
		VN_HOLD(AFSTOV(avc));
#endif
	    }
	    tb->refCount = ause + 1;
	    tb->size_parm[0] = asparm0;
	    tb->size_parm[1] = asparm1;
	    tb->ptr_parm[0] = apparm0;
	    tb->flags = 0;
	    tb->code = 0;
	    tb->ts = afs_brs_count++;
	    /* if daemons are waiting for work, wake them up */
	    if (afs_brsDaemons > 0) {
		afs_osi_Wakeup(&afs_brsDaemons);
	    }
	    MReleaseWriteLock(&afs_xbrs);
	    return tb;
	}
	if (dontwait) {
	    MReleaseWriteLock(&afs_xbrs);
	    return NULL;
	}
	/* no free buffers, sleep a while */
	afs_brsWaiters++;
	MReleaseWriteLock(&afs_xbrs);
	afs_osi_Sleep(&afs_brsWaiters);
	MObtainWriteLock(&afs_xbrs, 301);
	afs_brsWaiters--;
    }
}
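/* Usage sketch (illustrative; the argument values are assumptions): a
 * background prefetch of chunk `achunk' might be queued as
 *
 *	bp = afs_BQueue(BOP_FETCH, avc, 1, 0, acred,
 *			(afs_size_t) achunk, (afs_size_t) 1, tdc);
 *
 * where size_parm[1] = 1 asks BPrefetch to put the dcache reference back
 * when the fetch completes, and dontwait = 1 makes afs_BQueue return NULL
 * instead of sleeping when all NBRS request slots are busy. */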
#ifdef AFS_AIX32_ENV
#ifdef AFS_AIX41_ENV
/* AIX 4.1 has a much different sleep/wakeup mechanism available for use.
 * The modifications here will work for either a UP or MP machine.
 */
struct buf *afs_asyncbuf = (struct buf *) 0;
tid_t afs_asyncbuf_cv = EVENT_NULL;
afs_int32 afs_biodcnt = 0;

/* in implementing this, I assumed that all external linked lists were
 * null-terminated.
 *
 * Several places in this code traverse a linked list.  The algorithm
 * used here is probably unfamiliar to most people.  Careful examination
 * will show that it eliminates an assignment inside the loop, as compared
 * to the standard algorithm, at the cost of occasionally using an extra
 * variable.
 */
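/* The technique, sketched on its own (illustrative, not the code used
 * below): keep a pointer to the *link* that points at the best element,
 * so the element can be unlinked without keeping a trailing pointer
 * updated on every iteration:
 *
 *	struct buf *bp, **lpp, **bestlpp;
 *	bestlpp = lpp = &afs_asyncbuf;
 *	for (bp = *lpp; bp; lpp = &bp->av_forw, bp = *lpp)
 *	    if ((long) bp->av_back - (long) (*bestlpp)->av_back < 0)
 *		bestlpp = lpp;
 *	bp = *bestlpp;
 *	*bestlpp = bp->av_forw;		(unlink the oldest buffer)
 */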
/*
 * This function obtains, and returns, a pointer to a buffer for
 * processing by a daemon.  It sleeps until such a buffer is available.
 * The source of buffers for it is the list afs_asyncbuf (see also
 * naix_vm_strategy).  This function may be invoked concurrently by
 * several processes, that is, several instances of the same daemon.
 * naix_vm_strategy, which adds buffers to the list, runs at interrupt
 * level, while get_bioreq runs at process level.
 *
 * Since AIX 4.1 can wake just one process at a time, the separate sleep
 * addresses have been removed.
 * Note that the kernel_lock is held until the e_sleep_thread() occurs.
 * The afs_asyncbuf_lock is primarily used to serialize access between
 * process and interrupts.
 */
Simple_lock afs_asyncbuf_lock;
/*static*/ struct buf *afs_get_bioreq()
{
    struct buf *bp = NULL;
    struct buf *bestbp;
    struct buf **bestlbpP, **lbpP;
    long bestage;
    struct buf *t1P, *t2P;	/* temp pointers for list manipulation */
    int oldPriority;
    struct afs_bioqueue *s;
    /* ??? Does the forward pointer of the returned buffer need to be NULL? */

    /* Disable interrupts from the strategy function, and save the
     * prior priority level and lock access to the afs_asyncbuf.
     */
    oldPriority = disable_lock(INTMAX, &afs_asyncbuf_lock);

    while (1) {
	if (afs_asyncbuf) {
	    /* look for oldest buffer */
	    bp = bestbp = afs_asyncbuf;
	    bestage = (long) bestbp->av_back;
	    bestlbpP = &afs_asyncbuf;
	    while (1) {
		lbpP = &bp->av_forw;
		bp = *lbpP;
		if (!bp)
		    break;
		if ((long) bp->av_back - bestage < 0) {
		    bestbp = bp;
		    bestlbpP = lbpP;
		    bestage = (long) bp->av_back;
		}
	    }
	    bp = bestbp;
	    *bestlbpP = bp->av_forw;
	    break;
	}
	else {
	    int interrupted;
	    /* If afs_asyncbuf is null, it is necessary to go to sleep.
	     * e_wakeup_one() ensures that only one thread wakes.
	     */
	    /* The LOCK_HANDLER indicates to e_sleep_thread to only drop the
	     * lock on an MP machine.
	     */
	    interrupted = e_sleep_thread(&afs_asyncbuf_cv,
					 &afs_asyncbuf_lock,
					 LOCK_HANDLER | INTERRUPTIBLE);
	    if (interrupted == THREAD_INTERRUPTED) {
		/* re-enable interrupts from strategy */
		unlock_enable(oldPriority, &afs_asyncbuf_lock);
		return NULL;
	    }
	}	/* end of "else asyncbuf is empty" */
    }	/* end of "inner loop" */
    unlock_enable(oldPriority, &afs_asyncbuf_lock);

    /* For the convenience of other code, replace the gnodes in
     * the b_vp field of bp and the other buffers on the b_work
     * chain with the corresponding vnodes.
     *
     * ??? what happens to the gnodes?  They're not just cut loose,
     * are they?
     */
    for (t1P = bp;;) {
	t2P = (struct buf *) t1P->b_work;
	t1P->b_vp = ((struct gnode *) t1P->b_vp)->gn_vnode;
	if (!t2P)
	    break;

	t1P = (struct buf *) t2P->b_work;
	t2P->b_vp = ((struct gnode *) t2P->b_vp)->gn_vnode;
	if (!t1P)
	    break;
    }
    /* If the buffer does not specify I/O, it may immediately
     * be returned to the caller.  This condition is detected
     * by examining the buffer's flags (the b_flags field).  If
     * the B_PFPROT bit is set, the buffer represents a protection
     * violation, rather than a request for I/O.  The remainder
     * of the outer loop handles the case where the B_PFPROT bit is clear.
     */
    if (bp->b_flags & B_PFPROT) {
	return (bp);
    }
    return (bp);
}	/* end of function get_bioreq() */
/*
 * This function is the daemon.  It is called from the syscall
 * interface.  Ordinarily, a script or an administrator will run a
 * daemon startup utility, specifying the number of I/O daemons to
 * run.  The utility will fork off that number of processes,
 * each making the appropriate syscall, which will cause this
 * function to be invoked.
 */
static int afs_initbiod = 0;	/* this is self-initializing code */
int afs_BioDaemon(afs_int32 nbiods)
{
    afs_int32 code, s, pflg = 0;
    label_t jmpbuf;
    struct buf *bp, *bp1, *tbp1, *tbp2;	/* temp pointers only */
    caddr_t tmpaddr;
    struct vcache *vcp;
    char tmperr;

    if (!afs_initbiod) {
	afs_initbiod = 1;
	/* pin lock, since we'll be using it in an interrupt. */
	lock_alloc(&afs_asyncbuf_lock, LOCK_ALLOC_PIN, 2, 1);
	simple_lock_init(&afs_asyncbuf_lock);
	pin(&afs_asyncbuf, sizeof(struct buf *));
	pin(&afs_asyncbuf_cv, sizeof(afs_int32));
    }

    /* Ignore HUP signals... */
    {
	sigset_t sigbits, osigbits;
	/*
	 * add SIGHUP to the set of already masked signals
	 */
	SIGFILLSET(sigbits);			/* allow all signals	*/
	SIGDELSET(sigbits, SIGHUP);		/*   except SIGHUP	*/
	limit_sigs(&sigbits, &osigbits);	/*   and already masked	*/
    }
    /* Main body starts here -- this is an intentional infinite loop, and
     * should NEVER exit.
     *
     * Now, the loop will exit if get_bioreq() returns NULL, indicating
     * that we've been interrupted.
     */
    while (1) {
	bp = afs_get_bioreq();
	if (!bp)
	    break;		/* we were interrupted */
	if (code = setjmpx(&jmpbuf)) {
	    /* This should not have happened; maybe a lack of resources. */
	    s = disable_lock(INTMAX, &afs_asyncbuf_lock);
	    for (bp1 = bp; bp; bp = bp1) {
		if (bp1)
		    bp1 = (struct buf *) bp1->b_work;
		bp->b_actf = 0;
		bp->b_error = code;
		bp->b_flags |= B_ERROR;
		iodone(bp);
	    }
	    unlock_enable(s, &afs_asyncbuf_lock);
	    continue;
	}
	vcp = VTOAFS(bp->b_vp);
	if (bp->b_flags & B_PFSTORE) {	/* XXXX */
	    ObtainWriteLock(&vcp->lock, 404);
	    if (vcp->v.v_gnode->gn_mwrcnt) {
		afs_offs_t newlength =
		    (afs_offs_t) dbtob(bp->b_blkno) + bp->b_bcount;
		if (vcp->m.Length < newlength) {
		    afs_Trace4(afs_iclSetp, CM_TRACE_SETLENGTH,
			       ICL_TYPE_STRING, __FILE__,
			       ICL_TYPE_LONG, __LINE__,
			       ICL_TYPE_OFFSET,
			       ICL_HANDLE_OFFSET(vcp->m.Length),
			       ICL_TYPE_OFFSET,
			       ICL_HANDLE_OFFSET(newlength));
		    vcp->m.Length = newlength;
		}
	    }
	    ReleaseWriteLock(&vcp->lock);
	}
	/* If the buffer represents a protection violation, rather than
	 * an actual request for I/O, no special action need be taken.
	 */
	if (bp->b_flags & B_PFPROT) {
	    iodone(bp);		/* Notify all users of the buffer that we're done */
	    clrjmpx(&jmpbuf);
	    continue;
	}

	if (DOvmlock)
	    ObtainWriteLock(&vcp->pvmlock, 211);
	/*
	 * First map its data area to a region in the current address space
	 * by calling vm_att with the subspace identifier, and a pointer to
	 * the data area.  vm_att returns a new data area pointer, but we
	 * also want to hang onto the old one.
	 */
	tmpaddr = bp->b_baddr;
	bp->b_baddr = (caddr_t) vm_att(bp->b_xmemd.subspace_id, tmpaddr);
	tmperr = afs_ustrategy(bp);	/* temp variable saves offset calculation */
	if (tmperr) {			/* error case */
	    bp->b_flags |= B_ERROR;	/* should other flags remain set ??? */
	    bp->b_error = tmperr;
	}

	/* Unmap the buffer's data area by calling vm_det.  Reset data area
	 * to the value that we saved above.
	 */
	vm_det(bp->b_baddr);
	bp->b_baddr = tmpaddr;
	/*
	 * buffer may be linked with other buffers via the b_work field.
	 * See also naix_vm_strategy.  For each buffer in the chain (including
	 * bp) notify all users of the buffer that the daemon is finished
	 * using it by calling iodone.
	 * assumes iodone can modify the b_work field.
	 */
	for (tbp1 = bp;;) {
	    tbp2 = (struct buf *) tbp1->b_work;
	    iodone(tbp1);
	    if (!tbp2)
		break;

	    tbp1 = (struct buf *) tbp2->b_work;
	    iodone(tbp2);
	    if (!tbp1)
		break;
	}
	if (DOvmlock)
	    ReleaseWriteLock(&vcp->pvmlock);	/* Unlock the vnode. */
	clrjmpx(&jmpbuf);	/* cancel the setjmpx frame */
    }	/* infinite loop (unless we're interrupted) */
}	/* end of afs_BioDaemon() */
#else /* AFS_AIX41_ENV */

struct afs_bioqueue {
    struct squeue lruq;
    int sleeper;	/* 1 if a daemon is sleeping on afs_asyncbuf */
    int cnt;
};
struct afs_bioqueue afs_bioqueue;
struct buf *afs_busyq = NULL;
struct buf *afs_asyncbuf;
afs_int32 afs_biodcnt = 0;
/* in implementing this, I assumed that all external linked lists were
 * null-terminated.
 *
 * Several places in this code traverse a linked list.  The algorithm
 * used here is probably unfamiliar to most people.  Careful examination
 * will show that it eliminates an assignment inside the loop, as compared
 * to the standard algorithm, at the cost of occasionally using an extra
 * variable.
 */

/*
 * This function obtains, and returns, a pointer to a buffer for
 * processing by a daemon.  It sleeps until such a buffer is available.
 * The source of buffers for it is the list afs_asyncbuf (see also
 * naix_vm_strategy).  This function may be invoked concurrently by
 * several processes, that is, several instances of the same daemon.
 * naix_vm_strategy, which adds buffers to the list, runs at interrupt
 * level, while get_bioreq runs at process level.
 *
 * The common kernel paradigm of sleeping and waking up, in which all the
 * competing processes sleep waiting for wakeups on one address, is not
 * followed here.  Instead, the following paradigm is used: when a daemon
 * goes to sleep, it checks for other sleeping daemons.  If there aren't any,
 * it sleeps on the address of variable afs_asyncbuf.  But if there is
 * already a daemon sleeping on that address, it threads its own unique
 * address onto a list, and sleeps on that address.  This way, every
 * sleeper is sleeping on a different address, and every wakeup wakes up
 * exactly one daemon.  This prevents a whole bunch of daemons from waking
 * up and then immediately having to go back to sleep.  This provides a
 * performance gain and makes the I/O scheduling a bit more deterministic.
 * The list of sleepers is variable afs_bioqueue.  The unique address
 * on which to sleep is passed to get_bioreq as its parameter.
 */
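/* In outline (condensed from the code below, not a separate mechanism):
 * if afs_bioqueue.sleeper is set, some daemon already sleeps on
 * &afs_asyncbuf, so the caller QAdds its own token to afs_bioqueue.lruq
 * and sleeps on (caddr_t) self; otherwise it sets sleeper and sleeps on
 * (caddr_t) &afs_asyncbuf.  The waker mirrors this: wakeup(&afs_asyncbuf)
 * if sleeper is set, else dequeue one token from the lruq and wake that
 * address. */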
/*static*/ struct buf *afs_get_bioreq(self)
     struct afs_bioqueue *self;	/* address on which to sleep */
{
    struct buf *bp = NULL;
    struct buf *bestbp;
    struct buf **bestlbpP, **lbpP;
    int bestage;
    struct buf *t1P, *t2P;	/* temp pointers for list manipulation */
    int oldPriority;
    struct afs_bioqueue *s;

    /* ??? Does the forward pointer of the returned buffer need to be NULL? */

    /* Disable interrupts from the strategy function, and save the
     * prior priority level.
     */
    oldPriority = i_disable(INTMAX);
    /* Each iteration of following loop either pulls
     * a buffer off afs_asyncbuf, or sleeps.
     */
    while (1) {	/* inner loop */
	if (afs_asyncbuf) {
	    /* look for oldest buffer */
	    bp = bestbp = afs_asyncbuf;
	    bestage = (int) bestbp->av_back;
	    bestlbpP = &afs_asyncbuf;
	    while (1) {
		lbpP = &bp->av_forw;
		bp = *lbpP;
		if (!bp)
		    break;
		if ((int) bp->av_back - bestage < 0) {
		    bestbp = bp;
		    bestlbpP = lbpP;
		    bestage = (int) bp->av_back;
		}
	    }
	    bp = bestbp;
	    *bestlbpP = bp->av_forw;
	    break;
	}
	else {
	    int interrupted;
	    /* If afs_asyncbuf is null, it is necessary to go to sleep.
	     * There are two possibilities: either there is already a
	     * daemon that is sleeping on the address of afs_asyncbuf,
	     * or there isn't.
	     */
	    if (afs_bioqueue.sleeper) {
		/* enqueue on the sleeper list and sleep on our own address */
		QAdd(&(afs_bioqueue.lruq), &(self->lruq));
		interrupted = sleep((caddr_t) self, PCATCH | (PZERO + 1));
		if (self->lruq.next != &self->lruq) {	/* XXX ##3 XXX */
		    QRemove(&(self->lruq));	/* dequeue */
		}
		self->cnt++;
		afs_bioqueue.sleeper = FALSE;
		if (interrupted) {
		    /* re-enable interrupts from strategy */
		    i_enable(oldPriority);
		    return NULL;
		}
	    } else {	/* no other sleeper; sleep on afs_asyncbuf itself */
		afs_bioqueue.sleeper = TRUE;
		interrupted = sleep((caddr_t) &afs_asyncbuf,
				    PCATCH | (PZERO + 1));
		afs_bioqueue.sleeper = FALSE;
		if (interrupted) {
		    /*
		     * We need to wakeup another daemon if present
		     * since we were waiting on afs_asyncbuf.
		     */
#ifdef notdef	/* The following doesn't work as advertised */
		    if (afs_bioqueue.lruq.next != &afs_bioqueue.lruq) {
			struct squeue *bq = afs_bioqueue.lruq.next;
			QRemove(bq);
			wakeup(bq);
		    }
#endif
		    /* re-enable interrupts from strategy */
		    i_enable(oldPriority);
		    return NULL;
		}
	    }
	}	/* end of "else asyncbuf is empty" */
    }	/* end of "inner loop" */
    i_enable(oldPriority);	/* re-enable interrupts from strategy */

    /* For the convenience of other code, replace the gnodes in
     * the b_vp field of bp and the other buffers on the b_work
     * chain with the corresponding vnodes.
     *
     * ??? what happens to the gnodes?  They're not just cut loose,
     * are they?
     */
    for (t1P = bp;;) {
	t2P = (struct buf *) t1P->b_work;
	t1P->b_vp = ((struct gnode *) t1P->b_vp)->gn_vnode;
	if (!t2P)
	    break;

	t1P = (struct buf *) t2P->b_work;
	t2P->b_vp = ((struct gnode *) t2P->b_vp)->gn_vnode;
	if (!t1P)
	    break;
    }
    /* If the buffer does not specify I/O, it may immediately
     * be returned to the caller.  This condition is detected
     * by examining the buffer's flags (the b_flags field).  If
     * the B_PFPROT bit is set, the buffer represents a protection
     * violation, rather than a request for I/O.  The remainder
     * of the outer loop handles the case where the B_PFPROT bit is clear.
     */
    if (bp->b_flags & B_PFPROT) {
	return (bp);
    }

    /* wake up another process to handle the next buffer, and return
     * to the caller with this buffer.
     */
    oldPriority = i_disable(INTMAX);

    /* determine where to find the sleeping process.
     * There are two cases: either it is sleeping on
     * afs_asyncbuf, or it is sleeping on its own unique
     * address.  These cases are distinguished by examining
     * the sleeper field of afs_bioqueue.
     */
    if (afs_bioqueue.sleeper) {
	wakeup(&afs_asyncbuf);
    } else {
	if (afs_bioqueue.lruq.next == &afs_bioqueue.lruq) {
	    /* queue is empty, what now? ??? */
	    /* Should this be impossible, or does    */
	    /* it just mean that nobody is sleeping? */ ;
	} else {
	    struct squeue *bq = afs_bioqueue.lruq.next;
	    QRemove(bq);
	    QInit(bq);
	    wakeup((caddr_t) bq);
	    afs_bioqueue.sleeper = TRUE;	/* XXX ##5 XXX */
	}
    }
    i_enable(oldPriority);	/* re-enable interrupts from strategy */
    return (bp);
}	/* end of function get_bioreq() */
/*
 * This function is the daemon.  It is called from the syscall
 * interface.  Ordinarily, a script or an administrator will run a
 * daemon startup utility, specifying the number of I/O daemons to
 * run.  The utility will fork off that number of processes,
 * each making the appropriate syscall, which will cause this
 * function to be invoked.
 */
static int afs_initbiod = 0;	/* this is self-initializing code */
afs_BioDaemon(nbiods)
     afs_int32 nbiods;
{
    struct afs_bioqueue *self;
    afs_int32 code, s, pflg = 0;
    label_t jmpbuf;
    struct buf *bp, *bp1, *tbp1, *tbp2;	/* temp pointers only */
    caddr_t tmpaddr;
    struct vcache *vcp;
    char tmperr;

    if (!afs_initbiod) {
	afs_initbiod = 1;
	/* Initialize the queue of waiting processes, afs_bioqueue. */
	QInit(&(afs_bioqueue.lruq));
    }

    /* establish ourself as a kernel process so shutdown won't kill us */
    /* u.u_procp->p_flag |= SKPROC; */

    /* Initialize a token (self) to use in the queue of sleeping processes. */
    self = (struct afs_bioqueue *) afs_osi_Alloc(sizeof(struct afs_bioqueue));
    pin(self, sizeof(struct afs_bioqueue));	/* fix in memory */
    memset(self, 0, sizeof(*self));
    QInit(&(self->lruq));	/* initialize queue entry pointers */

    /* Ignore HUP signals... */
    SIGDELSET(u.u_procp->p_sig, SIGHUP);
    SIGADDSET(u.u_procp->p_sigignore, SIGHUP);
    SIGDELSET(u.u_procp->p_sigcatch, SIGHUP);

    /* Main body starts here -- this is an intentional infinite loop, and
     * should NEVER exit.
     *
     * Now, the loop will exit if get_bioreq() returns NULL, indicating
     * that we've been interrupted.
     */
    while (1) {
	bp = afs_get_bioreq(self);
	if (!bp)
	    break;		/* we were interrupted */
	if (code = setjmpx(&jmpbuf)) {
	    /* This should not have happened; maybe a lack of resources. */
	    for (bp1 = bp; bp; bp = bp1) {
		if (bp1)
		    bp1 = (struct buf *) bp1->b_work;
		bp->b_actf = 0;
		bp->b_error = code;
		bp->b_flags |= B_ERROR;
		iodone(bp);
	    }
	    continue;
	}
	vcp = VTOAFS(bp->b_vp);
	if (bp->b_flags & B_PFSTORE) {
	    ObtainWriteLock(&vcp->lock, 210);
	    if (vcp->v.v_gnode->gn_mwrcnt) {
		afs_offs_t newlength =
		    (afs_offs_t) dbtob(bp->b_blkno) + bp->b_bcount;
		if (vcp->m.Length < newlength) {
		    afs_Trace4(afs_iclSetp, CM_TRACE_SETLENGTH,
			       ICL_TYPE_STRING, __FILE__,
			       ICL_TYPE_LONG, __LINE__,
			       ICL_TYPE_OFFSET,
			       ICL_HANDLE_OFFSET(vcp->m.Length),
			       ICL_TYPE_OFFSET,
			       ICL_HANDLE_OFFSET(newlength));
		    vcp->m.Length = newlength;
		}
	    }
	    ReleaseWriteLock(&vcp->lock);
	}
	/* If the buffer represents a protection violation, rather than
	 * an actual request for I/O, no special action need be taken.
	 */
	if (bp->b_flags & B_PFPROT) {
	    iodone(bp);		/* Notify all users of the buffer that we're done */
	    clrjmpx(&jmpbuf);
	    continue;
	}
	if (DOvmlock)
	    ObtainWriteLock(&vcp->pvmlock, 558);
	/*
	 * First map its data area to a region in the current address space
	 * by calling vm_att with the subspace identifier, and a pointer to
	 * the data area.  vm_att returns a new data area pointer, but we
	 * also want to hang onto the old one.
	 */
	tmpaddr = bp->b_baddr;
	bp->b_baddr = vm_att(bp->b_xmemd.subspace_id, tmpaddr);
	tmperr = afs_ustrategy(bp);	/* temp variable saves offset calculation */
	if (tmperr) {			/* error case */
	    bp->b_flags |= B_ERROR;	/* should other flags remain set ??? */
	    bp->b_error = tmperr;
	}

	/* Unmap the buffer's data area by calling vm_det.  Reset data area
	 * to the value that we saved above.
	 */
	vm_det(bp->b_un.b_addr);
	bp->b_baddr = tmpaddr;
	/*
	 * buffer may be linked with other buffers via the b_work field.
	 * See also naix_vm_strategy.  For each buffer in the chain (including
	 * bp) notify all users of the buffer that the daemon is finished
	 * using it by calling iodone.
	 * assumes iodone can modify the b_work field.
	 */
	for (tbp1 = bp;;) {
	    tbp2 = (struct buf *) tbp1->b_work;
	    iodone(tbp1);
	    if (!tbp2)
		break;

	    tbp1 = (struct buf *) tbp2->b_work;
	    iodone(tbp2);
	    if (!tbp1)
		break;
	}
	if (DOvmlock)
	    ReleaseWriteLock(&vcp->pvmlock);	/* Unlock the vnode. */
	clrjmpx(&jmpbuf);	/* cancel the setjmpx frame */
    }	/* infinite loop (unless we're interrupted) */

    unpin(self, sizeof(struct afs_bioqueue));
    afs_osi_Free(self, sizeof(struct afs_bioqueue));
}	/* end of afs_BioDaemon() */
#endif /* AFS_AIX41_ENV */
#endif /* AFS_AIX32_ENV */
void afs_BackgroundDaemon(void)
{
    struct brequest *tb;
    int i, foundAny;

    AFS_STATCNT(afs_BackgroundDaemon);
    /* initialize subsystem */
    if (brsInit == 0) {
	LOCK_INIT(&afs_xbrs, "afs_xbrs");
	memset((char *)afs_brs, 0, sizeof(afs_brs));
	brsInit = 1;
#if defined(AFS_SGI_ENV) && defined(AFS_SGI_SHORTSTACK)
	/*
	 * steal the first daemon for doing delayed DSlot flushing
	 * (see afs_GetDownDSlot)
	 */
	AFS_GUNLOCK();
	afs_sgidaemon();
	return;
#endif
    }
    afs_nbrs++;

    MObtainWriteLock(&afs_xbrs, 302);
    while (1) {
	int min_ts = 0;
	struct brequest *min_tb = NULL;

	if (afs_termState == AFSOP_STOP_BKG) {
	    if (--afs_nbrs <= 0)
		afs_termState = AFSOP_STOP_TRUNCDAEMON;
	    MReleaseWriteLock(&afs_xbrs);
	    afs_osi_Wakeup(&afs_termState);
	    return;
	}
	/* find a request */
	tb = afs_brs;
	foundAny = 0;
	for (i = 0; i < NBRS; i++, tb++) {
	    /* look for request with smallest ts */
	    if ((tb->refCount > 0) && !(tb->flags & BSTARTED)) {
		/* new request, not yet picked up */
		if ((min_tb && (min_ts - tb->ts > 0)) || !min_tb) {
		    /* signed difference handles ts counter wraparound */
		    min_tb = tb;
		    min_ts = tb->ts;
		}
	    }
	}
	if ((tb = min_tb)) {
	    /* claim and process this request */
	    tb->flags |= BSTARTED;
	    MReleaseWriteLock(&afs_xbrs);
	    foundAny = 1;
	    afs_Trace1(afs_iclSetp, CM_TRACE_BKG1, ICL_TYPE_INT32,
		       tb->opcode);
	    if (tb->opcode == BOP_FETCH)
		BPrefetch(tb);
	    else if (tb->opcode == BOP_STORE)
		BStore(tb);
	    else if (tb->opcode == BOP_PATH)
		BPath(tb);
	    else
		panic("background bop");
	    if (tb->vc) {
#ifdef AFS_DEC_ENV
		tb->vc->vrefCount--;	/* fix up reference count */
#else
		AFS_RELE(AFSTOV(tb->vc));	/* MUST call vnode layer or could lose vnodes */
#endif
		tb->vc = NULL;
	    }
	    if (tb->cred) {
		crfree(tb->cred);
		tb->cred = (struct AFS_UCRED *) 0;
	    }
	    afs_BRelease(tb);	/* this grabs and releases afs_xbrs lock */
	    MObtainWriteLock(&afs_xbrs, 305);	/* reobtain lock */
	}
	else {
	    /* wait for new request */
	    afs_brsDaemons++;
	    MReleaseWriteLock(&afs_xbrs);
	    afs_osi_Sleep(&afs_brsDaemons);
	    MObtainWriteLock(&afs_xbrs, 307);
	    afs_brsDaemons--;
	}
    }
}
void shutdown_daemons(void)
{
    AFS_STATCNT(shutdown_daemons);
    if (afs_cold_shutdown) {
	afs_brsDaemons = brsInit = 0;
	rxepoch_checked = afs_nbrs = 0;
	memset((char *)afs_brs, 0, sizeof(afs_brs));
	memset((char *)&afs_xbrs, 0, sizeof(afs_lock_t));
	afs_brsWaiters = 0;
#ifdef AFS_AIX32_ENV
#ifdef AFS_AIX41_ENV
	lock_free(&afs_asyncbuf_lock);
	unpin(&afs_asyncbuf, sizeof(struct buf *));
	unpin(&afs_asyncbuf_cv, sizeof(afs_int32));
#else /* AFS_AIX41_ENV */
	afs_busyq = NULL;
	afs_biodcnt = 0;
	memset((char *)&afs_bioqueue, 0, sizeof(struct afs_bioqueue));
#endif
	afs_initbiod = 0;
#endif
    }
}
#if defined(AFS_SGI_ENV) && defined(AFS_SGI_SHORTSTACK)
/*
 * sgi - daemon - handles certain operations that otherwise
 * would use up too much kernel stack space
 *
 * This all assumes that since the caller must have the xdcache lock
 * exclusively, the list will never be more than one long
 * and no one else can attempt to add anything until we're done.
 */
SV_TYPE afs_sgibksync;
SV_TYPE afs_sgibkwait;
lock_t afs_sgibklock;
struct dcache *afs_sgibklist;
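/* Producer side, in outline (a sketch; see afs_GetDownDSlot for the real
 * code): a caller hands one dcache entry to this daemon roughly as
 *
 *	s = SPLOCK(afs_sgibklock);
 *	afs_sgibklist = tdc;
 *	SV_SIGNAL(&afs_sgibksync);
 *	SP_WAIT(afs_sgibklock, s, &afs_sgibkwait, PINOD);
 *
 * which matches the one-entry "list" assumption described above. */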
int afs_sgidaemon(void)
{
    int s;
    struct dcache *tdc;

    if (afs_sgibklock == NULL) {
	SV_INIT(&afs_sgibksync, "bksync", 0, 0);
	SV_INIT(&afs_sgibkwait, "bkwait", 0, 0);
	SPINLOCK_INIT(&afs_sgibklock, "bklock");
    }
    s = SPLOCK(afs_sgibklock);
    for (;;) {
	/* wait for something to do */
	SP_WAIT(afs_sgibklock, s, &afs_sgibksync, PINOD);
	osi_Assert(afs_sgibklist);

	/* XX will probably need to generalize to real list someday */
	s = SPLOCK(afs_sgibklock);
	while (afs_sgibklist) {
	    tdc = afs_sgibklist;
	    afs_sgibklist = NULL;
	    SPUNLOCK(afs_sgibklock, s);
	    AFS_GLOCK();
	    tdc->dflags &= ~DFEntryMod;
	    afs_WriteDCache(tdc, 1);
	    AFS_GUNLOCK();
	    s = SPLOCK(afs_sgibklock);
	}

	/* done all the work - wake everyone up */
	while (SV_SIGNAL(&afs_sgibkwait))
	    ;
    }
}
#endif