/*
 * Copyright 2000, International Business Machines Corporation and others.
 * All Rights Reserved.
 *
 * This software has been released under the terms of the IBM Public
 * License.  For details, see the LICENSE file in the top-level source
 * directory or online at http://www.openafs.org/dl/license10.html
 */
#include <afsconfig.h>
#include "afs/param.h"

#ifdef AFS_AIX51_ENV
#include <sys/sleep.h>
#endif

#include "afs/sysincludes.h"	/* Standard vendor system headers */
#include "afsincludes.h"	/* Afs-based standard headers */
#include "afs/afs_stats.h"	/* statistics gathering code */
#include "afs/afs_cbqueue.h"
#ifdef AFS_AIX_ENV
#include <sys/adspace.h>	/* for vm_att(), vm_det() */
#endif
/* background request queue size */
afs_lock_t afs_xbrs;		/* lock for brs */
static int brsInit = 0;
short afs_brsWaiters = 0;	/* number of users waiting for brs buffers */
short afs_brsDaemons = 0;	/* number of daemons waiting for brs requests */
struct brequest afs_brs[NBRS];	/* request structures */
struct afs_osi_WaitHandle AFS_WaitHandler, AFS_CSWaitHandler;
static int afs_brs_count = 0;	/* request counter, to service reqs in order */
static int rxepoch_checked = 0;
#define afs_CheckRXEpoch() {if (rxepoch_checked == 0 && rxkad_EpochWasSet) { \
	rxepoch_checked = 1; afs_GCUserData(/* force flag */ 1); } }
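/* The check above runs at most once: the first call after rxkad_EpochWasSet
 * becomes nonzero forces a garbage collection of user data, and
 * rxepoch_checked suppresses any repeat. */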
/* PAG garbage collection */
/* We induce a compile error if param.h does not define AFS_GCPAGS */
afs_int32 afs_gcpags = AFS_GCPAGS;
afs_int32 afs_gcpags_procsize = 0;

afs_int32 afs_CheckServerDaemonStarted = 0;
#ifdef DEFAULT_PROBE_INTERVAL
afs_int32 PROBE_INTERVAL = DEFAULT_PROBE_INTERVAL;	/* overriding during compile */
#else
afs_int32 PROBE_INTERVAL = 180;	/* default to 3 min */
#endif

#define PROBE_WAIT() (1000 * (PROBE_INTERVAL - ((afs_random() & 0x7fffffff) \
		      % (PROBE_INTERVAL/2))))
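/* PROBE_WAIT() yields milliseconds: the random term lies in
 * [0, PROBE_INTERVAL/2 - 1], so the wait falls in the range
 * (PROBE_INTERVAL/2, PROBE_INTERVAL] seconds -- with the default
 * 180-second interval, between 91 and 180 seconds. */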
void
afs_CheckServerDaemon(void)
{
    afs_int32 now, delay, lastCheck, last10MinCheck;

    afs_CheckServerDaemonStarted = 1;

    while (afs_initState < 101)
	afs_osi_Sleep(&afs_initState);
    afs_osi_Wait(PROBE_WAIT(), &AFS_CSWaitHandler, 0);

    last10MinCheck = lastCheck = osi_Time();
    while (1) {
	if (afs_termState == AFSOP_STOP_CS) {
	    afs_termState = AFSOP_STOP_BKG;
	    afs_osi_Wakeup(&afs_termState);
	    break;
	}

	now = osi_Time();
	if (PROBE_INTERVAL + lastCheck <= now) {
	    afs_CheckServers(1, NULL);	/* check down servers */
	    lastCheck = now = osi_Time();
	}

	if (600 + last10MinCheck <= now) {
	    afs_Trace1(afs_iclSetp, CM_TRACE_PROBEUP, ICL_TYPE_INT32, 600);
	    afs_CheckServers(0, NULL);
	    last10MinCheck = now = osi_Time();
	}

	if (afs_termState == AFSOP_STOP_CS) {
	    afs_termState = AFSOP_STOP_BKG;
	    afs_osi_Wakeup(&afs_termState);
	    break;
	}

	/* Compute time to next probe. */
	delay = PROBE_INTERVAL + lastCheck;
	if (delay > 600 + last10MinCheck)
	    delay = 600 + last10MinCheck;
	delay -= now;
	if (delay < 1)
	    delay = 1;
	afs_osi_Wait(delay * 1000, &AFS_CSWaitHandler, 0);
    }
    afs_CheckServerDaemonStarted = 0;
}

void
afs_Daemon(void)
{
    afs_int32 code;
    struct afs_exporter *exporter;
    afs_int32 now;
    afs_int32 last3MinCheck, last10MinCheck, last60MinCheck, lastNMinCheck;
    afs_int32 last1MinCheck;
    afs_uint32 lastCBSlotBump;
    char cs_warned = 0;

    AFS_STATCNT(afs_Daemon);
    last1MinCheck = last3MinCheck = last60MinCheck = last10MinCheck =
	lastNMinCheck = 0;

    afs_rootFid.Fid.Volume = 0;
    while (afs_initState < 101)
	afs_osi_Sleep(&afs_initState);

    now = osi_Time();
    lastCBSlotBump = now;
    /* when a lot of clients are booted simultaneously, they develop
     * annoying synchronous VL server bashing behaviors.  So we stagger them.
     */
    last1MinCheck = now + ((afs_random() & 0x7fffffff) % 60);	/* an extra 30 */
    last3MinCheck = now - 90 + ((afs_random() & 0x7fffffff) % 180);
    last60MinCheck = now - 1800 + ((afs_random() & 0x7fffffff) % 3600);
    last10MinCheck = now - 300 + ((afs_random() & 0x7fffffff) % 600);
    lastNMinCheck = now - 90 + ((afs_random() & 0x7fffffff) % 180);
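    /* For example, last3MinCheck lands uniformly in [now - 90, now + 89],
     * so the first 3-minute check fires anywhere in a 180-second window
     * rather than at the same instant on every client. */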
    /* start off with afs_initState >= 101 (basic init done) */
    while (1) {
	afs_CheckCallbacks(20);	/* unstat anything which will expire soon */

	/* things to do every 20 seconds or less - required by protocol spec */
	if (afs_nfsexporter)
	    afs_FlushActiveVcaches(0);	/* flush NFS writes */
	afs_FlushVCBs(1);	/* flush queued callbacks */
	afs_MaybeWakeupTruncateDaemon();	/* free cache space if have to */
	rx_CheckPackets();	/* Does RX need more packets? */
#if defined(AFS_AIX32_ENV) || defined(AFS_HPUX_ENV)
	/*
	 * Hack: we always want to keep plenty of free entries in the small
	 * free pool, so that rx (which runs with interrupts disabled) never
	 * has to call malloc.  So we do the dummy call below...
	 */
	if (((afs_stats_cmperf.SmallBlocksAlloced -
	      afs_stats_cmperf.SmallBlocksActive)
	     <= AFS_SALLOC_LOW_WATER))
	    osi_FreeSmallSpace(osi_AllocSmallSpace(AFS_SMALLOCSIZ));
	if (((afs_stats_cmperf.MediumBlocksAlloced -
	      afs_stats_cmperf.MediumBlocksActive)
	     <= AFS_MALLOC_LOW_WATER + 50))
	    osi_AllocMoreMSpace(AFS_MALLOC_LOW_WATER * 2);
#endif
	if (lastCBSlotBump + CBHTSLOTLEN < now) {	/* pretty time-dependent */
	    lastCBSlotBump = now;
	    if (afs_BumpBase()) {
		afs_CheckCallbacks(20);	/* unstat anything which will expire soon */
	    }
	}
	if (last1MinCheck + 60 < now) {
	    /* things to do every minute */
	    DFlush();		/* write out dir buffers */
	    afs_WriteThroughDSlots();	/* write through cacheinfo entries */
	    afs_FlushActiveVcaches(1);	/* keep flocks held & flush nfs writes */
#ifdef AFS_DISCON_ENV
	    afs_StoreDirtyVcaches();
#endif
	    afs_CheckRXEpoch();
	    last1MinCheck = now;
	}
	if (last3MinCheck + 180 < now) {
	    afs_CheckTokenCache();	/* check for access cache resets due to expired
					 * tickets */
	    last3MinCheck = now;
	}
	if (!afs_CheckServerDaemonStarted) {
	    /* Do the check here if the correct afsd is not installed. */
	    if (!cs_warned) {
		cs_warned = 1;
		printf("Please install afsd with check server daemon.\n");
	    }
	    if (lastNMinCheck + PROBE_INTERVAL < now) {
		/* only check down servers */
		afs_CheckServers(1, NULL);
		lastNMinCheck = now;
	    }
	}
	if (last10MinCheck + 600 < now) {
#ifdef AFS_USERSPACE_IP_ADDR
	    extern int rxi_GetcbiInfo(void);
#endif
	    afs_Trace1(afs_iclSetp, CM_TRACE_PROBEUP, ICL_TYPE_INT32, 600);
#ifdef AFS_USERSPACE_IP_ADDR
	    if (rxi_GetcbiInfo()) {	/* addresses changed from last time */
		afs_FlushCBs();
	    }
#else /* AFS_USERSPACE_IP_ADDR */
	    if (rxi_GetIFInfo()) {	/* addresses changed from last time */
		afs_FlushCBs();
	    }
#endif /* else AFS_USERSPACE_IP_ADDR */

	    if (!afs_CheckServerDaemonStarted)
		afs_CheckServers(0, NULL);
	    afs_GCUserData(0);	/* gc old conns */
	    /* This is probably the wrong way of doing GC for the various exporters but it will suffice for a while */
	    for (exporter = root_exported; exporter;
		 exporter = exporter->exp_next) {
		(void)EXP_GC(exporter, 0);	/* Generalize params */
	    }
	    {
		static int cnt = 0;
		if (++cnt < 12) {
		    afs_CheckVolumeNames(AFS_VOLCHECK_EXPIRED |
					 AFS_VOLCHECK_BUSY);
		} else {
		    cnt = 0;
		    afs_CheckVolumeNames(AFS_VOLCHECK_EXPIRED |
					 AFS_VOLCHECK_BUSY |
					 AFS_VOLCHECK_MTPTS);
		}
	    }
	    last10MinCheck = now;
	}
	if (last60MinCheck + 3600 < now) {
	    afs_Trace1(afs_iclSetp, CM_TRACE_PROBEVOLUME, ICL_TYPE_INT32,
		       3600);
	    afs_CheckRootVolume();
#if AFS_GCPAGS
	    if (afs_gcpags == AFS_GCPAGS_OK) {
		afs_int32 didany;
		afs_GCPAGs(&didany);
	    }
#endif
	    last60MinCheck = now;
	}
	if (afs_initState < 300) {	/* while things ain't rosy */
	    code = afs_CheckRootVolume();
	    if (code == 0)
		afs_initState = 300;	/* succeeded */
	    if (afs_initState < 200)
		afs_initState = 200;	/* tried once */
	    afs_osi_Wakeup(&afs_initState);
	}
	/* 18285 is because we're trying to divide evenly into 128, that is,
	 * CBSlotLen, while staying just under 20 seconds.  If CBSlotLen
	 * changes, should probably change this interval, too.
	 * Some of the preceding actions may take quite some time, so we
	 * might not want to wait the entire interval */
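	/* (The arithmetic: 7 sleeps of 18285 ms is 127.995 seconds, just
	 * under the 128-second slot length, with each individual sleep
	 * staying below 20 seconds.) */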
	now = 18285 - (osi_Time() - now);
	if (now > 0) {
	    afs_osi_Wait(now, &AFS_WaitHandler, 0);
	}
	if (afs_termState == AFSOP_STOP_AFS) {
	    if (afs_CheckServerDaemonStarted)
		afs_termState = AFSOP_STOP_CS;
	    else
		afs_termState = AFSOP_STOP_BKG;
	    afs_osi_Wakeup(&afs_termState);
	    return;
	}
    }
}
int
afs_CheckRootVolume(void)
{
    char rootVolName[32];
    struct volume *tvp = NULL;
    int usingDynroot = afs_GetDynrootEnable();
    int localcell;

    AFS_STATCNT(afs_CheckRootVolume);
    if (*afs_rootVolumeName == 0) {
	strcpy(rootVolName, "root.afs");
    } else {
	strcpy(rootVolName, afs_rootVolumeName);
    }

    if (!usingDynroot) {
	struct cell *lc = afs_GetPrimaryCell(READ_LOCK);

	if (!lc)
	    return ENOENT;
	localcell = lc->cellNum;
	afs_PutCell(lc, READ_LOCK);
    }

    if (usingDynroot) {
	afs_GetDynrootFid(&afs_rootFid);
	tvp = afs_GetVolume(&afs_rootFid, NULL, READ_LOCK);
    } else
	tvp = afs_GetVolumeByName(rootVolName, localcell, 1, NULL, READ_LOCK);
    if (!tvp && !usingDynroot) {
	char buf[128];
	int len = strlen(rootVolName);

	if ((len < 9) || strcmp(&rootVolName[len - 9], ".readonly")) {
	    strcpy(buf, rootVolName);
	    afs_strcat(buf, ".readonly");
	    tvp = afs_GetVolumeByName(buf, localcell, 1, NULL, READ_LOCK);
	}
    }
    if (tvp) {
	int volid = (tvp->roVol ? tvp->roVol : tvp->volume);
	afs_rootFid.Cell = localcell;
	if (afs_rootFid.Fid.Volume && afs_rootFid.Fid.Volume != volid
	    && afs_globalVp) {
	    /* If we had a root fid before and it changed location we reset
	     * the afs_globalVp so that it will be reevaluated.
	     * Just decrement the reference count. This only occurs during
	     * initial cell setup and can panic the machine if we set the
	     * count to zero and fs checkv is executed when the current
	     * directory is /afs.
	     */
	    AFS_FAST_RELE(afs_globalVp);
	    afs_globalVp = NULL;
	}
	afs_rootFid.Fid.Volume = volid;
	afs_rootFid.Fid.Vnode = 1;
	afs_rootFid.Fid.Unique = 1;
	afs_initState = 300;	/* won */
	afs_osi_Wakeup(&afs_initState);
	afs_PutVolume(tvp, READ_LOCK);
    }
#ifdef AFS_AIX_ENV
    /* This is to make sure that we update the root gnode */
    /* every time root volume gets released */
    {
	struct gnode *rootgp;
	struct mount *mp;
	int code;

	/* Only do this if afs_globalVFS is properly set due to race conditions
	 * this routine could be called before the gfs_mount is performed!
	 * Furthermore, afs_root (called below) *waits* until
	 * initState >= 200, so we don't try this until we've gotten
	 * at least that far */
	if (afs_globalVFS && afs_initState >= 200) {
	    if ((code = afs_root(afs_globalVFS, &rootgp)))
		return code;
	    mp = (struct mount *)afs_globalVFS->vfs_data;
	    mp->m_rootgp = gget(mp, 0, 0, (char *)rootgp);
	    afs_unlock(mp->m_rootgp);	/* unlock basic gnode */
	    afs_vrele(VTOAFS(rootgp));	/* zap afs_root's vnode hold */
	}
    }
#endif
    if (afs_rootFid.Fid.Volume)
	return 0;
    else
	return ENOENT;
}
/* ptr_parm 0 is the pathname, size_parm 0 to the fetch is the chunk number */
static void
BPath(register struct brequest *ab)
{
    register struct dcache *tdc = NULL;
    struct vcache *tvc = NULL;
    struct vnode *tvn = NULL;
#ifdef AFS_LINUX22_ENV
    struct dentry *dp = NULL;
#endif
    afs_size_t offset, len;
    struct vrequest treq;
    afs_int32 code;

    AFS_STATCNT(BPath);
    if ((code = afs_InitReq(&treq, ab->cred)))
	return;
    AFS_GUNLOCK();
#ifdef AFS_LINUX22_ENV
    code = gop_lookupname((char *)ab->ptr_parm[0], AFS_UIOSYS, 1, NULL, &dp);
    if (dp)
	tvn = (struct vnode *)dp->d_inode;
#else
    code = gop_lookupname((char *)ab->ptr_parm[0], AFS_UIOSYS, 1, NULL, &tvn);
#endif
    AFS_GLOCK();
    osi_FreeLargeSpace((char *)ab->ptr_parm[0]);	/* free path name buffer here */
    if (code)
	return;
    /* now path may not have been in afs, so check that before calling our cache manager */
    if (!tvn || !IsAfsVnode(tvn)) {
	/* release it and give up */
	if (tvn) {
#ifdef AFS_LINUX22_ENV
	    dput(dp);
#else
	    AFS_RELE(tvn);
#endif
	}
	return;
    }
#ifdef AFS_LINUX22_ENV
    tvc = VTOAFS(dp->d_inode);
#else
    tvc = VTOAFS(afs_gntovn(tvn));
#endif
    /* here we know it's an afs vnode, so we can get the data for the chunk */
    tdc = afs_GetDCache(tvc, ab->size_parm[0], &treq, &offset, &len, 1);
    if (tdc)
	afs_PutDCache(tdc);
#ifdef AFS_LINUX22_ENV
    dput(dp);
#else
    AFS_RELE(tvn);
#endif
}
/* size_parm 0 to the fetch is the chunk number,
 * ptr_parm 0 is the dcache entry to wakeup,
 * size_parm 1 is true iff we should release the dcache entry here.
 */
static void
BPrefetch(register struct brequest *ab)
{
    register struct dcache *tdc;
    register struct vcache *tvc;
    afs_size_t offset, len;
    struct vrequest treq;

    AFS_STATCNT(BPrefetch);
    if ((len = afs_InitReq(&treq, ab->cred)))
	return;
    tvc = ab->vc;
    tdc = afs_GetDCache(tvc, ab->size_parm[0], &treq, &offset, &len, 1);
    if (tdc)
	afs_PutDCache(tdc);

    /* now, dude may be waiting for us to clear DFFetchReq bit; do so.  Can't
     * use tdc from GetDCache since afs_GetDCache may fail, but someone may
     * be waiting for our wakeup anyway.
     */
    tdc = (struct dcache *)(ab->ptr_parm[0]);
    ObtainSharedLock(&tdc->lock, 640);
    if (tdc->mflags & DFFetchReq) {
	UpgradeSToWLock(&tdc->lock, 641);
	tdc->mflags &= ~DFFetchReq;
	ReleaseWriteLock(&tdc->lock);
    } else {
	ReleaseSharedLock(&tdc->lock);
    }
    afs_osi_Wakeup(&tdc->validPos);
    if (ab->size_parm[1]) {
	afs_PutDCache(tdc);	/* put this one back, too */
    }
}
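/* Illustrative only: a prefetch of this shape would be queued through
 * afs_BQueue (avc, acred, chunk and tdc below are placeholders, not a real
 * call site):
 *
 *	(void)afs_BQueue(BOP_FETCH, avc, 1, 0, acred,
 *			 (afs_size_t) chunk, (afs_size_t) 1, tdc);
 *
 * dontwait is 1 so the caller never blocks for a free request slot, and
 * size_parm[1] == 1 asks BPrefetch to afs_PutDCache the entry when done. */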
static void
BStore(register struct brequest *ab)
{
    register struct vcache *tvc;
    register afs_int32 code;
    struct vrequest treq;
#if defined(AFS_SGI_ENV)
    struct cred *tmpcred;
#endif

    AFS_STATCNT(BStore);
    if ((code = afs_InitReq(&treq, ab->cred)))
	return;
    code = 0;
    tvc = ab->vc;
#if defined(AFS_SGI_ENV)
    /*
     * Since StoreOnLastReference can end up calling osi_SyncVM, which
     * calls into VM code that assumes that u.u_cred has the
     * correct credentials, we set ours to theirs for this transaction
     */
    tmpcred = OSI_GET_CURRENT_CRED();
    OSI_SET_CURRENT_CRED(ab->cred);

    /*
     * To avoid recursion since the WriteLock may be released during VM
     * operations, we hold the VOP_RWLOCK across this transaction as
     * do the other callers of StoreOnLastReference
     */
    AFS_RWLOCK((vnode_t *) tvc, 1);
#endif
    ObtainWriteLock(&tvc->lock, 209);
    code = afs_StoreOnLastReference(tvc, &treq);
    ReleaseWriteLock(&tvc->lock);
#if defined(AFS_SGI_ENV)
    OSI_SET_CURRENT_CRED(tmpcred);
    AFS_RWUNLOCK((vnode_t *) tvc, 1);
#endif
    /* now set final return code, and wakeup anyone waiting */
    if ((ab->flags & BUVALID) == 0) {
	ab->code = afs_CheckCode(code, &treq, 43);	/* set final code, since treq doesn't go across processes */
	ab->flags |= BUVALID;
	if (ab->flags & BUWAIT) {
	    ab->flags &= ~BUWAIT;
	    afs_osi_Wakeup(ab);
	}
    }
}
/* release a held request buffer */
void
afs_BRelease(register struct brequest *ab)
{
    AFS_STATCNT(afs_BRelease);
    MObtainWriteLock(&afs_xbrs, 294);
    if (--ab->refCount <= 0) {
	ab->flags = 0;
    }
    if (afs_brsWaiters)
	afs_osi_Wakeup(&afs_brsWaiters);
    MReleaseWriteLock(&afs_xbrs);
}
/* return true if bkg fetch daemons are all busy */
int
afs_BBusy(void)
{
    AFS_STATCNT(afs_BBusy);
    if (afs_brsDaemons > 0)
	return 0;
    return 1;
}
struct brequest *
afs_BQueue(register short aopcode, register struct vcache *avc,
	   afs_int32 dontwait, afs_int32 ause, struct AFS_UCRED *acred,
	   afs_size_t asparm0, afs_size_t asparm1, void *apparm0)
{
    register int i;
    register struct brequest *tb;

    AFS_STATCNT(afs_BQueue);
    MObtainWriteLock(&afs_xbrs, 296);
    while (1) {
	tb = afs_brs;
	for (i = 0; i < NBRS; i++, tb++) {
	    if (tb->refCount == 0)
		break;
	}
	if (i < NBRS) {
	    /* found a buffer */
	    tb->opcode = aopcode;
	    tb->vc = avc;
	    tb->cred = acred;
	    crhold(tb->cred);
	    if (avc) {
#if defined(AFS_NETBSD_ENV) || defined(AFS_OBSD_ENV)
		AFS_HOLD(AFSTOV(avc));
#else
		VN_HOLD(AFSTOV(avc));
#endif
	    }
	    tb->refCount = ause + 1;
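	    /* i.e. one reference for the daemon that will execute the
	     * request, plus one for the caller when ause is set, so the
	     * caller can wait on the result and afs_BRelease() its
	     * reference afterwards */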
	    tb->size_parm[0] = asparm0;
	    tb->size_parm[1] = asparm1;
	    tb->ptr_parm[0] = apparm0;
	    tb->flags = 0;
	    tb->code = 0;
	    tb->ts = afs_brs_count++;
	    /* if daemons are waiting for work, wake them up */
	    if (afs_brsDaemons > 0) {
		afs_osi_Wakeup(&afs_brsDaemons);
	    }
	    MReleaseWriteLock(&afs_xbrs);
	    return tb;
	}
	if (dontwait) {
	    MReleaseWriteLock(&afs_xbrs);
	    return NULL;
	}
	/* no free buffers, sleep a while */
	afs_brsWaiters++;
	MReleaseWriteLock(&afs_xbrs);
	afs_osi_Sleep(&afs_brsWaiters);
	MObtainWriteLock(&afs_xbrs, 301);
	afs_brsWaiters--;
    }
}
#ifdef AFS_AIX32_ENV
#ifdef AFS_AIX41_ENV
/* AIX 4.1 has a much different sleep/wakeup mechanism available for use.
 * The modifications here will work for either a UP or MP machine.
 */
struct buf *afs_asyncbuf = (struct buf *)0;
tid_t afs_asyncbuf_cv = EVENT_NULL;
afs_int32 afs_biodcnt = 0;
/* in implementing this, I assumed that all external linked lists were
 * null-terminated.
 *
 * Several places in this code traverse a linked list.  The algorithm
 * used here is probably unfamiliar to most people.  Careful examination
 * will show that it eliminates an assignment inside the loop, as compared
 * to the standard algorithm, at the cost of occasionally using an extra
 * variable.
 */
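/* A sketch of the idea (not compiled): removing the chosen element with the
 * textbook walk keeps a trailing pointer, costing two assignments per step:
 *
 *	for (prev = NULL, p = head; p; prev = p, p = p->av_forw)
 *		...
 *
 * whereas walking a pointer to the link field itself needs only one, and
 * unlinking becomes a single store through that pointer:
 *
 *	for (lpp = &head; (p = *lpp) != NULL; lpp = &p->av_forw)
 *		...remember the best lpp seen so far...
 *	*best_lpp = best_p->av_forw;
 *
 * which is the pattern used by afs_get_bioreq() below (best_lpp and best_p
 * here stand in for bestlbpP and bestbp). */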
/* This function obtains, and returns, a pointer to a buffer for
 * processing by a daemon.  It sleeps until such a buffer is available.
 * The source of buffers for it is the list afs_asyncbuf (see also
 * naix_vm_strategy).  This function may be invoked concurrently by
 * several processes, that is, several instances of the same daemon.
 * naix_vm_strategy, which adds buffers to the list, runs at interrupt
 * level, while get_bioreq runs at process level.
 *
 * Since AIX 4.1 can wake just one process at a time, the separate sleep
 * addresses have been removed.
 * Note that the kernel_lock is held until the e_sleep_thread() occurs.
 * The afs_asyncbuf_lock is primarily used to serialize access between
 * process and interrupts.
 */
Simple_lock afs_asyncbuf_lock;
/*static*/ struct buf *
afs_get_bioreq()
{
    struct buf *bp = NULL;
    struct buf *bestbp;
    struct buf **bestlbpP, **lbpP;
    long bestage, stop;
    struct buf *t1P, *t2P;	/* temp pointers for list manipulation */
    int oldPriority;
    int interrupted;
    struct afs_bioqueue *s;

    /* ??? Does the forward pointer of the returned buffer need to be NULL?
     */

    /* Disable interrupts from the strategy function, and save the
     * prior priority level and lock access to the afs_asyncbuf.
     */
    oldPriority = disable_lock(INTMAX, &afs_asyncbuf_lock);

    while (1) {
	if (afs_asyncbuf) {
	    /* look for oldest buffer */
	    bp = bestbp = afs_asyncbuf;
	    bestage = (long)bestbp->av_back;
	    bestlbpP = &afs_asyncbuf;
	    lbpP = &bp->av_forw;

	    while ((bp = *lbpP)) {
		if ((long)bp->av_back - bestage < 0) {
		    bestbp = bp;
		    bestlbpP = lbpP;
		    bestage = (long)bp->av_back;
		}
		lbpP = &bp->av_forw;
	    }
	    bp = bestbp;
	    *bestlbpP = bp->av_forw;
	    break;
	} else {
	    /* If afs_asyncbuf is null, it is necessary to go to sleep.
	     * e_wakeup_one() ensures that only one thread wakes.
	     */
	    /* The LOCK_HANDLER indicates to e_sleep_thread to only drop the
	     * lock on an MP machine.
	     */
	    interrupted =
		e_sleep_thread(&afs_asyncbuf_cv, &afs_asyncbuf_lock,
			       LOCK_HANDLER | INTERRUPTIBLE);
	    if (interrupted == THREAD_INTERRUPTED) {
		/* re-enable interrupts from strategy */
		unlock_enable(oldPriority, &afs_asyncbuf_lock);
		return NULL;
	    }
	}			/* end of "else asyncbuf is empty" */
    }				/* end of "inner loop" */

    unlock_enable(oldPriority, &afs_asyncbuf_lock);
    /* For the convenience of other code, replace the gnodes in
     * the b_vp field of bp and the other buffers on the b_work
     * chain with the corresponding vnodes.
     *
     * ??? what happens to the gnodes?  They're not just cut loose,
     * are they?
     */
    for (t1P = bp;;) {
	t2P = (struct buf *)t1P->b_work;
	t1P->b_vp = ((struct gnode *)t1P->b_vp)->gn_vnode;
	if (!t2P)
	    break;

	t1P = (struct buf *)t2P->b_work;
	t2P->b_vp = ((struct gnode *)t2P->b_vp)->gn_vnode;
	if (!t1P)
	    break;
    }
    /* If the buffer does not specify I/O, it may immediately
     * be returned to the caller.  This condition is detected
     * by examining the buffer's flags (the b_flags field).  If
     * the B_PFPROT bit is set, the buffer represents a protection
     * violation, rather than a request for I/O.  The remainder
     * of the outer loop handles the case where the B_PFPROT bit is clear.
     */
    if (bp->b_flags & B_PFPROT) {
	return (bp);
    }
    return (bp);
}				/* end of function get_bioreq() */
/* This function is the daemon.  It is called from the syscall
 * interface.  Ordinarily, a script or an administrator will run a
 * daemon startup utility, specifying the number of I/O daemons to
 * run.  The utility will fork off that number of processes,
 * each making the appropriate syscall, which will cause this
 * function to be invoked.
 */
static int afs_initbiod = 0;	/* this is self-initializing code */
int DOvmlock = 0;
int
afs_BioDaemon(afs_int32 nbiods)
{
    afs_int32 code, s, pflg = 0;
    label_t jmpbuf;
    struct buf *bp, *bp1, *tbp1, *tbp2;	/* temp pointers only */
    caddr_t tmpaddr;
    struct vcache *vcp;
    char tmperr;

    if (!afs_initbiod) {
	afs_initbiod = 1;
	/* pin lock, since we'll be using it in an interrupt. */
	lock_alloc(&afs_asyncbuf_lock, LOCK_ALLOC_PIN, 2, 1);
	simple_lock_init(&afs_asyncbuf_lock);
	pin(&afs_asyncbuf, sizeof(struct buf *));
	pin(&afs_asyncbuf_cv, sizeof(afs_int32));
    }
    /* Ignore HUP signals... */
    {
	sigset_t sigbits, osigbits;
	/*
	 * add SIGHUP to the set of already masked signals
	 */
	SIGFILLSET(sigbits);	/* allow all signals */
	SIGDELSET(sigbits, SIGHUP);	/* except SIGHUP */
	limit_sigs(&sigbits, &osigbits);	/* and already masked */
    }
    /* Main body starts here -- this is an intentional infinite loop, and
     * should NEVER exit
     *
     * Now, the loop will exit if get_bioreq() returns NULL, indicating
     * that we've been interrupted.
     */
    while (1) {
	bp = afs_get_bioreq();
	if (!bp)
	    break;		/* we were interrupted */
	if ((code = setjmpx(&jmpbuf))) {
	    /* This should not have happened, maybe a lack of resources */
	    AFS_GUNLOCK();
	    s = disable_lock(INTMAX, &afs_asyncbuf_lock);
	    for (bp1 = bp; bp; bp = bp1) {
		if (bp1)
		    bp1 = (struct buf *)bp1->b_work;
		bp->b_actf = 0;
		bp->b_error = code;
		bp->b_flags |= B_ERROR;
		iodone(bp);
	    }
	    unlock_enable(s, &afs_asyncbuf_lock);
	    AFS_GLOCK();
	    continue;
	}
	vcp = VTOAFS(bp->b_vp);
	if (bp->b_flags & B_PFSTORE) {	/* XXXX */
	    ObtainWriteLock(&vcp->lock, 404);
	    if (vcp->v.v_gnode->gn_mwrcnt) {
		afs_offs_t newlength =
		    (afs_offs_t) dbtob(bp->b_blkno) + bp->b_bcount;
		if (vcp->m.Length < newlength) {
		    afs_Trace4(afs_iclSetp, CM_TRACE_SETLENGTH,
			       ICL_TYPE_STRING, __FILE__, ICL_TYPE_LONG,
			       __LINE__, ICL_TYPE_OFFSET,
			       ICL_HANDLE_OFFSET(vcp->m.Length),
			       ICL_TYPE_OFFSET, ICL_HANDLE_OFFSET(newlength));
		    vcp->m.Length = newlength;
		}
	    }
	    ReleaseWriteLock(&vcp->lock);
	}
	/* If the buffer represents a protection violation, rather than
	 * an actual request for I/O, no special action need be taken.
	 */
	if (bp->b_flags & B_PFPROT) {
	    iodone(bp);		/* Notify all users of the buffer that we're done */
	    continue;
	}

	if (DOvmlock)
	    ObtainWriteLock(&vcp->pvmlock, 211);
	/*
	 * First map its data area to a region in the current address space
	 * by calling vm_att with the subspace identifier, and a pointer to
	 * the data area.  vm_att returns a new data area pointer, but we
	 * also want to hang onto the old one.
	 */
	tmpaddr = bp->b_baddr;
	bp->b_baddr = (caddr_t) vm_att(bp->b_xmemd.subspace_id, tmpaddr);
	tmperr = afs_ustrategy(bp);	/* temp variable saves offset calculation */
	if (tmperr) {		/* error case */
	    bp->b_flags |= B_ERROR;	/* should other flags remain set ??? */
	    bp->b_error = tmperr;
	}
	/* Unmap the buffer's data area by calling vm_det.  Reset data area
	 * to the value that we saved above.
	 */
	vm_det(bp->b_baddr);
	bp->b_baddr = tmpaddr;

	/*
	 * buffer may be linked with other buffers via the b_work field.
	 * See also naix_vm_strategy.  For each buffer in the chain (including
	 * bp) notify all users of the buffer that the daemon is finished
	 * using it by calling iodone.
	 * assumes iodone can modify the b_work field.
	 */
	for (tbp1 = bp;;) {
	    tbp2 = (struct buf *)tbp1->b_work;
	    iodone(tbp1);
	    if (!tbp2)
		break;

	    tbp1 = (struct buf *)tbp2->b_work;
	    iodone(tbp2);
	    if (!tbp1)
		break;
	}
	if (DOvmlock)
	    ReleaseWriteLock(&vcp->pvmlock);	/* Unlock the vnode. */
	clrjmpx(&jmpbuf);
    }				/* infinite loop (unless we're interrupted) */
}				/* end of afs_BioDaemon() */

#else /* AFS_AIX41_ENV */

#define squeue afs_q
struct afs_bioqueue {
    struct squeue lruq;
    int sleeper;
    int cnt;
};
struct afs_bioqueue afs_bioqueue;
struct buf *afs_busyq = NULL;
struct buf *afs_asyncbuf;
afs_int32 afs_biodcnt = 0;
/* in implementing this, I assumed that all external linked lists were
 * null-terminated.
 *
 * Several places in this code traverse a linked list.  The algorithm
 * used here is probably unfamiliar to most people.  Careful examination
 * will show that it eliminates an assignment inside the loop, as compared
 * to the standard algorithm, at the cost of occasionally using an extra
 * variable.
 */
/* This function obtains, and returns, a pointer to a buffer for
 * processing by a daemon.  It sleeps until such a buffer is available.
 * The source of buffers for it is the list afs_asyncbuf (see also
 * naix_vm_strategy).  This function may be invoked concurrently by
 * several processes, that is, several instances of the same daemon.
 * naix_vm_strategy, which adds buffers to the list, runs at interrupt
 * level, while get_bioreq runs at process level.
 *
 * The common kernel paradigm of sleeping and waking up, in which all the
 * competing processes sleep waiting for wakeups on one address, is not
 * followed here.  Instead, the following paradigm is used: when a daemon
 * goes to sleep, it checks for other sleeping daemons.  If there aren't any,
 * it sleeps on the address of variable afs_asyncbuf.  But if there is
 * already a daemon sleeping on that address, it threads its own unique
 * address onto a list, and sleeps on that address.  This way, every
 * sleeper is sleeping on a different address, and every wakeup wakes up
 * exactly one daemon.  This prevents a whole bunch of daemons from waking
 * up and then immediately having to go back to sleep.  This provides a
 * performance gain and makes the I/O scheduling a bit more deterministic.
 * The list of sleepers is variable afs_bioqueue.  The unique address
 * on which to sleep is passed to get_bioreq as its parameter.
 */
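/* In outline (cf. the code below), each daemon does:
 *
 *	if (afs_bioqueue.sleeper) {			-- shared address taken
 *		QAdd(&afs_bioqueue.lruq, &self->lruq);
 *		sleep((caddr_t) self, ...);		-- our unique address
 *	} else {
 *		afs_bioqueue.sleeper = TRUE;
 *		sleep((caddr_t) &afs_asyncbuf, ...);	-- the shared address
 *	}
 *
 * and a waker wakes exactly one sleeper: wakeup(&afs_asyncbuf) when the
 * sleeper flag is set, otherwise wakeup() on the head of the lruq list.
 */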
/*static*/ struct buf *
afs_get_bioreq(self)
     struct afs_bioqueue *self;	/* address on which to sleep */
{
    struct buf *bp = NULL;
    struct buf *bestbp;
    struct buf **bestlbpP, **lbpP;
    int bestage, stop;
    struct buf *t1P, *t2P;	/* temp pointers for list manipulation */
    int oldPriority;
    int interrupted;
    struct afs_bioqueue *s;

    /* ??? Does the forward pointer of the returned buffer need to be NULL?
     */

    /* Disable interrupts from the strategy function, and save the
     * prior priority level
     */
    oldPriority = i_disable(INTMAX);

    /* Each iteration of following loop either pulls
     * a buffer off afs_asyncbuf, or sleeps.
     */
    while (1) {			/* inner loop */
	if (afs_asyncbuf) {
	    /* look for oldest buffer */
	    bp = bestbp = afs_asyncbuf;
	    bestage = (int)bestbp->av_back;
	    bestlbpP = &afs_asyncbuf;
	    lbpP = &bp->av_forw;

	    while ((bp = *lbpP)) {
		if ((int)bp->av_back - bestage < 0) {
		    bestbp = bp;
		    bestlbpP = lbpP;
		    bestage = (int)bp->av_back;
		}
		lbpP = &bp->av_forw;
	    }
	    bp = bestbp;
	    *bestlbpP = bp->av_forw;
	    break;
	} else {
	    /* If afs_asyncbuf is null, it is necessary to go to sleep.
	     * There are two possibilities: either there is already a
	     * daemon that is sleeping on the address of afs_asyncbuf,
	     * or there isn't.
	     */
	    if (afs_bioqueue.sleeper) {
		/* enqueue, then sleep on our own unique address */
		QAdd(&(afs_bioqueue.lruq), &(self->lruq));

		interrupted = sleep((caddr_t) self, PCATCH | (PZERO + 1));
		if (self->lruq.next != &self->lruq) {	/* XXX ##3 XXX */
		    QRemove(&(self->lruq));	/* dequeue */
		}
		afs_bioqueue.sleeper = FALSE;
		if (interrupted) {
		    /* re-enable interrupts from strategy */
		    i_enable(oldPriority);
		    return NULL;
		}
	    } else {	/* afs_asyncbuf's address is free; sleep there */
		afs_bioqueue.sleeper = TRUE;
		interrupted =
		    sleep((caddr_t) & afs_asyncbuf, PCATCH | (PZERO + 1));
		afs_bioqueue.sleeper = FALSE;
		if (interrupted) {
		    /*
		     * We need to wakeup another daemon if present
		     * since we were waiting on afs_asyncbuf.
		     */
#ifdef notdef			/* The following doesn't work as advertised */
		    if (afs_bioqueue.lruq.next != &afs_bioqueue.lruq) {
			struct squeue *bq = afs_bioqueue.lruq.next;
			QRemove(bq);
			wakeup(bq);
		    }
#endif
		    /* re-enable interrupts from strategy */
		    i_enable(oldPriority);
		    return NULL;
		}
	    }
	}			/* end of "else asyncbuf is empty" */
    }				/* end of "inner loop" */

    i_enable(oldPriority);	/* re-enable interrupts from strategy */
    /* For the convenience of other code, replace the gnodes in
     * the b_vp field of bp and the other buffers on the b_work
     * chain with the corresponding vnodes.
     *
     * ??? what happens to the gnodes?  They're not just cut loose,
     * are they?
     */
    for (t1P = bp;;) {
	t2P = (struct buf *)t1P->b_work;
	t1P->b_vp = ((struct gnode *)t1P->b_vp)->gn_vnode;
	if (!t2P)
	    break;

	t1P = (struct buf *)t2P->b_work;
	t2P->b_vp = ((struct gnode *)t2P->b_vp)->gn_vnode;
	if (!t1P)
	    break;
    }
    /* If the buffer does not specify I/O, it may immediately
     * be returned to the caller.  This condition is detected
     * by examining the buffer's flags (the b_flags field).  If
     * the B_PFPROT bit is set, the buffer represents a protection
     * violation, rather than a request for I/O.  The remainder
     * of the outer loop handles the case where the B_PFPROT bit is clear.
     */
    if (bp->b_flags & B_PFPROT) {
	return (bp);
    }
    /* wake up another process to handle the next buffer, and return
     * to the caller
     */
    oldPriority = i_disable(INTMAX);

    /* determine where to find the sleeping process.
     * There are two cases: either it is sleeping on
     * afs_asyncbuf, or it is sleeping on its own unique
     * address.  These cases are distinguished by examining
     * the sleeper field of afs_bioqueue.
     */
    if (afs_bioqueue.sleeper) {
	wakeup(&afs_asyncbuf);
    } else {
	if (afs_bioqueue.lruq.next == &afs_bioqueue.lruq) {
	    /* queue is empty, what now? ??? */
	    /* Should this be impossible, or does */
	    /* it just mean that nobody is sleeping? */ ;
	} else {
	    struct squeue *bq = afs_bioqueue.lruq.next;
	    QRemove(bq);
	    QInit(bq);
	    wakeup(bq);
	    afs_bioqueue.sleeper = TRUE;
	}
    }
    i_enable(oldPriority);	/* re-enable interrupts from strategy */

    return (bp);
}				/* end of function get_bioreq() */
/* This function is the daemon.  It is called from the syscall
 * interface.  Ordinarily, a script or an administrator will run a
 * daemon startup utility, specifying the number of I/O daemons to
 * run.  The utility will fork off that number of processes,
 * each making the appropriate syscall, which will cause this
 * function to be invoked.
 */
static int afs_initbiod = 0;	/* this is self-initializing code */
int DOvmlock = 0;
int
afs_BioDaemon(nbiods)
     afs_int32 nbiods;
{
    struct afs_bioqueue *self;
    afs_int32 code, s, pflg = 0;
    label_t jmpbuf;
    struct buf *bp, *bp1, *tbp1, *tbp2;	/* temp pointers only */
    caddr_t tmpaddr;
    struct vcache *vcp;
    char tmperr;

    if (!afs_initbiod) {
	afs_initbiod = 1;
	/* Initialize the queue of waiting processes, afs_bioqueue. */
	QInit(&(afs_bioqueue.lruq));
    }

    /* establish ourself as a kernel process so shutdown won't kill us */
/*    u.u_procp->p_flag |= SKPROC; */

    /* Initialize a token (self) to use in the queue of sleeping processes. */
    self = (struct afs_bioqueue *)afs_osi_Alloc(sizeof(struct afs_bioqueue));
    pin(self, sizeof(struct afs_bioqueue));	/* fix in memory */
    memset(self, 0, sizeof(*self));
    QInit(&(self->lruq));	/* initialize queue entry pointers */
    /* Ignore HUP signals... */
    SIGDELSET(u.u_procp->p_sig, SIGHUP);
    SIGADDSET(u.u_procp->p_sigignore, SIGHUP);
    SIGDELSET(u.u_procp->p_sigcatch, SIGHUP);

    /* Main body starts here -- this is an intentional infinite loop, and
     * should NEVER exit
     *
     * Now, the loop will exit if get_bioreq() returns NULL, indicating
     * that we've been interrupted.
     */
    while (1) {
	bp = afs_get_bioreq(self);
	if (!bp)
	    break;		/* we were interrupted */
	if ((code = setjmpx(&jmpbuf))) {
	    /* This should not have happened, maybe a lack of resources */
	    for (bp1 = bp; bp; bp = bp1) {
		if (bp1)
		    bp1 = (struct buf *)bp1->b_work;
		bp->b_actf = 0;
		bp->b_error = code;
		bp->b_flags |= B_ERROR;
		iodone(bp);
	    }
	    continue;
	}
	vcp = VTOAFS(bp->b_vp);
	if (bp->b_flags & B_PFSTORE) {
	    ObtainWriteLock(&vcp->lock, 210);
	    if (vcp->v.v_gnode->gn_mwrcnt) {
		afs_offs_t newlength =
		    (afs_offs_t) dbtob(bp->b_blkno) + bp->b_bcount;
		if (vcp->m.Length < newlength) {
		    afs_Trace4(afs_iclSetp, CM_TRACE_SETLENGTH,
			       ICL_TYPE_STRING, __FILE__, ICL_TYPE_LONG,
			       __LINE__, ICL_TYPE_OFFSET,
			       ICL_HANDLE_OFFSET(vcp->m.Length),
			       ICL_TYPE_OFFSET, ICL_HANDLE_OFFSET(newlength));
		    vcp->m.Length = newlength;
		}
	    }
	    ReleaseWriteLock(&vcp->lock);
	}
	/* If the buffer represents a protection violation, rather than
	 * an actual request for I/O, no special action need be taken.
	 */
	if (bp->b_flags & B_PFPROT) {
	    iodone(bp);		/* Notify all users of the buffer that we're done */
	    continue;
	}

	if (DOvmlock)
	    ObtainWriteLock(&vcp->pvmlock, 558);
	/*
	 * First map its data area to a region in the current address space
	 * by calling vm_att with the subspace identifier, and a pointer to
	 * the data area.  vm_att returns a new data area pointer, but we
	 * also want to hang onto the old one.
	 */
	tmpaddr = bp->b_baddr;
	bp->b_baddr = vm_att(bp->b_xmemd.subspace_id, tmpaddr);
	tmperr = afs_ustrategy(bp);	/* temp variable saves offset calculation */
	if (tmperr) {		/* error case */
	    bp->b_flags |= B_ERROR;	/* should other flags remain set ??? */
	    bp->b_error = tmperr;
	}
	/* Unmap the buffer's data area by calling vm_det.  Reset data area
	 * to the value that we saved above.
	 */
	vm_det(bp->b_un.b_addr);
	bp->b_baddr = tmpaddr;

	/*
	 * buffer may be linked with other buffers via the b_work field.
	 * See also naix_vm_strategy.  For each buffer in the chain (including
	 * bp) notify all users of the buffer that the daemon is finished
	 * using it by calling iodone.
	 * assumes iodone can modify the b_work field.
	 */
	for (tbp1 = bp;;) {
	    tbp2 = (struct buf *)tbp1->b_work;
	    iodone(tbp1);
	    if (!tbp2)
		break;

	    tbp1 = (struct buf *)tbp2->b_work;
	    iodone(tbp2);
	    if (!tbp1)
		break;
	}
	if (DOvmlock)
	    ReleaseWriteLock(&vcp->pvmlock);	/* Unlock the vnode. */
	clrjmpx(&jmpbuf);
    }				/* infinite loop (unless we're interrupted) */
    unpin(self, sizeof(struct afs_bioqueue));
    afs_osi_Free(self, sizeof(struct afs_bioqueue));
}				/* end of afs_BioDaemon() */
#endif /* AFS_AIX41_ENV */
#endif /* AFS_AIX32_ENV */
int afs_nbrs = 0;
void
afs_BackgroundDaemon(void)
{
    struct brequest *tb;
    int i;

    AFS_STATCNT(afs_BackgroundDaemon);
    /* initialize subsystem */
    if (brsInit == 0) {
	LOCK_INIT(&afs_xbrs, "afs_xbrs");
	memset((char *)afs_brs, 0, sizeof(afs_brs));
	brsInit = 1;
#if defined (AFS_SGI_ENV) && defined(AFS_SGI_SHORTSTACK)
	/*
	 * steal the first daemon for doing delayed DSlot flushing
	 * (see afs_GetDownDSlot)
	 */
	AFS_GUNLOCK();
	afs_sgidaemon();
	return;
#endif
    }
    afs_nbrs++;
    MObtainWriteLock(&afs_xbrs, 302);
    while (1) {
	int min_ts = 0;
	struct brequest *min_tb = NULL;

	if (afs_termState == AFSOP_STOP_BKG) {
	    if (--afs_nbrs <= 0)
		afs_termState = AFSOP_STOP_TRUNCDAEMON;
	    MReleaseWriteLock(&afs_xbrs);
	    afs_osi_Wakeup(&afs_termState);
	    return;
	}
	/* find a request */
	tb = afs_brs;
	for (i = 0; i < NBRS; i++, tb++) {
	    /* look for request with smallest ts */
	    if ((tb->refCount > 0) && !(tb->flags & BSTARTED)) {
		/* new request, not yet picked up */
		if ((min_tb && (min_ts - tb->ts > 0)) || !min_tb) {
		    min_tb = tb;
		    min_ts = tb->ts;
		}
	    }
	}
	if ((tb = min_tb)) {
	    /* claim and process this request */
	    tb->flags |= BSTARTED;
	    MReleaseWriteLock(&afs_xbrs);
	    afs_Trace1(afs_iclSetp, CM_TRACE_BKG1, ICL_TYPE_INT32,
		       tb->opcode);
	    if (tb->opcode == BOP_FETCH)
		BPrefetch(tb);
	    else if (tb->opcode == BOP_STORE)
		BStore(tb);
	    else if (tb->opcode == BOP_PATH)
		BPath(tb);
	    else
		panic("background bop");
	    if (tb->vc) {
#ifdef AFS_DEC_ENV
		tb->vc->vrefCount--;	/* fix up reference count */
#else
		AFS_RELE(AFSTOV(tb->vc));	/* MUST call vnode layer or could lose vnodes */
#endif
		tb->vc = NULL;
	    }
	    if (tb->cred) {
		crfree(tb->cred);
		tb->cred = (struct AFS_UCRED *)0;
	    }
	    afs_BRelease(tb);	/* this grabs and releases afs_xbrs lock */
	    MObtainWriteLock(&afs_xbrs, 305);	/* lock it again */
	}
	if (!tb) {
	    /* wait for new request */
	    afs_brsDaemons++;
	    MReleaseWriteLock(&afs_xbrs);
	    afs_osi_Sleep(&afs_brsDaemons);
	    MObtainWriteLock(&afs_xbrs, 307);
	    afs_brsDaemons--;
	}
    }
}
void
shutdown_daemons(void)
{
    AFS_STATCNT(shutdown_daemons);
    if (afs_cold_shutdown) {
	afs_brsDaemons = brsInit = 0;
	rxepoch_checked = afs_nbrs = 0;
	memset((char *)afs_brs, 0, sizeof(afs_brs));
	memset((char *)&afs_xbrs, 0, sizeof(afs_lock_t));
	afs_brsWaiters = 0;
#ifdef AFS_AIX32_ENV
#ifdef AFS_AIX41_ENV
	lock_free(&afs_asyncbuf_lock);
	unpin(&afs_asyncbuf, sizeof(struct buf *));
	unpin(&afs_asyncbuf_cv, sizeof(afs_int32));
#else /* AFS_AIX41_ENV */
	afs_busyq = NULL;
	afs_biodcnt = 0;
	memset((char *)&afs_bioqueue, 0, sizeof(struct afs_bioqueue));
#endif
	afs_initbiod = 0;
#endif
    }
}
#if defined(AFS_SGI_ENV) && defined(AFS_SGI_SHORTSTACK)
/*
 * sgi - daemon - handles certain operations that otherwise
 * would use up too much kernel stack space
 *
 * This all assumes that since the caller must have the xdcache lock
 * exclusively that the list will never be more than one long
 * and no one else can attempt to add anything until we're done.
 */
SV_TYPE afs_sgibksync;
SV_TYPE afs_sgibkwait;
lock_t afs_sgibklock;
struct dcache *afs_sgibklist;

int
afs_sgidaemon(void)
{
    int s;
    struct dcache *tdc;
    if (afs_sgibklock == NULL) {
	SV_INIT(&afs_sgibksync, "bksync", 0, 0);
	SV_INIT(&afs_sgibkwait, "bkwait", 0, 0);
	SPINLOCK_INIT(&afs_sgibklock, "bklock");
    }
    s = SPLOCK(afs_sgibklock);
    for (;;) {
	/* wait for something to do */
	SP_WAIT(afs_sgibklock, s, &afs_sgibksync, PINOD);
	osi_Assert(afs_sgibklist);

	/* XX will probably need to generalize to real list someday */
	s = SPLOCK(afs_sgibklock);
	while (afs_sgibklist) {
	    tdc = afs_sgibklist;
	    afs_sgibklist = NULL;
	    SPUNLOCK(afs_sgibklock, s);
	    AFS_GLOCK();
	    tdc->dflags &= ~DFEntryMod;
	    afs_WriteDCache(tdc, 1);
	    AFS_GUNLOCK();
	    s = SPLOCK(afs_sgibklock);
	}

	/* done all the work - wake everyone up */
	while (SV_SIGNAL(&afs_sgibkwait));
    }
}
#endif