/*
 * Copyright 2000, International Business Machines Corporation and others.
 * All Rights Reserved.
 *
 * This software has been released under the terms of the IBM Public
 * License.  For details, see the LICENSE file in the top-level source
 * directory or online at http://www.openafs.org/dl/license10.html
 */

#include <afsconfig.h>
#include "afs/param.h"

#ifdef AFS_AIX51_ENV
#include <sys/sleep.h>
#endif

#include "afs/sysincludes.h"	/* Standard vendor system headers */
#include "afsincludes.h"	/* AFS-based standard headers */
#include "afs/afs_stats.h"	/* statistics gathering code */
#include "afs/afs_cbqueue.h"

#ifdef AFS_AIX_ENV
#include <sys/adspace.h>	/* for vm_att(), vm_det() */
#endif

/* background request queue size */
afs_lock_t afs_xbrs;		/* lock for brs */
static int brsInit = 0;
short afs_brsWaiters = 0;	/* number of users waiting for brs buffers */
short afs_brsDaemons = 0;	/* number of daemons waiting for brs requests */
struct brequest afs_brs[NBRS];	/* request structures */
struct afs_osi_WaitHandle AFS_WaitHandler, AFS_CSWaitHandler;
static int afs_brs_count = 0;	/* request counter, to service reqs in order */
#ifdef AFS_DISABLE_BKG
int disable_bkg = AFS_DISABLE_BKG;
#else
int disable_bkg = 0;
#endif

static int rxepoch_checked = 0;
#define afs_CheckRXEpoch() {if (rxepoch_checked == 0 && rxkad_EpochWasSet) { \
	rxepoch_checked = 1; afs_GCUserData(/* force flag */ 1); } }

/* PAG garbage collection */
/* We induce a compile error if param.h does not define AFS_GCPAGS */
afs_int32 afs_gcpags = AFS_GCPAGS;
afs_int32 afs_gcpags_procsize = 0;

afs_int32 afs_CheckServerDaemonStarted = 0;
#ifdef DEFAULT_PROBE_INTERVAL
afs_int32 PROBE_INTERVAL = DEFAULT_PROBE_INTERVAL;	/* can be overridden at compile time */
#else
afs_int32 PROBE_INTERVAL = 180;	/* default to 3 min */
#endif

#define PROBE_WAIT() (1000 * (PROBE_INTERVAL - ((afs_random() & 0x7fffffff) \
		      % (PROBE_INTERVAL/2))))
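
/*
 * Worked example (editorial note, not in the original source): with the
 * default PROBE_INTERVAL of 180, the random term is in [0, 89], so
 * PROBE_WAIT() yields 91000..180000 ms -- always more than half the probe
 * interval, but never more than the full interval.
 */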
void
afs_CheckServerDaemon(void)
{
    afs_int32 now, delay, lastCheck, last10MinCheck;

    afs_CheckServerDaemonStarted = 1;

    while (afs_initState < 101)
	afs_osi_Sleep(&afs_initState);
    afs_osi_Wait(PROBE_WAIT(), &AFS_CSWaitHandler, 0);

    last10MinCheck = lastCheck = osi_Time();
    while (1) {
	if (afs_termState == AFSOP_STOP_CS) {
	    afs_termState = AFSOP_STOP_BKG;
	    afs_osi_Wakeup(&afs_termState);
	    break;
	}

	now = osi_Time();
	if (PROBE_INTERVAL + lastCheck <= now) {
	    afs_CheckServers(1, NULL);	/* check down servers */
	    lastCheck = now = osi_Time();
	}

	if (600 + last10MinCheck <= now) {
	    afs_Trace1(afs_iclSetp, CM_TRACE_PROBEUP, ICL_TYPE_INT32, 600);
	    afs_CheckServers(0, NULL);
	    last10MinCheck = now = osi_Time();
	}

	if (afs_termState == AFSOP_STOP_CS) {
	    afs_termState = AFSOP_STOP_BKG;
	    afs_osi_Wakeup(&afs_termState);
	    break;
	}

	/* Compute time to next probe. */
	delay = PROBE_INTERVAL + lastCheck;
	if (delay > 600 + last10MinCheck)
	    delay = 600 + last10MinCheck;
	delay -= now;
	if (delay < 1)
	    delay = 1;

	afs_osi_Wait(delay * 1000, &AFS_CSWaitHandler, 0);
    }
    afs_CheckServerDaemonStarted = 0;
}

#define RECURSIVE_VFS_CONTEXT 1
#if RECURSIVE_VFS_CONTEXT
extern int vfs_context_ref;
#else
#define vfs_context_ref 1
#endif

void
afs_Daemon(void)
{
    afs_int32 code;
    struct afs_exporter *exporter;
    afs_int32 now;
    afs_int32 last3MinCheck, last10MinCheck, last60MinCheck, lastNMinCheck;
    afs_int32 last1MinCheck;
    afs_uint32 lastCBSlotBump;
    int cs_warned = 0;

    AFS_STATCNT(afs_Daemon);
    last1MinCheck = last3MinCheck = last60MinCheck = last10MinCheck =
	lastNMinCheck = 0;

    afs_rootFid.Fid.Volume = 0;
    while (afs_initState < 101)
	afs_osi_Sleep(&afs_initState);
#ifdef AFS_DARWIN80_ENV
    if (afs_osi_ctxtp_initialized)
	osi_Panic("vfs context already initialized");
    while (afs_osi_ctxtp && vfs_context_ref)
	afs_osi_Sleep(&afs_osi_ctxtp);
#if RECURSIVE_VFS_CONTEXT
    if (afs_osi_ctxtp && !vfs_context_ref)
	vfs_context_rele(afs_osi_ctxtp);
#endif
    afs_osi_ctxtp = vfs_context_create(NULL);
    afs_osi_ctxtp_initialized = 1;
#endif

    now = osi_Time();
    lastCBSlotBump = now;

    /* when a lot of clients are booted simultaneously, they develop
     * annoying synchronous VL server bashing behaviors.  So we stagger them.
     */
    last1MinCheck = now + ((afs_random() & 0x7fffffff) % 60);	/* an extra 30 */
    last3MinCheck = now - 90 + ((afs_random() & 0x7fffffff) % 180);
    last60MinCheck = now - 1800 + ((afs_random() & 0x7fffffff) % 3600);
    last10MinCheck = now - 300 + ((afs_random() & 0x7fffffff) % 600);
    lastNMinCheck = now - 90 + ((afs_random() & 0x7fffffff) % 180);
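
    /*
     * Worked example (editorial note, not in the original source): if boot
     * time is T, last3MinCheck lands uniformly in [T-90, T+89], so the first
     * 3-minute pass (which fires once last3MinCheck + 180 < now) happens
     * roughly 90 to 270 seconds after boot, spreading the load across
     * clients instead of synchronizing it.
     */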

    /* start off with afs_initState >= 101 (basic init done) */
    while (1) {
	afs_CheckCallbacks(20);	/* unstat anything which will expire soon */

	/* things to do every 20 seconds or less - required by protocol spec */
	if (afs_nfsexporter)
	    afs_FlushActiveVcaches(0);	/* flush NFS writes */
	afs_FlushVCBs(1);	/* flush queued callbacks */
	afs_MaybeWakeupTruncateDaemon();	/* free cache space if we have to */
	rx_CheckPackets();	/* Does RX need more packets? */

	now = osi_Time();
	if (lastCBSlotBump + CBHTSLOTLEN < now) {	/* pretty time-dependent */
	    lastCBSlotBump = now;
	    if (afs_BumpBase()) {
		afs_CheckCallbacks(20);	/* unstat anything which will expire soon */
	    }
	}

	if (last1MinCheck + 60 < now) {
	    /* things to do every minute */
	    DFlush();		/* write out dir buffers */
	    afs_WriteThroughDSlots();	/* write through cacheinfo entries */
	    ObtainWriteLock(&afs_xvcache, 736);
	    afs_FlushReclaimedVcaches();
	    ReleaseWriteLock(&afs_xvcache);
	    afs_FlushActiveVcaches(1);	/* keep flocks held & flush nfs writes */
#ifdef AFS_DISCON_ENV
	    afs_StoreDirtyVcaches();
#endif
	    last1MinCheck = now;
	}

	if (last3MinCheck + 180 < now) {
	    afs_CheckTokenCache();	/* check for access cache resets due to
					 * expired tickets */
	    last3MinCheck = now;
	}

	if (!afs_CheckServerDaemonStarted) {
	    /* Do the check here if the correct afsd is not installed. */
	    if (!cs_warned) {
		cs_warned = 1;
		printf("Please install afsd with check server daemon.\n");
	    }
	    if (lastNMinCheck + PROBE_INTERVAL < now) {
		/* only check down servers */
		afs_CheckServers(1, NULL);
		lastNMinCheck = now;
	    }
	}

	if (last10MinCheck + 600 < now) {
#ifdef AFS_USERSPACE_IP_ADDR
	    extern int rxi_GetcbiInfo(void);
#endif
	    afs_Trace1(afs_iclSetp, CM_TRACE_PROBEUP, ICL_TYPE_INT32, 600);
#ifdef AFS_USERSPACE_IP_ADDR
	    if (rxi_GetcbiInfo()) {	/* addresses changed from last time */
		afs_FlushCBs();
	    }
#else /* AFS_USERSPACE_IP_ADDR */
	    if (rxi_GetIFInfo()) {	/* addresses changed from last time */
		afs_FlushCBs();
	    }
#endif /* else AFS_USERSPACE_IP_ADDR */
	    if (!afs_CheckServerDaemonStarted)
		afs_CheckServers(0, NULL);
	    afs_GCUserData(0);	/* gc old conns */
	    /* This is probably the wrong way of doing GC for the various
	     * exporters, but it will suffice for a while */
	    for (exporter = root_exported; exporter;
		 exporter = exporter->exp_next) {
		(void)EXP_GC(exporter, 0);	/* Generalize params */
	    }
	    {
		static int cnt = 0;
		if (++cnt < 12) {
		    afs_CheckVolumeNames(AFS_VOLCHECK_EXPIRED |
					 AFS_VOLCHECK_BUSY);
		} else {
		    cnt = 0;
		    afs_CheckVolumeNames(AFS_VOLCHECK_EXPIRED |
					 AFS_VOLCHECK_BUSY |
					 AFS_VOLCHECK_MTPTS);
		}
	    }
	    last10MinCheck = now;
	}

	if (last60MinCheck + 3600 < now) {
	    afs_Trace1(afs_iclSetp, CM_TRACE_PROBEVOLUME, ICL_TYPE_INT32,
		       3600);
	    afs_CheckRootVolume();
	    if (afs_gcpags == AFS_GCPAGS_OK) {
		afs_int32 didany;
		afs_GCPAGs(&didany);
	    }
	    last60MinCheck = now;
	}

	if (afs_initState < 300) {	/* while things ain't rosy */
	    code = afs_CheckRootVolume();
	    if (code == 0)
		afs_initState = 300;	/* succeeded */
	    if (afs_initState < 200)
		afs_initState = 200;	/* tried once */
	    afs_osi_Wakeup(&afs_initState);
	}

	/* 18285 is because we're trying to divide evenly into 128, that is,
	 * CBSlotLen, while staying just under 20 seconds: 7 passes of
	 * 18285 ms is 127995 ms, i.e. just under 128 seconds.  If CBSlotLen
	 * changes, we should probably change this interval, too.
	 * Some of the preceding actions may take quite some time, so we
	 * might not want to wait the entire interval */
	now = 18285 - (osi_Time() - now);
	if (now > 0) {
	    afs_osi_Wait(now, &AFS_WaitHandler, 0);
	}

	if (afs_termState == AFSOP_STOP_AFS) {
	    if (afs_CheckServerDaemonStarted)
		afs_termState = AFSOP_STOP_CS;
	    else
		afs_termState = AFSOP_STOP_BKG;
	    afs_osi_Wakeup(&afs_termState);
	    return;
	}
    }
}

int
afs_CheckRootVolume(void)
{
    char rootVolName[32];
    struct volume *tvp = NULL;
    int usingDynroot = afs_GetDynrootEnable();
    int localcell;

    AFS_STATCNT(afs_CheckRootVolume);
    if (*afs_rootVolumeName == 0) {
	strcpy(rootVolName, "root.afs");
    } else {
	strcpy(rootVolName, afs_rootVolumeName);
    }

    if (usingDynroot) {
	afs_GetDynrootFid(&afs_rootFid);
	tvp = afs_GetVolume(&afs_rootFid, NULL, READ_LOCK);
    } else {
	struct cell *lc = afs_GetPrimaryCell(READ_LOCK);

	if (!lc)
	    return ENOENT;
	localcell = lc->cellNum;
	afs_PutCell(lc, READ_LOCK);
	tvp = afs_GetVolumeByName(rootVolName, localcell, 1, NULL, READ_LOCK);
	if (!tvp) {
	    char buf[128];
	    int len = strlen(rootVolName);

	    if ((len < 9) || strcmp(&rootVolName[len - 9], ".readonly")) {
		strcpy(buf, rootVolName);
		afs_strcat(buf, ".readonly");
		tvp = afs_GetVolumeByName(buf, localcell, 1, NULL, READ_LOCK);
	    }
	}
	if (tvp) {
	    int volid = (tvp->roVol ? tvp->roVol : tvp->volume);
	    afs_rootFid.Cell = localcell;
	    if (afs_rootFid.Fid.Volume && afs_rootFid.Fid.Volume != volid
		&& afs_globalVp) {
		struct vcache *tvc = afs_globalVp;
		/* If we had a root fid before and it changed location we reset
		 * the afs_globalVp so that it will be reevaluated.
		 * Just decrement the reference count.  This only occurs during
		 * initial cell setup and can panic the machine if we set the
		 * count to zero and fs checkv is executed when the current
		 * directory is /afs. */
#ifdef AFS_LINUX20_ENV
		{
		    struct vrequest treq;
		    struct vattr vattr;
		    cred_t *credp;
		    struct dentry *dp;
		    struct vcache *vcp;

		    afs_rootFid.Fid.Volume = volid;
		    afs_rootFid.Fid.Vnode = 1;
		    afs_rootFid.Fid.Unique = 1;

		    credp = crref();
		    if (afs_InitReq(&treq, credp))
			goto out;
		    vcp = afs_GetVCache(&afs_rootFid, &treq, NULL, NULL);
		    if (!vcp)
			goto out;
		    afs_getattr(vcp, &vattr, credp);
		    afs_fill_inode(AFSTOV(vcp), &vattr);

		    dp = d_find_alias(AFSTOV(afs_globalVp));
#if defined(AFS_LINUX24_ENV)
		    spin_lock(&dcache_lock);
#if defined(AFS_LINUX26_ENV)
		    spin_lock(&dp->d_lock);
#endif
#endif
		    list_del_init(&dp->d_alias);
		    list_add(&dp->d_alias, &(AFSTOV(vcp)->i_dentry));
		    dp->d_inode = AFSTOV(vcp);
#if defined(AFS_LINUX24_ENV)
#if defined(AFS_LINUX26_ENV)
		    spin_unlock(&dp->d_lock);
#endif
		    spin_unlock(&dcache_lock);
#endif
		    dput(dp);

		    AFS_FAST_RELE(afs_globalVp);
		    afs_globalVp = vcp;
		  out:
		    crfree(credp);
		}
#else
#ifdef AFS_DARWIN80_ENV
		afs_PutVCache(afs_globalVp);
#else
		AFS_FAST_RELE(afs_globalVp);
#endif
		afs_globalVp = 0;
#endif
	    }
	    afs_rootFid.Fid.Volume = volid;
	    afs_rootFid.Fid.Vnode = 1;
	    afs_rootFid.Fid.Unique = 1;
	}
    }
    if (tvp) {
	afs_initState = 300;	/* won */
	afs_osi_Wakeup(&afs_initState);
	afs_PutVolume(tvp, READ_LOCK);
    }
    if (afs_rootFid.Fid.Volume)
	return 0;
    else
	return ENOENT;
}

/* ptr_parm 0 is the pathname, size_parm 0 to the fetch is the chunk number */
static void
BPath(register struct brequest *ab)
{
    register struct dcache *tdc = NULL;
    struct vcache *tvc = NULL;
    struct vnode *tvn = NULL;
#ifdef AFS_LINUX22_ENV
    struct dentry *dp = NULL;
#endif
    afs_size_t offset, len;
    struct vrequest treq;
    afs_int32 code;

    AFS_STATCNT(BPath);
    if ((code = afs_InitReq(&treq, ab->cred)))
	return;
    AFS_GUNLOCK();
#ifdef AFS_LINUX22_ENV
    code = gop_lookupname((char *)ab->ptr_parm[0], AFS_UIOSYS, 1, &dp);
    if (dp)
	tvn = (struct vnode *)dp->d_inode;
#else
    code = gop_lookupname((char *)ab->ptr_parm[0], AFS_UIOSYS, 1, &tvn);
#endif
    AFS_GLOCK();
    osi_FreeLargeSpace((char *)ab->ptr_parm[0]);	/* free path name buffer here */
    if (code)
	return;
    /* now the path may not have been in AFS, so check that before calling
     * our cache manager */
    if (!tvn || !IsAfsVnode(tvn)) {
	/* release it and give up */
	if (tvn) {
#ifdef AFS_LINUX22_ENV
	    dput(dp);
#else
	    AFS_RELE(tvn);
#endif
	}
	return;
    }
    tvc = VTOAFS(tvn);
    /* here we know it's an AFS vnode, so we can get the data for the chunk */
    tdc = afs_GetDCache(tvc, ab->size_parm[0], &treq, &offset, &len, 1);
    if (tdc)
	afs_PutDCache(tdc);
#ifdef AFS_LINUX22_ENV
    dput(dp);
#else
    AFS_RELE(tvn);
#endif
}

/* size_parm 0 to the fetch is the chunk number,
 * ptr_parm 0 is the dcache entry to wakeup,
 * size_parm 1 is true iff we should release the dcache entry here.
 */
static void
BPrefetch(register struct brequest *ab)
{
    register struct dcache *tdc;
    register struct vcache *tvc;
    afs_size_t offset, len;
    struct vrequest treq;

    AFS_STATCNT(BPrefetch);
    if ((len = afs_InitReq(&treq, ab->cred)))
	return;
    tvc = ab->vc;
    tdc = afs_GetDCache(tvc, ab->size_parm[0], &treq, &offset, &len, 1);
    if (tdc)
	afs_PutDCache(tdc);
    /* now, a waiter may be blocked until we clear the DFFetchReq bit; do so.
     * Can't use the tdc from GetDCache since afs_GetDCache may fail, but
     * someone may be waiting for our wakeup anyway.
     */
    tdc = (struct dcache *)(ab->ptr_parm[0]);
    ObtainSharedLock(&tdc->lock, 640);
    if (tdc->mflags & DFFetchReq) {
	UpgradeSToWLock(&tdc->lock, 641);
	tdc->mflags &= ~DFFetchReq;
	ReleaseWriteLock(&tdc->lock);
    } else {
	ReleaseSharedLock(&tdc->lock);
    }
    afs_osi_Wakeup(&tdc->validPos);
    if (ab->size_parm[1]) {
	afs_PutDCache(tdc);	/* put this one back, too */
    }
}
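
/*
 * Editorial sketch of the waiting side of the DFFetchReq handshake (not
 * from this file; the real waiter lives in the dcache code).  A reader
 * that finds DFFetchReq set sleeps on the same address BPrefetch wakes,
 * roughly:
 *
 *	while (tdc->mflags & DFFetchReq) {
 *	    ReleaseSharedLock(&tdc->lock);
 *	    afs_osi_Sleep(&tdc->validPos);
 *	    ObtainSharedLock(&tdc->lock, 0);    (lock-rank id illustrative)
 *	}
 */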

static void
BStore(register struct brequest *ab)
{
    register struct vcache *tvc;
    register afs_int32 code;
    struct vrequest treq;
#if defined(AFS_SGI_ENV)
    struct cred *tmpcred;
#endif

    AFS_STATCNT(BStore);
    if ((code = afs_InitReq(&treq, ab->cred)))
	return;
    code = 0;
    tvc = ab->vc;
#if defined(AFS_SGI_ENV)
    /*
     * Since StoreOnLastReference can end up calling osi_SyncVM which
     * calls into VM code that assumes that u.u_cred has the
     * correct credentials, we set ours to theirs for this transaction
     */
    tmpcred = OSI_GET_CURRENT_CRED();
    OSI_SET_CURRENT_CRED(ab->cred);

    /*
     * To avoid recursion since the WriteLock may be released during VM
     * operations, we hold the VOP_RWLOCK across this transaction as
     * do the other callers of StoreOnLastReference
     */
    AFS_RWLOCK((vnode_t *) tvc, 1);
#endif
    ObtainWriteLock(&tvc->lock, 209);
    code = afs_StoreOnLastReference(tvc, &treq);
    ReleaseWriteLock(&tvc->lock);
#if defined(AFS_SGI_ENV)
    OSI_SET_CURRENT_CRED(tmpcred);
    AFS_RWUNLOCK((vnode_t *) tvc, 1);
#endif
    /* now set the final return code, and wake up anyone waiting */
    if ((ab->flags & BUVALID) == 0) {
	ab->code = afs_CheckCode(code, &treq, 43);	/* set final code, since treq doesn't go across processes */
	ab->flags |= BUVALID;
	if (ab->flags & BUWAIT) {
	    ab->flags &= ~BUWAIT;
	    afs_osi_Wakeup(ab);
	}
    }
}
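
/*
 * Editorial sketch of the waiting side of the BUVALID/BUWAIT handshake
 * (illustrative, not from this file): a caller that queued the store with
 * a nonzero ause waits for the daemon's result roughly like this:
 *
 *	while ((tb->flags & BUVALID) == 0) {
 *	    tb->flags |= BUWAIT;
 *	    afs_osi_Sleep(tb);
 *	}
 *	code = tb->code;
 *	afs_BRelease(tb);
 */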

/* release a held request buffer */
void
afs_BRelease(register struct brequest *ab)
{
    AFS_STATCNT(afs_BRelease);
    MObtainWriteLock(&afs_xbrs, 294);
    if (--ab->refCount <= 0) {
	ab->flags = 0;
    }
    if (afs_brsWaiters)
	afs_osi_Wakeup(&afs_brsWaiters);
    MReleaseWriteLock(&afs_xbrs);
}

/* return true if bkg fetch daemons are all busy */
int
afs_BBusy(void)
{
    AFS_STATCNT(afs_BBusy);
#ifdef AFS_DISABLE_BKG
    if (disable_bkg)
	return 1;
#endif
    if (afs_brsDaemons > 0)
	return 0;
    return 1;
}

struct brequest *
afs_BQueue(register short aopcode, register struct vcache *avc,
	   afs_int32 dontwait, afs_int32 ause, struct AFS_UCRED *acred,
	   afs_size_t asparm0, afs_size_t asparm1, void *apparm0)
{
    register int i;
    register struct brequest *tb;

    AFS_STATCNT(afs_BQueue);
    MObtainWriteLock(&afs_xbrs, 296);
    while (1) {
	tb = afs_brs;
	for (i = 0; i < NBRS; i++, tb++) {
	    if (tb->refCount == 0)
		break;
	}
	if (i < NBRS) {
	    /* found a free buffer */
	    tb->opcode = aopcode;
	    tb->vc = avc;
	    tb->cred = acred;
	    crhold(tb->cred);
	    if (avc) {
		VN_HOLD(AFSTOV(avc));
	    }
	    tb->refCount = ause + 1;
	    tb->size_parm[0] = asparm0;
	    tb->size_parm[1] = asparm1;
	    tb->ptr_parm[0] = apparm0;
	    tb->flags = 0;
	    tb->code = 0;
	    tb->ts = afs_brs_count++;
	    /* if daemons are waiting for work, wake them up */
	    if (afs_brsDaemons > 0) {
		afs_osi_Wakeup(&afs_brsDaemons);
	    }
	    MReleaseWriteLock(&afs_xbrs);
	    return tb;
	}
	if (dontwait) {
	    MReleaseWriteLock(&afs_xbrs);
	    return (struct brequest *)0;
	}
	/* no free buffers, sleep a while */
	afs_brsWaiters++;
	MReleaseWriteLock(&afs_xbrs);
	afs_osi_Sleep(&afs_brsWaiters);
	MObtainWriteLock(&afs_xbrs, 301);
	afs_brsWaiters--;
    }
}
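
/*
 * Editorial usage sketch (not from this file): a prefetching caller queues
 * a background chunk fetch using the parameter conventions BPrefetch
 * expects above -- size_parm 0 = chunk number, ptr_parm 0 = dcache entry to
 * wake, size_parm 1 = nonzero iff the daemon should put the dcache ref:
 *
 *	tdc->mflags |= DFFetchReq;
 *	tb = afs_BQueue(BOP_FETCH, avc, 1, 0, acred,
 *			(afs_size_t) chunk, (afs_size_t) 1, (void *)tdc);
 *	if (!tb) {
 *	    ... no free request buffers: clear DFFetchReq and fetch in
 *	    the foreground instead ...
 *	}
 */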

#ifdef AFS_AIX41_ENV
/* AIX 4.1 has a much different sleep/wakeup mechanism available for use.
 * The modifications here will work for either a UP or MP machine.
 */
struct buf *afs_asyncbuf = (struct buf *)0;
tid_t afs_asyncbuf_cv = EVENT_NULL;
afs_int32 afs_biodcnt = 0;

/* in implementing this, I assumed that all external linked lists were
 * null-terminated.
 *
 * Several places in this code traverse a linked list.  The algorithm
 * used here is probably unfamiliar to most people.  Careful examination
 * will show that it eliminates an assignment inside the loop, as compared
 * to the standard algorithm, at the cost of occasionally using an extra
 * variable.
 */
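
/*
 * Editorial sketch of the technique (not from the original source): instead
 * of carrying a separate "previous element" pointer that must be reassigned
 * on every iteration, the walk in afs_get_bioreq below keeps a pointer to
 * the *link field* (bestlbpP/lbpP).  Unsplicing the chosen element is then
 * a single store through that pointer; is_best() here is hypothetical:
 *
 *	struct buf **lbpP = &afs_asyncbuf, **bestlbpP = lbpP;
 *	struct buf *bp;
 *	for (bp = *lbpP; bp; lbpP = &bp->av_forw, bp = *lbpP) {
 *	    if (is_best(bp))
 *	        bestlbpP = lbpP;                   remember the link, not bp
 *	}
 *	*bestlbpP = (*bestlbpP)->av_forw;          unsplice without a "prev"
 */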

/* This function obtains, and returns, a pointer to a buffer for
 * processing by a daemon.  It sleeps until such a buffer is available.
 * The source of buffers for it is the list afs_asyncbuf (see also
 * afs_gn_strategy).  This function may be invoked concurrently by
 * several processes, that is, several instances of the same daemon.
 * afs_gn_strategy, which adds buffers to the list, runs at interrupt
 * level, while get_bioreq runs at process level.
 *
 * Since AIX 4.1 can wake just one process at a time, the separate sleep
 * addresses have been removed.
 * Note that the kernel_lock is held until the e_sleep_thread() occurs.
 * The afs_asyncbuf_lock is primarily used to serialize access between
 * process and interrupts.
 */
Simple_lock afs_asyncbuf_lock;

static struct buf *
afs_get_bioreq()
{
    struct buf *bp = NULL;
    struct buf *bestbp;
    struct buf **bestlbpP, **lbpP;
    long bestage;
    struct buf *t1P, *t2P;	/* temp pointers for list manipulation */
    int oldPriority;
    struct afs_bioqueue *s;

    /* ??? Does the forward pointer of the returned buffer need to be NULL? */

    /* Disable interrupts from the strategy function, and save the
     * prior priority level and lock access to the afs_asyncbuf.
     */
    AFS_GUNLOCK();
    oldPriority = disable_lock(INTMAX, &afs_asyncbuf_lock);
    while (1) {
	if (afs_asyncbuf) {
	    /* look for oldest buffer */
	    bp = bestbp = afs_asyncbuf;
	    bestage = (long)bestbp->av_back;
	    bestlbpP = &afs_asyncbuf;
	    while (1) {
		lbpP = &bp->av_forw;
		bp = *lbpP;
		if (!bp)
		    break;
		if ((long)bp->av_back - bestage < 0) {
		    bestbp = bp;
		    bestlbpP = lbpP;
		    bestage = (long)bp->av_back;
		}
	    }
	    bp = bestbp;
	    *bestlbpP = bp->av_forw;
	    break;
	} else {
	    /* If afs_asyncbuf is null, it is necessary to go to sleep.
	     * e_wakeup_one() ensures that only one thread wakes.
	     */
	    int interrupted;
	    /* The LOCK_HANDLER indicates to e_sleep_thread to only drop the
	     * lock on an MP machine.
	     */
	    interrupted =
		e_sleep_thread(&afs_asyncbuf_cv, &afs_asyncbuf_lock,
			       LOCK_HANDLER | INTERRUPTIBLE);
	    if (interrupted == THREAD_INTERRUPTED) {
		/* re-enable interrupts from strategy */
		unlock_enable(oldPriority, &afs_asyncbuf_lock);
		AFS_GLOCK();
		return (NULL);
	    }
	}			/* end of "else asyncbuf is empty" */
    }				/* end of "inner loop" */

    unlock_enable(oldPriority, &afs_asyncbuf_lock);
    AFS_GLOCK();

    /* For the convenience of other code, replace the gnodes in
     * the b_vp field of bp and the other buffers on the b_work
     * chain with the corresponding vnodes.
     *
     * ??? what happens to the gnodes?  They're not just cut loose,
     *     are they?
     */
    for (t1P = bp;;) {
	t2P = (struct buf *)t1P->b_work;
	t1P->b_vp = ((struct gnode *)t1P->b_vp)->gn_vnode;
	if (!t2P)
	    break;
	t1P = (struct buf *)t2P->b_work;
	t2P->b_vp = ((struct gnode *)t2P->b_vp)->gn_vnode;
	if (!t1P)
	    break;
    }

    /* If the buffer does not specify I/O, it may immediately
     * be returned to the caller.  This condition is detected
     * by examining the buffer's flags (the b_flags field).  If
     * the B_PFPROT bit is set, the buffer represents a protection
     * violation, rather than a request for I/O.  The remainder
     * of the outer loop handles the case where the B_PFPROT bit is clear.
     */
    if (bp->b_flags & B_PFPROT) {
	return (bp);
    }
    return (bp);
}				/* end of function afs_get_bioreq() */

/* afs_BioDaemon
 *
 * This function is the daemon.  It is called from the syscall
 * interface.  Ordinarily, a script or an administrator will run a
 * daemon startup utility, specifying the number of I/O daemons to
 * run.  The utility will fork off that number of processes,
 * each making the appropriate syscall, which will cause this
 * function to be invoked.
 */
static int afs_initbiod = 0;	/* this is self-initializing code */
int DOvmlock = 0;
int
afs_BioDaemon(afs_int32 nbiods)
{
    afs_int32 code, s, pflg = 0;
    label_t jmpbuf;
    struct buf *bp, *bp1, *tbp1, *tbp2;	/* temp pointers only */
    caddr_t tmpaddr;
    struct vcache *vcp;
    char tmperr;

    if (!afs_initbiod) {
	afs_initbiod = 1;
	/* pin lock, since we'll be using it in an interrupt. */
	lock_alloc(&afs_asyncbuf_lock, LOCK_ALLOC_PIN, 2, 1);
	simple_lock_init(&afs_asyncbuf_lock);
	pin(&afs_asyncbuf, sizeof(struct buf *));
	pin(&afs_asyncbuf_cv, sizeof(afs_int32));
    }

    /* Ignore HUP signals... */
    {
	sigset_t sigbits, osigbits;
	/*
	 * add SIGHUP to the set of already masked signals
	 */
	SIGFILLSET(sigbits);	/* allow all signals */
	SIGDELSET(sigbits, SIGHUP);	/*   except SIGHUP */
	limit_sigs(&sigbits, &osigbits);	/*   and already masked */
    }

    /* Main body starts here -- this is an intentional infinite loop, and
     * should NEVER exit.
     *
     * Now, the loop will exit if get_bioreq() returns NULL, indicating
     * that we've been interrupted.
     */
    while (1) {
	bp = afs_get_bioreq();
	if (!bp)
	    break;		/* we were interrupted */
	if ((code = setjmpx(&jmpbuf))) {
	    /* This should not have happened, maybe a lack of resources */
	    AFS_GUNLOCK();
	    s = disable_lock(INTMAX, &afs_asyncbuf_lock);
	    for (bp1 = bp; bp; bp = bp1) {
		if (bp1)
		    bp1 = (struct buf *)bp1->b_work;
		bp->b_actf = 0;
		bp->b_error = code;
		bp->b_flags |= B_ERROR;
		iodone(bp);
	    }
	    unlock_enable(s, &afs_asyncbuf_lock);
	    AFS_GLOCK();
	    continue;
	}
	vcp = VTOAFS(bp->b_vp);
	if (bp->b_flags & B_PFSTORE) {	/* XXXX */
	    ObtainWriteLock(&vcp->lock, 404);
	    if (vcp->v.v_gnode->gn_mwrcnt) {
		afs_offs_t newlength =
		    (afs_offs_t) dbtob(bp->b_blkno) + bp->b_bcount;
		if (vcp->m.Length < newlength) {
		    afs_Trace4(afs_iclSetp, CM_TRACE_SETLENGTH,
			       ICL_TYPE_STRING, __FILE__, ICL_TYPE_LONG,
			       __LINE__, ICL_TYPE_OFFSET,
			       ICL_HANDLE_OFFSET(vcp->m.Length),
			       ICL_TYPE_OFFSET, ICL_HANDLE_OFFSET(newlength));
		    vcp->m.Length = newlength;
		}
	    }
	    ReleaseWriteLock(&vcp->lock);
	}
	/* If the buffer represents a protection violation, rather than
	 * an actual request for I/O, no special action need be taken.
	 */
	if (bp->b_flags & B_PFPROT) {
	    iodone(bp);		/* Notify all users of the buffer that we're done */
	    continue;
	}

	if (DOvmlock)
	    ObtainWriteLock(&vcp->pvmlock, 211);
	/*
	 * First map its data area to a region in the current address space
	 * by calling vm_att with the subspace identifier, and a pointer to
	 * the data area.  vm_att returns a new data area pointer, but we
	 * also want to hang onto the old one.
	 */
	tmpaddr = bp->b_baddr;
	bp->b_baddr = (caddr_t) vm_att(bp->b_xmemd.subspace_id, tmpaddr);
	tmperr = afs_ustrategy(bp);	/* temp variable saves offset calculation */
	if (tmperr) {		/* error case */
	    bp->b_flags |= B_ERROR;	/* should other flags remain set ??? */
	    bp->b_error = tmperr;
	}

	/* Unmap the buffer's data area by calling vm_det.  Reset data area
	 * to the value that we saved above.
	 */
	vm_det(bp->b_baddr);
	bp->b_baddr = tmpaddr;

	/*
	 * The buffer may be linked with other buffers via the b_work field.
	 * See also afs_gn_strategy.  For each buffer in the chain (including
	 * bp) notify all users of the buffer that the daemon is finished
	 * using it by calling iodone.
	 * assumes iodone can modify the b_work field.
	 */
	for (tbp1 = bp;;) {
	    tbp2 = (struct buf *)tbp1->b_work;
	    iodone(tbp1);
	    if (!tbp2)
		break;
	    tbp1 = (struct buf *)tbp2->b_work;
	    iodone(tbp2);
	    if (!tbp1)
		break;
	}
	if (DOvmlock)
	    ReleaseWriteLock(&vcp->pvmlock);	/* Unlock the vnode. */
	clrjmpx(&jmpbuf);	/* Unregister the jmpbuf */
    }				/* infinite loop (unless we're interrupted) */
}				/* end of afs_BioDaemon() */

#endif /* AFS_AIX41_ENV */

int afs_nbrs = 0;
void
afs_BackgroundDaemon(void)
{
    struct brequest *tb;
    int i, foundAny;

    AFS_STATCNT(afs_BackgroundDaemon);
    /* initialize subsystem */
    if (brsInit == 0) {
	LOCK_INIT(&afs_xbrs, "afs_xbrs");
	memset((char *)afs_brs, 0, sizeof(afs_brs));
	brsInit = 1;
#if defined (AFS_SGI_ENV) && defined(AFS_SGI_SHORTSTACK)
	/*
	 * steal the first daemon for doing delayed DSlot flushing
	 * (see afs_GetDownDSlot)
	 */
	AFS_GUNLOCK();
	afs_sgidaemon();
	return;
#endif
    }
    afs_nbrs++;

    MObtainWriteLock(&afs_xbrs, 302);
    while (1) {
	afs_int32 min_ts = 0;
	struct brequest *min_tb = NULL;

	if (afs_termState == AFSOP_STOP_BKG) {
	    if (--afs_nbrs <= 0) {
		/* it's our time to shut down the daemons */
		afs_termState = AFSOP_STOP_TRUNCDAEMON;
	    }
	    MReleaseWriteLock(&afs_xbrs);
	    afs_osi_Wakeup(&afs_termState);
	    return;
	}

	/* find a request */
	tb = afs_brs;
	foundAny = 0;
	for (i = 0; i < NBRS; i++, tb++) {
	    /* look for request with smallest ts */
	    if ((tb->refCount > 0) && !(tb->flags & BSTARTED)) {
		/* new request, not yet picked up; the subtraction makes the
		 * "older than min_ts" test safe across counter wraparound */
		if ((min_tb && (min_ts - tb->ts > 0)) || !min_tb) {
		    min_tb = tb;
		    min_ts = tb->ts;
		}
	    }
	}
	if ((tb = min_tb)) {
	    /* claim and process this request */
	    tb->flags |= BSTARTED;
	    MReleaseWriteLock(&afs_xbrs);
	    foundAny = 1;
	    afs_Trace1(afs_iclSetp, CM_TRACE_BKG1, ICL_TYPE_INT32,
		       tb->opcode);
	    if (tb->opcode == BOP_FETCH)
		BPrefetch(tb);
	    else if (tb->opcode == BOP_STORE)
		BStore(tb);
	    else if (tb->opcode == BOP_PATH)
		BPath(tb);
	    else
		panic("background bop");
	    if (tb->vc) {
		AFS_RELE(AFSTOV(tb->vc));	/* MUST call vnode layer or could lose vnodes */
		tb->vc = NULL;
	    }
	    if (tb->cred) {
		crfree(tb->cred);
		tb->cred = (struct AFS_UCRED *)0;
	    }
	    afs_BRelease(tb);	/* this grabs and releases afs_xbrs lock */
	    MObtainWriteLock(&afs_xbrs, 305);
	}
	if (!foundAny) {
	    /* wait for new request */
	    afs_brsDaemons++;
	    MReleaseWriteLock(&afs_xbrs);
	    afs_osi_Sleep(&afs_brsDaemons);
	    MObtainWriteLock(&afs_xbrs, 307);
	    afs_brsDaemons--;
	}
    }
}

void
shutdown_daemons(void)
{
    AFS_STATCNT(shutdown_daemons);
    if (afs_cold_shutdown) {
	afs_brsDaemons = brsInit = 0;
	rxepoch_checked = afs_nbrs = 0;
	memset((char *)afs_brs, 0, sizeof(afs_brs));
	memset((char *)&afs_xbrs, 0, sizeof(afs_lock_t));
	afs_brsWaiters = 0;
#ifdef AFS_AIX41_ENV
	lock_free(&afs_asyncbuf_lock);
	unpin(&afs_asyncbuf, sizeof(struct buf *));
	unpin(&afs_asyncbuf_cv, sizeof(afs_int32));
	afs_initbiod = 0;
#endif
    }
}

#if defined(AFS_SGI_ENV) && defined(AFS_SGI_SHORTSTACK)
/*
 * sgi - daemon - handles certain operations that otherwise
 * would use up too much kernel stack space
 *
 * This all assumes that since the caller must have the xdcache lock
 * exclusively, the list will never be more than one long
 * and no one else can attempt to add anything until we're done.
 */
SV_TYPE afs_sgibksync;
SV_TYPE afs_sgibkwait;
lock_t afs_sgibklock;
struct dcache *afs_sgibklist;

int
afs_sgidaemon(void)
{
    int s;
    struct dcache *tdc;

    if (afs_sgibklock == NULL) {
	SV_INIT(&afs_sgibksync, "bksync", 0, 0);
	SV_INIT(&afs_sgibkwait, "bkwait", 0, 0);
	SPINLOCK_INIT(&afs_sgibklock, "bklock");
    }
    s = SPLOCK(afs_sgibklock);
    for (;;) {
	/* wait for something to do */
	SP_WAIT(afs_sgibklock, s, &afs_sgibksync, PINOD);
	osi_Assert(afs_sgibklist);

	/* XX will probably need to generalize to real list someday */
	s = SPLOCK(afs_sgibklock);
	while (afs_sgibklist) {
	    tdc = afs_sgibklist;
	    afs_sgibklist = NULL;
	    SPUNLOCK(afs_sgibklock, s);
	    AFS_GLOCK();
	    tdc->dflags &= ~DFEntryMod;
	    afs_WriteDCache(tdc, 1);
	    AFS_GUNLOCK();
	    s = SPLOCK(afs_sgibklock);
	}

	/* done all the work - wake everyone up */
	while (SV_SIGNAL(&afs_sgibkwait));
    }
}
#endif