/*
 * Copyright 2000, International Business Machines Corporation and others.
 * This software has been released under the terms of the IBM Public
 * License. For details, see the LICENSE file in the top-level source
 * directory or online at http://www.openafs.org/dl/license10.html
 */
#include <afsconfig.h>
#include "afs/param.h"

#include <sys/sleep.h>

#include "afs/sysincludes.h" /* Standard vendor system headers */
#include "afsincludes.h" /* Afs-based standard headers */
#include "afs/afs_stats.h" /* statistics gathering code */
#include "afs/afs_cbqueue.h"

#include <sys/adspace.h> /* for vm_att(), vm_det() */

#if defined(AFS_CACHE_BYPASS)
#include "afs/afs_bypasscache.h"
#endif /* defined(AFS_CACHE_BYPASS) */
/* background request queue size */
afs_lock_t afs_xbrs; /* lock for brs */
static int brsInit = 0;
short afs_brsWaiters = 0; /* number of users waiting for brs buffers */
short afs_brsDaemons = 0; /* number of daemons waiting for brs requests */
struct brequest afs_brs[NBRS]; /* request structures */
struct afs_osi_WaitHandle AFS_WaitHandler, AFS_CSWaitHandler;
static int afs_brs_count = 0; /* request counter, to service reqs in order */

static int rxepoch_checked = 0;
#define afs_CheckRXEpoch() {if (rxepoch_checked == 0 && rxkad_EpochWasSet) { \
    rxepoch_checked = 1; afs_GCUserData(/* force flag */ 1); } }
/* PAG garbage collection */
/* We induce a compile error if param.h does not define AFS_GCPAGS */
afs_int32 afs_gcpags = AFS_GCPAGS;
afs_int32 afs_gcpags_procsize = 0;

afs_int32 afs_CheckServerDaemonStarted = 0;
#ifndef DEFAULT_PROBE_INTERVAL
#define DEFAULT_PROBE_INTERVAL 30 /* probe down servers every 30 seconds by default */
afs_int32 afs_probe_interval = DEFAULT_PROBE_INTERVAL;
afs_int32 afs_probe_all_interval = 600;
afs_int32 afs_nat_probe_interval = 60;
afs_int32 afs_preCache = 0;
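
/* PROBE_WAIT() picks a randomized wait, in milliseconds, between roughly half
 * of afs_probe_interval and the full interval, so that many clients do not
 * end up probing their servers in lockstep. */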
#define PROBE_WAIT() (1000 * (afs_probe_interval - ((afs_random() & 0x7fffffff) \
                      % (afs_probe_interval/2))))
afs_SetCheckServerNATmode(int isnat)
    static afs_int32 old_intvl, old_all_intvl;

    if (isnat && !wasnat) {
        old_intvl = afs_probe_interval;
        old_all_intvl = afs_probe_all_interval;
        afs_probe_interval = afs_nat_probe_interval;
        afs_probe_all_interval = afs_nat_probe_interval;
        afs_osi_CancelWait(&AFS_CSWaitHandler);
    } else if (!isnat && wasnat) {
        afs_probe_interval = old_intvl;
        afs_probe_all_interval = old_all_intvl;
afs_CheckServerDaemon(void)
    afs_int32 now, delay, lastCheck, last10MinCheck;

    afs_CheckServerDaemonStarted = 1;

    while (afs_initState < 101)
        afs_osi_Sleep(&afs_initState);
    afs_osi_Wait(PROBE_WAIT(), &AFS_CSWaitHandler, 0);

    last10MinCheck = lastCheck = osi_Time();

    if (afs_termState == AFSOP_STOP_CS) {
        afs_termState = AFSOP_STOP_BKG;
        afs_osi_Wakeup(&afs_termState);

    if (afs_probe_interval + lastCheck <= now) {
        afs_CheckServers(1, NULL); /* check down servers */
        lastCheck = now = osi_Time();

    if (afs_probe_all_interval + last10MinCheck <= now) {
        afs_Trace1(afs_iclSetp, CM_TRACE_PROBEUP, ICL_TYPE_INT32, afs_probe_all_interval);
        afs_CheckServers(0, NULL);
        last10MinCheck = now = osi_Time();

    /* shutdown check. */
    if (afs_termState == AFSOP_STOP_CS) {
        afs_termState = AFSOP_STOP_BKG;
        afs_osi_Wakeup(&afs_termState);

    /* Compute time to next probe. */
    delay = afs_probe_interval + lastCheck;
    if (delay > afs_probe_all_interval + last10MinCheck)
        delay = afs_probe_all_interval + last10MinCheck;
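    /* pick whichever probe (down-server check or all-server check) comes due
     * first */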
    afs_osi_Wait(delay * 1000, &AFS_CSWaitHandler, 0);

    afs_CheckServerDaemonStarted = 0;
extern int vfs_context_ref;

/* This function always holds the GLOCK whilst it is running. The caller
 * gets the GLOCK before invoking it, and afs_osi_Sleep drops the GLOCK
 * whilst we are sleeping, and regains it when we're woken up. */
    struct afs_exporter *exporter;
    afs_int32 last3MinCheck, last10MinCheck, last60MinCheck, lastNMinCheck;
    afs_int32 last1MinCheck, last5MinCheck;
    afs_uint32 lastCBSlotBump;

    AFS_STATCNT(afs_Daemon);

    last1MinCheck = last3MinCheck = last60MinCheck = last10MinCheck =
        last5MinCheck = lastNMinCheck = 0;
    afs_rootFid.Fid.Volume = 0;
    while (afs_initState < 101)
        afs_osi_Sleep(&afs_initState);

#ifdef AFS_DARWIN80_ENV
    if (afs_osi_ctxtp_initialized)
        osi_Panic("vfs context already initialized");
    while (afs_osi_ctxtp && vfs_context_ref)
        afs_osi_Sleep(&afs_osi_ctxtp);
    if (afs_osi_ctxtp && !vfs_context_ref)
        vfs_context_rele(afs_osi_ctxtp);
    afs_osi_ctxtp = vfs_context_create(NULL);
    afs_osi_ctxtp_initialized = 1;

    lastCBSlotBump = now;
    /* when a lot of clients are booted simultaneously, they develop
     * annoying synchronous VL server bashing behaviors. So we stagger them. */
    last1MinCheck = now + ((afs_random() & 0x7fffffff) % 60); /* an extra 30 */
    last3MinCheck = now - 90 + ((afs_random() & 0x7fffffff) % 180);
    last60MinCheck = now - 1800 + ((afs_random() & 0x7fffffff) % 3600);
    last10MinCheck = now - 300 + ((afs_random() & 0x7fffffff) % 600);
    last5MinCheck = now - 150 + ((afs_random() & 0x7fffffff) % 300);
    lastNMinCheck = now - 90 + ((afs_random() & 0x7fffffff) % 180);
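    /* Each timer above starts at a random point within its own period, so
     * freshly booted clients spread out their first probes instead of all
     * hitting the servers at once. */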
    /* start off with afs_initState >= 101 (basic init done) */
    afs_CheckCallbacks(20); /* unstat anything which will expire soon */

    /* things to do every 20 seconds or less - required by protocol spec */
    afs_FlushActiveVcaches(0); /* flush NFS writes */
    afs_FlushVCBs(1); /* flush queued callbacks */

#if defined(AFS_NBSD50_ENV)
    c1 = ISAFS_GLOCK(); /* this thread owns the GLOCK */
    c2 = mutex_tryenter(&afs_global_mtx); /* not held either */
    printf("afs_daemons periodic glock check: curthread owns glock %s; "
           "glock held somewhere %s\n",
           c1 ? "true" : "false",
           c2 ? "true" : "false");
    afs_MaybeWakeupTruncateDaemon(); /* free cache space if we have to */
    rx_CheckPackets(); /* Does RX need more packets? */

    if (lastCBSlotBump + CBHTSLOTLEN < now) { /* pretty time-dependent */
        lastCBSlotBump = now;
        if (afs_BumpBase()) {
            afs_CheckCallbacks(20); /* unstat anything which will expire soon */

    if (last1MinCheck + 60 < now) {
        /* things to do every minute */
        DFlush(); /* write out dir buffers */
        afs_WriteThroughDSlots(); /* write through cacheinfo entries */
        ObtainWriteLock(&afs_xvcache, 736);
        afs_FlushReclaimedVcaches();
        ReleaseWriteLock(&afs_xvcache);
        afs_FlushActiveVcaches(1); /* keep flocks held & flush nfs writes */
        afs_StoreDirtyVcaches();

    if (last3MinCheck + 180 < now) {
        afs_CheckTokenCache(); /* check for access cache resets due to expired

    if (afsd_dynamic_vcaches && (last5MinCheck + 300 < now)) {
        /* start with trying to drop us back to our base usage */
        if (afs_maxvcount <= afs_cacheStats)
            anumber = VCACHE_FREE;
        else
            anumber = VCACHE_FREE + (afs_maxvcount - afs_cacheStats);

        ObtainWriteLock(&afs_xvcache, 734);
        afs_ShakeLooseVCaches(anumber);
        ReleaseWriteLock(&afs_xvcache);
    if (!afs_CheckServerDaemonStarted) {
        /* Do the check here if the correct afsd is not installed. */
        afs_warn("Please install afsd with check server daemon.\n");

        if (lastNMinCheck + afs_probe_interval < now) {
            /* only check down servers */
            afs_CheckServers(1, NULL);

    if (last10MinCheck + 600 < now) {
#ifdef AFS_USERSPACE_IP_ADDR
        extern int rxi_GetcbiInfo(void);

        afs_Trace1(afs_iclSetp, CM_TRACE_PROBEUP, ICL_TYPE_INT32, 600);
#ifdef AFS_USERSPACE_IP_ADDR
        if (rxi_GetcbiInfo()) { /* addresses changed from last time */
#else /* AFS_USERSPACE_IP_ADDR */
        if (rxi_GetIFInfo()) { /* addresses changed from last time */
#endif /* else AFS_USERSPACE_IP_ADDR */
        if (!afs_CheckServerDaemonStarted)
            afs_CheckServers(0, NULL);
        afs_GCUserData(0); /* gc old conns */
        /* This is probably the wrong way of doing GC for the various exporters but it will suffice for a while */
        for (exporter = root_exported; exporter;
             exporter = exporter->exp_next) {
            (void)EXP_GC(exporter, 0); /* Generalize params */

        afs_CheckVolumeNames(AFS_VOLCHECK_EXPIRED |

        afs_CheckVolumeNames(AFS_VOLCHECK_EXPIRED |

        last10MinCheck = now;

    if (last60MinCheck + 3600 < now) {
        afs_Trace1(afs_iclSetp, CM_TRACE_PROBEVOLUME, ICL_TYPE_INT32,
        afs_CheckRootVolume();

        if (afs_gcpags == AFS_GCPAGS_OK) {

        last60MinCheck = now;
    if (afs_initState < 300) { /* while things ain't rosy */
        code = afs_CheckRootVolume();

            afs_initState = 300; /* succeeded */
        if (afs_initState < 200)
            afs_initState = 200; /* tried once */
        afs_osi_Wakeup(&afs_initState);

    /* 18285 is because we're trying to divide evenly into 128, that is,
     * CBSlotLen, while staying just under 20 seconds. If CBSlotLen
     * changes, should probably change this interval, too.
     * Some of the preceding actions may take quite some time, so we
     * might not want to wait the entire interval */
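    /* (128 seconds split into 7 passes is 128000 ms / 7 ~= 18285 ms per pass,
     * which keeps each pass just under 20 seconds.) */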
    now = 18285 - (osi_Time() - now);

    afs_osi_Wait(now, &AFS_WaitHandler, 0);

    if (afs_termState == AFSOP_STOP_AFS) {
        if (afs_CheckServerDaemonStarted)
            afs_termState = AFSOP_STOP_CS;
        else
            afs_termState = AFSOP_STOP_BKG;
        afs_osi_Wakeup(&afs_termState);
afs_CheckRootVolume(void)
    char rootVolName[32];
    struct volume *tvp = NULL;
    int usingDynroot = afs_GetDynrootEnable();

    AFS_STATCNT(afs_CheckRootVolume);
    if (*afs_rootVolumeName == 0) {
        strcpy(rootVolName, "root.afs");

        strcpy(rootVolName, afs_rootVolumeName);

        afs_GetDynrootFid(&afs_rootFid);
        tvp = afs_GetVolume(&afs_rootFid, NULL, READ_LOCK);

        struct cell *lc = afs_GetPrimaryCell(READ_LOCK);

        localcell = lc->cellNum;
        afs_PutCell(lc, READ_LOCK);
        tvp = afs_GetVolumeByName(rootVolName, localcell, 1, NULL, READ_LOCK);
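        /* If the read/write root volume can't be located, the code below
         * retries with the volume's ".readonly" clone. */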
        int len = strlen(rootVolName);

        if ((len < 9) || strcmp(&rootVolName[len - 9], ".readonly")) {
            strcpy(buf, rootVolName);
            afs_strcat(buf, ".readonly");
            tvp = afs_GetVolumeByName(buf, localcell, 1, NULL, READ_LOCK);

        int volid = (tvp->roVol ? tvp->roVol : tvp->volume);
        afs_rootFid.Cell = localcell;
        if (afs_rootFid.Fid.Volume && afs_rootFid.Fid.Volume != volid
            /* If we had a root fid before and it changed location we reset
             * the afs_globalVp so that it will be reevaluated.
             * Just decrement the reference count. This only occurs during
             * initial cell setup and can panic the machine if we set the
             * count to zero and fs checkv is executed when the current
#ifdef AFS_LINUX20_ENV
            struct vrequest treq;

            afs_rootFid.Fid.Volume = volid;
            afs_rootFid.Fid.Vnode = 1;
            afs_rootFid.Fid.Unique = 1;

            if (afs_InitReq(&treq, credp))

            vcp = afs_GetVCache(&afs_rootFid, &treq, NULL, NULL);

            afs_getattr(vcp, &vattr, credp);
            afs_fill_inode(AFSTOV(vcp), &vattr);
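            /* Re-point the cached root dentry from the old global root vnode
             * to the inode of the newly located root volume. */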
            dp = d_find_alias(AFSTOV(afs_globalVp));

#if defined(AFS_LINUX24_ENV)
#if defined(HAVE_DCACHE_LOCK)
            spin_lock(&dcache_lock);

            spin_lock(&AFSTOV(vcp)->i_lock);

#if defined(AFS_LINUX26_ENV)
            spin_lock(&dp->d_lock);

            list_del_init(&dp->d_alias);
            list_add(&dp->d_alias, &(AFSTOV(vcp)->i_dentry));
            dp->d_inode = AFSTOV(vcp);
#if defined(AFS_LINUX24_ENV)
#if defined(AFS_LINUX26_ENV)
            spin_unlock(&dp->d_lock);

#if defined(HAVE_DCACHE_LOCK)
            spin_unlock(&dcache_lock);

            spin_unlock(&AFSTOV(vcp)->i_lock);

            AFS_FAST_RELE(afs_globalVp);

#ifdef AFS_DARWIN80_ENV
            afs_PutVCache(afs_globalVp);

            AFS_FAST_RELE(afs_globalVp);

            afs_rootFid.Fid.Volume = volid;
            afs_rootFid.Fid.Vnode = 1;
            afs_rootFid.Fid.Unique = 1;

        afs_initState = 300; /* won */
        afs_osi_Wakeup(&afs_initState);
        afs_PutVolume(tvp, READ_LOCK);

    if (afs_rootFid.Fid.Volume)
/* ptr_parm 0 is the pathname, size_parm 0 to the fetch is the chunk number */
BPath(struct brequest *ab)
    struct dcache *tdc = NULL;
    struct vcache *tvc = NULL;
    struct vnode *tvn = NULL;
#ifdef AFS_LINUX22_ENV
    struct dentry *dp = NULL;

    afs_size_t offset, len;
    struct vrequest treq;

    if ((code = afs_InitReq(&treq, ab->cred)))

#ifdef AFS_LINUX22_ENV
    code = gop_lookupname((char *)ab->ptr_parm[0], AFS_UIOSYS, 1, &dp);

    tvn = (struct vnode *)dp->d_inode;

    code = gop_lookupname((char *)ab->ptr_parm[0], AFS_UIOSYS, 1, &tvn);

    osi_FreeLargeSpace((char *)ab->ptr_parm[0]); /* free path name buffer here */

    /* now path may not have been in afs, so check that before calling our cache manager */
    if (!tvn || !IsAfsVnode(tvn)) {
        /* release it and give up */
#ifdef AFS_LINUX22_ENV
    /* here we know it's an afs vnode, so we can get the data for the chunk */
    tdc = afs_GetDCache(tvc, ab->size_parm[0], &treq, &offset, &len, 1);

#ifdef AFS_LINUX22_ENV

/* size_parm 0 to the fetch is the chunk number,
 * ptr_parm 0 is the dcache entry to wakeup,
 * size_parm 1 is true iff we should release the dcache entry here. */
BPrefetch(struct brequest *ab)
    afs_size_t offset, len, abyte, totallen = 0;
    struct vrequest treq;

    AFS_STATCNT(BPrefetch);
    if ((len = afs_InitReq(&treq, ab->cred)))

    abyte = ab->size_parm[0];
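    /* The loop below prefetches successive chunks starting at abyte until
     * roughly afs_preCache bytes have been read ahead, or no data remains. */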
        tdc = afs_GetDCache(tvc, abyte, &treq, &offset, &len, 1);

    } while ((totallen < afs_preCache) && tdc && (len > 0));

    /* now, dude may be waiting for us to clear DFFetchReq bit; do so. Can't
     * use tdc from GetDCache since afs_GetDCache may fail, but someone may
     * be waiting for our wakeup anyway. */
    tdc = (struct dcache *)(ab->ptr_parm[0]);
    ObtainSharedLock(&tdc->lock, 640);
    if (tdc->mflags & DFFetchReq) {
        UpgradeSToWLock(&tdc->lock, 641);
        tdc->mflags &= ~DFFetchReq;
        ReleaseWriteLock(&tdc->lock);

        ReleaseSharedLock(&tdc->lock);

    afs_osi_Wakeup(&tdc->validPos);
    if (ab->size_parm[1]) {
        afs_PutDCache(tdc); /* put this one back, too */
#if defined(AFS_CACHE_BYPASS)
BPrefetchNoCache(struct brequest *ab)
    struct vrequest treq;

    if ((len = afs_InitReq(&treq, ab->cred)))

    /* OS-specific prefetch routine */
    afs_PrefetchNoCache(ab->vc, ab->cred, (struct nocache_read_request *) ab->ptr_parm[0]);
BStore(struct brequest *ab)
    struct vrequest treq;
#if defined(AFS_SGI_ENV)
    struct cred *tmpcred;

    if ((code = afs_InitReq(&treq, ab->cred)))

#if defined(AFS_SGI_ENV)
     * Since StoreOnLastReference can end up calling osi_SyncVM which
     * calls into VM code that assumes that u.u_cred has the
     * correct credentials, we set ours to theirs for this xaction
    tmpcred = OSI_GET_CURRENT_CRED();
    OSI_SET_CURRENT_CRED(ab->cred);

     * To avoid recursion since the WriteLock may be released during VM
     * operations, we hold the VOP_RWLOCK across this transaction as
     * do the other callers of StoreOnLastReference
    AFS_RWLOCK((vnode_t *) tvc, 1);

    ObtainWriteLock(&tvc->lock, 209);
    code = afs_StoreOnLastReference(tvc, &treq);
    ReleaseWriteLock(&tvc->lock);
#if defined(AFS_SGI_ENV)
    OSI_SET_CURRENT_CRED(tmpcred);
    AFS_RWUNLOCK((vnode_t *) tvc, 1);

    /* now set final return code, and wakeup anyone waiting */
    if ((ab->flags & BUVALID) == 0) {
        ab->code = afs_CheckCode(code, &treq, 43); /* set final code, since treq doesn't go across processes */
        ab->flags |= BUVALID;
        if (ab->flags & BUWAIT) {
            ab->flags &= ~BUWAIT;
/* release a held request buffer */
afs_BRelease(struct brequest *ab)
    AFS_STATCNT(afs_BRelease);
    ObtainWriteLock(&afs_xbrs, 294);
    if (--ab->refCount <= 0) {

        afs_osi_Wakeup(&afs_brsWaiters);
    ReleaseWriteLock(&afs_xbrs);

/* return true if bkg fetch daemons are all busy */
    AFS_STATCNT(afs_BBusy);
    if (afs_brsDaemons > 0)
afs_BQueue(short aopcode, struct vcache *avc,
           afs_int32 dontwait, afs_int32 ause, afs_ucred_t *acred,
           afs_size_t asparm0, afs_size_t asparm1, void *apparm0,
           void *apparm1, void *apparm2)
    AFS_STATCNT(afs_BQueue);
    ObtainWriteLock(&afs_xbrs, 296);

    for (i = 0; i < NBRS; i++, tb++) {
        if (tb->refCount == 0)

        tb->opcode = aopcode;
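        /* The daemon that services the request holds one reference; when
         * ause is nonzero the caller keeps an extra one of its own. */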
        tb->refCount = ause + 1;
        tb->size_parm[0] = asparm0;
        tb->size_parm[1] = asparm1;
        tb->ptr_parm[0] = apparm0;
        tb->ptr_parm[1] = apparm1;
        tb->ptr_parm[2] = apparm2;

        tb->ts = afs_brs_count++;
        /* if daemons are waiting for work, wake them up */
        if (afs_brsDaemons > 0) {
            afs_osi_Wakeup(&afs_brsDaemons);

        ReleaseWriteLock(&afs_xbrs);

    ReleaseWriteLock(&afs_xbrs);

    /* no free buffers, sleep a while */
    ReleaseWriteLock(&afs_xbrs);
    afs_osi_Sleep(&afs_brsWaiters);
    ObtainWriteLock(&afs_xbrs, 301);
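
/* Illustrative sketch only (not taken from this file): a caller might queue a
 * background store of a vcache, keeping its own reference on the request,
 * roughly as
 *   afs_BQueue(BOP_STORE, avc, 0, 1, acred, 0, 0, NULL, NULL, NULL);
 * with parameters as declared in the prototype above. */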
/* AIX 4.1 has a much different sleep/wakeup mechanism available for use.
 * The modifications here will work for either a UP or MP machine. */
struct buf *afs_asyncbuf = (struct buf *)0;
tid_t afs_asyncbuf_cv = EVENT_NULL;
afs_int32 afs_biodcnt = 0;

/* in implementing this, I assumed that all external linked lists were

 * Several places in this code traverse a linked list. The algorithm
 * used here is probably unfamiliar to most people. Careful examination
 * will show that it eliminates an assignment inside the loop, as compared
 * to the standard algorithm, at the cost of occasionally using an extra

 * This function obtains, and returns, a pointer to a buffer for
 * processing by a daemon. It sleeps until such a buffer is available.
 * The source of buffers for it is the list afs_asyncbuf (see also
 * afs_gn_strategy). This function may be invoked concurrently by
 * several processes, that is, several instances of the same daemon.
 * afs_gn_strategy, which adds buffers to the list, runs at interrupt
 * level, while get_bioreq runs at process level.
 * Since AIX 4.1 can wake just one process at a time, the separate sleep
 * addresses have been removed.
 * Note that the kernel_lock is held until the e_sleep_thread() occurs.
 * The afs_asyncbuf_lock is primarily used to serialize access between
 * process and interrupts.
Simple_lock afs_asyncbuf_lock;

    struct buf *bp = NULL;
    struct buf **bestlbpP, **lbpP;
    struct buf *t1P, *t2P; /* temp pointers for list manipulation */
    struct afs_bioqueue *s;

    /* ??? Does the forward pointer of the returned buffer need to be NULL? */
    /* Disable interrupts from the strategy function, and save the
     * prior priority level and lock access to the afs_asyncbuf. */
    oldPriority = disable_lock(INTMAX, &afs_asyncbuf_lock);

        /* look for oldest buffer */
        bp = bestbp = afs_asyncbuf;
        bestage = (long)bestbp->av_back;
        bestlbpP = &afs_asyncbuf;

            if ((long)bp->av_back - bestage < 0) {
                bestage = (long)bp->av_back;

        *bestlbpP = bp->av_forw;

        /* If afs_asyncbuf is null, it is necessary to go to sleep.
         * e_wakeup_one() ensures that only one thread wakes. */

        /* The LOCK_HANDLER indicates to e_sleep_thread to only drop the
         * lock on an MP machine. */
        e_sleep_thread(&afs_asyncbuf_cv, &afs_asyncbuf_lock,
                       LOCK_HANDLER | INTERRUPTIBLE);
        if (interrupted == THREAD_INTERRUPTED) {
            /* re-enable interrupts from strategy */
            unlock_enable(oldPriority, &afs_asyncbuf_lock);

        } /* end of "else asyncbuf is empty" */
    } /* end of "inner loop" */

    unlock_enable(oldPriority, &afs_asyncbuf_lock);

    /* For the convenience of other code, replace the gnodes in
     * the b_vp field of bp and the other buffers on the b_work
     * chain with the corresponding vnodes.
     * ??? what happens to the gnodes? They're not just cut loose,
        t2P = (struct buf *)t1P->b_work;
        t1P->b_vp = ((struct gnode *)t1P->b_vp)->gn_vnode;

        t1P = (struct buf *)t2P->b_work;
        t2P->b_vp = ((struct gnode *)t2P->b_vp)->gn_vnode;

    /* If the buffer does not specify I/O, it may immediately
     * be returned to the caller. This condition is detected
     * by examining the buffer's flags (the b_flags field). If
     * the B_PFPROT bit is set, the buffer represents a protection
     * violation, rather than a request for I/O. The remainder
     * of the outer loop handles the case where the B_PFPROT bit is clear. */
    if (bp->b_flags & B_PFPROT) {

} /* end of function get_bioreq() */
 * This function is the daemon. It is called from the syscall
 * interface. Ordinarily, a script or an administrator will run a
 * daemon startup utility, specifying the number of I/O daemons to
 * run. The utility will fork off that number of processes,
 * each making the appropriate syscall, which will cause this
 * function to be invoked.
static int afs_initbiod = 0; /* this is self-initializing code */

afs_BioDaemon(afs_int32 nbiods)
    afs_int32 code, s, pflg = 0;
    struct buf *bp, *bp1, *tbp1, *tbp2; /* temp pointers only */

    /* pin lock, since we'll be using it in an interrupt. */
    lock_alloc(&afs_asyncbuf_lock, LOCK_ALLOC_PIN, 2, 1);
    simple_lock_init(&afs_asyncbuf_lock);
    pin(&afs_asyncbuf, sizeof(struct buf *));
    pin(&afs_asyncbuf_cv, sizeof(afs_int32));

    /* Ignore HUP signals... */
        sigset_t sigbits, osigbits;
         * add SIGHUP to the set of already masked signals
        SIGFILLSET(sigbits); /* allow all signals */
        SIGDELSET(sigbits, SIGHUP); /* except SIGHUP */
        limit_sigs(&sigbits, &osigbits); /* and already masked */

    /* Main body starts here -- this is an intentional infinite loop, and
     * Now, the loop will exit if get_bioreq() returns NULL, indicating
     * that we've been interrupted. */
        bp = afs_get_bioreq();

            break; /* we were interrupted */
        if ((code = setjmpx(&jmpbuf))) {
            /* This should not have happened, maybe a lack of resources */
            s = disable_lock(INTMAX, &afs_asyncbuf_lock);
            for (bp1 = bp; bp; bp = bp1) {
                bp1 = (struct buf *)bp1->b_work;

                bp->b_flags |= B_ERROR;

            unlock_enable(s, &afs_asyncbuf_lock);

        vcp = VTOAFS(bp->b_vp);
        if (bp->b_flags & B_PFSTORE) { /* XXXX */
            ObtainWriteLock(&vcp->lock, 404);
            if (vcp->v.v_gnode->gn_mwrcnt) {
                afs_offs_t newlength =
                    (afs_offs_t) dbtob(bp->b_blkno) + bp->b_bcount;
                if (vcp->f.m.Length < newlength) {
                    afs_Trace4(afs_iclSetp, CM_TRACE_SETLENGTH,
                               ICL_TYPE_STRING, __FILE__, ICL_TYPE_LONG,
                               __LINE__, ICL_TYPE_OFFSET,
                               ICL_HANDLE_OFFSET(vcp->f.m.Length),
                               ICL_TYPE_OFFSET, ICL_HANDLE_OFFSET(newlength));
                    vcp->f.m.Length = newlength;

            ReleaseWriteLock(&vcp->lock);

        /* If the buffer represents a protection violation, rather than
         * an actual request for I/O, no special action need be taken. */
        if (bp->b_flags & B_PFPROT) {
            iodone(bp); /* Notify all users of the buffer that we're done */

        ObtainWriteLock(&vcp->pvmlock, 211);
         * First map its data area to a region in the current address space
         * by calling vm_att with the subspace identifier, and a pointer to
         * the data area. vm_att returns a new data area pointer, but we
         * also want to hang onto the old one.
        tmpaddr = bp->b_baddr;
        bp->b_baddr = (caddr_t) vm_att(bp->b_xmemd.subspace_id, tmpaddr);
        tmperr = afs_ustrategy(bp); /* temp variable saves offset calculation */
        if (tmperr) { /* error case: the strategy routine failed */
            bp->b_flags |= B_ERROR; /* should other flags remain set ??? */
            bp->b_error = tmperr;

        /* Unmap the buffer's data area by calling vm_det. Reset data area
         * to the value that we saved above. */
        bp->b_baddr = tmpaddr;

         * buffer may be linked with other buffers via the b_work field.
         * See also afs_gn_strategy. For each buffer in the chain (including
         * bp) notify all users of the buffer that the daemon is finished
         * using it by calling iodone.
         * assumes iodone can modify the b_work field.
            tbp2 = (struct buf *)tbp1->b_work;

            tbp1 = (struct buf *)tbp2->b_work;

        ReleaseWriteLock(&vcp->pvmlock); /* Unlock the vnode. */
    } /* infinite loop (unless we're interrupted) */
} /* end of afs_BioDaemon() */

#endif /* AFS_AIX41_ENV */
afs_BackgroundDaemon_once(void)
    LOCK_INIT(&afs_xbrs, "afs_xbrs");
    memset(afs_brs, 0, sizeof(afs_brs));

#if defined (AFS_SGI_ENV) && defined(AFS_SGI_SHORTSTACK)
     * steal the first daemon for doing delayed DSlot flushing
     * (see afs_GetDownDSlot)

        exit(CLD_EXITED, 0);

brequest_release(struct brequest *tb)
        AFS_RELE(AFSTOV(tb->vc)); /* MUST call vnode layer or could lose vnodes */

    tb->cred = (afs_ucred_t *)0;

    afs_BRelease(tb); /* this grabs and releases afs_xbrs lock */
#ifdef AFS_DARWIN80_ENV
afs_BackgroundDaemon(struct afs_uspc_param *uspc, void *param1, void *param2)

afs_BackgroundDaemon(void)
    struct brequest *tb;

    AFS_STATCNT(afs_BackgroundDaemon);
    /* initialize subsystem */

        /* Irix with "short stack" exits */
        afs_BackgroundDaemon_once();

#ifdef AFS_DARWIN80_ENV
    /* If it's a re-entering syscall, complete the request and release */
    if (uspc->ts > -1) {

        for (i = 0; i < NBRS; i++, tb++) {
            if (tb->ts == uspc->ts) {
                /* copy the userspace status back in */
                ((struct afs_uspc_param *) tb->ptr_parm[0])->retval =

                /* mark it valid and notify our caller */
                tb->flags |= BUVALID;
                if (tb->flags & BUWAIT) {
                    tb->flags &= ~BUWAIT;

                brequest_release(tb);

    afs_osi_MaskUserLoop();

    /* Otherwise it's a new one */

#ifdef AFS_DARWIN80_ENV

    ObtainWriteLock(&afs_xbrs, 302);

        struct brequest *min_tb = NULL;

        if (afs_termState == AFSOP_STOP_BKG) {
            if (--afs_nbrs <= 0)
                afs_termState = AFSOP_STOP_TRUNCDAEMON;
            ReleaseWriteLock(&afs_xbrs);
            afs_osi_Wakeup(&afs_termState);
#ifdef AFS_DARWIN80_ENV

        /* find a request */
        for (i = 0; i < NBRS; i++, tb++) {
            /* look for request with smallest ts */
            if ((tb->refCount > 0) && !(tb->flags & BSTARTED)) {
                /* new request, not yet picked up */
                if ((min_tb && (min_ts - tb->ts > 0)) || !min_tb) {

        if ((tb = min_tb)) {
            /* claim and process this request */
            tb->flags |= BSTARTED;
            ReleaseWriteLock(&afs_xbrs);

            afs_Trace1(afs_iclSetp, CM_TRACE_BKG1, ICL_TYPE_INT32,
            if (tb->opcode == BOP_FETCH)

#if defined(AFS_CACHE_BYPASS)
            else if (tb->opcode == BOP_FETCH_NOCACHE)
                BPrefetchNoCache(tb);

            else if (tb->opcode == BOP_STORE)

            else if (tb->opcode == BOP_PATH)

#ifdef AFS_DARWIN80_ENV
            else if (tb->opcode == BOP_MOVE) {
                memcpy(uspc, (struct afs_uspc_param *) tb->ptr_parm[0],
                       sizeof(struct afs_uspc_param));

                /* string lengths capped in move vop; copy NUL tho */
                memcpy(param1, (char *)tb->ptr_parm[1],
                       strlen(tb->ptr_parm[1])+1);
                memcpy(param2, (char *)tb->ptr_parm[2],
                       strlen(tb->ptr_parm[2])+1);

                panic("background bop");
            brequest_release(tb);
            ObtainWriteLock(&afs_xbrs, 305);

            /* wait for new request */

            ReleaseWriteLock(&afs_xbrs);
            afs_osi_Sleep(&afs_brsDaemons);
            ObtainWriteLock(&afs_xbrs, 307);

#ifdef AFS_DARWIN80_ENV
shutdown_daemons(void)
    AFS_STATCNT(shutdown_daemons);
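    /* On a cold shutdown, return the background-request machinery to its
     * initial state (counters, request array, lock) so that a subsequent
     * restart re-initializes it from scratch. */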
    if (afs_cold_shutdown) {
        afs_brsDaemons = brsInit = 0;
        rxepoch_checked = afs_nbrs = 0;
        memset(afs_brs, 0, sizeof(afs_brs));
        memset(&afs_xbrs, 0, sizeof(afs_lock_t));

#ifdef AFS_AIX41_ENV
        lock_free(&afs_asyncbuf_lock);
        unpin(&afs_asyncbuf, sizeof(struct buf *));
        unpin(&afs_asyncbuf_cv, sizeof(afs_int32));
#if defined(AFS_SGI_ENV) && defined(AFS_SGI_SHORTSTACK)
 * sgi - daemon - handles certain operations that otherwise
 * would use up too much kernel stack space
 * This all assumes that since the caller must have the xdcache lock
 * exclusively that the list will never be more than one long
 * and no one else can attempt to add anything until we're done.
SV_TYPE afs_sgibksync;
SV_TYPE afs_sgibkwait;
lock_t afs_sgibklock;
struct dcache *afs_sgibklist;

    if (afs_sgibklock == NULL) {
        SV_INIT(&afs_sgibksync, "bksync", 0, 0);
        SV_INIT(&afs_sgibkwait, "bkwait", 0, 0);
        SPINLOCK_INIT(&afs_sgibklock, "bklock");

        s = SPLOCK(afs_sgibklock);
        /* wait for something to do */
        SP_WAIT(afs_sgibklock, s, &afs_sgibksync, PINOD);
        osi_Assert(afs_sgibklist);

        /* XX will probably need to generalize to real list someday */
        s = SPLOCK(afs_sgibklock);
        while (afs_sgibklist) {
            tdc = afs_sgibklist;
            afs_sgibklist = NULL;
            SPUNLOCK(afs_sgibklock, s);

            tdc->dflags &= ~DFEntryMod;
            afs_WriteDCache(tdc, 1);

            s = SPLOCK(afs_sgibklock);

        /* done all the work - wake everyone up */
        while (SV_SIGNAL(&afs_sgibkwait));