2 * Copyright 2000, International Business Machines Corporation and others.
5 * This software has been released under the terms of the IBM Public
6 * License. For details, see the LICENSE file in the top-level source
7 * directory or online at http://www.openafs.org/dl/license10.html
9 * Portions Copyright (c) 2005-2008 Sine Nomine Associates
15 Institution: The Information Technology Center, Carnegie-Mellon University
18 #include <afsconfig.h>
19 #include <afs/param.h>
25 #ifdef HAVE_SYS_FILE_H
30 #include "rx/rx_queue.h"
31 #include <afs/afsint.h>
33 #include <afs/errors.h>
36 #include <afs/afssyscalls.h>
40 #include "volume_inline.h"
41 #include "vnode_inline.h"
42 #include "partition.h"
49 struct VnodeClassInfo VnodeClassInfo[nVNODECLASSES];
51 void VNLog(afs_int32 aop, afs_int32 anparms, ... );
58 #define BAD_IGET -1000
60 /* There are two separate vnode queue types defined here:
61 * Each hash conflict chain -- is singly linked, with a single head
62 * pointer. New entries are added at the beginning. Old
63 * entries are removed by linear search, which generally
64 * only occurs after a disk read.
65 * LRU chain -- is doubly linked, single head pointer.
66 * Entries are added at the head, reclaimed from the tail,
67 * or removed from anywhere in the queue.
71 /* Vnode hash table. Find hash chain by taking lower bits of
72 * (volume_hash_offset + vnode).
73 * This distributes the root inodes of the volumes over the
74 * hash table entries and also distributes the vnodes of
75 * volumes reasonably fairly. The volume_hash_offset field
76 * for each volume is established as the volume comes on line
77 * by using the VOLUME_HASH_OFFSET macro. This distributes the
78 * volumes fairly among the cache entries, both when servicing
79 * a small number of volumes and when servicing a large number.
82 /* logging stuff for finding bugs */
83 #define THELOGSIZE 5120
84 static afs_int32 theLog[THELOGSIZE];
85 static afs_int32 vnLogPtr = 0;
87 VNLog(afs_int32 aop, afs_int32 anparms, ... )
92 va_start(ap, anparms);
95 anparms = 4; /* do bounds checking */
97 temp = (aop << 16) | anparms;
98 theLog[vnLogPtr++] = temp;
99 if (vnLogPtr >= THELOGSIZE)
101 for (temp = 0; temp < anparms; temp++) {
102 theLog[vnLogPtr++] = va_arg(ap, afs_int32);
103 if (vnLogPtr >= THELOGSIZE)
109 /* VolumeHashOffset -- returns a new value to be stored in the
110 * volumeHashOffset of a Volume structure. Called when a
111 * volume is initialized. Sets the volumeHashOffset so that
112 * vnode cache entries are distributed reasonably between
113 * volumes (the root vnodes of the volumes will hash to
114 * different values, and spacing is maintained between volumes
115 * when there are not many volumes represented), and spread
116 * equally amongst vnodes within a single volume.
119 VolumeHashOffset_r(void)
121 static int nextVolumeHashOffset = 0;
122 /* hashindex Must be power of two in size */
124 # define hashMask ((1<<hashShift)-1)
125 static byte hashindex[1 << hashShift] =
126 { 0, 128, 64, 192, 32, 160, 96, 224 };
128 offset = hashindex[nextVolumeHashOffset & hashMask]
129 + (nextVolumeHashOffset >> hashShift);
130 nextVolumeHashOffset++;
134 /* Change hashindex (above) if you change this constant */
135 #define VNODE_HASH_TABLE_SIZE 256
136 private Vnode *VnodeHashTable[VNODE_HASH_TABLE_SIZE];
137 #define VNODE_HASH(volumeptr,vnodenumber)\
138 ((volumeptr->vnodeHashOffset + vnodenumber)&(VNODE_HASH_TABLE_SIZE-1))
142 * add a vnode to the volume's vnode list.
144 * @param[in] vp volume object pointer
145 * @param[in] vnp vnode object pointer
147 * @note for DAFS, it may seem like we should be acquiring a lightweight ref
148 * on vp, but this would actually break things. Right now, this is ok
149 because we destroy all vnode cache contents during volume
154 * @internal volume package internal use only
157 AddToVVnList(Volume * vp, Vnode * vnp)
159 if (queue_IsOnQueue(vnp))
163 Vn_cacheCheck(vnp) = vp->cacheCheck;
164 queue_Append(&vp->vnode_list, vnp);
165 Vn_stateFlags(vnp) |= VN_ON_VVN;
169 * delete a vnode from the volume's vnode list.
173 * @internal volume package internal use only
176 DeleteFromVVnList(Vnode * vnp)
178 Vn_volume(vnp) = NULL;
180 if (!queue_IsOnQueue(vnp))
184 Vn_stateFlags(vnp) &= ~(VN_ON_VVN);
188 * add a vnode to the end of the lru.
190 * @param[in] vcp vnode class info object pointer
191 * @param[in] vnp vnode object pointer
193 * @internal vnode package internal use only
196 AddToVnLRU(struct VnodeClassInfo * vcp, Vnode * vnp)
198 if (Vn_stateFlags(vnp) & VN_ON_LRU) {
202 /* Add it to the circular LRU list */
203 if (vcp->lruHead == NULL)
204 Abort("VPutVnode: vcp->lruHead==NULL");
206 vnp->lruNext = vcp->lruHead;
207 vnp->lruPrev = vcp->lruHead->lruPrev;
208 vcp->lruHead->lruPrev = vnp;
209 vnp->lruPrev->lruNext = vnp;
213 /* If the vnode was just deleted, put it at the end of the chain so it
214 * will be reused immediately */
216 vcp->lruHead = vnp->lruNext;
218 Vn_stateFlags(vnp) |= VN_ON_LRU;
222 * delete a vnode from the lru.
224 * @param[in] vcp vnode class info object pointer
225 * @param[in] vnp vnode object pointer
227 * @internal vnode package internal use only
230 DeleteFromVnLRU(struct VnodeClassInfo * vcp, Vnode * vnp)
232 if (!(Vn_stateFlags(vnp) & VN_ON_LRU)) {
236 if (vnp == vcp->lruHead)
237 vcp->lruHead = vcp->lruHead->lruNext;
239 if ((vnp == vcp->lruHead) ||
240 (vcp->lruHead == NULL))
241 Abort("DeleteFromVnLRU: lru chain addled!\n");
243 vnp->lruPrev->lruNext = vnp->lruNext;
244 vnp->lruNext->lruPrev = vnp->lruPrev;
246 Vn_stateFlags(vnp) &= ~(VN_ON_LRU);
250 * add a vnode to the vnode hash table.
252 * @param[in] vnp vnode object pointer
256 * @post vnode on hash
258 * @internal vnode package internal use only
261 AddToVnHash(Vnode * vnp)
263 unsigned int newHash;
265 if (!(Vn_stateFlags(vnp) & VN_ON_HASH)) {
266 newHash = VNODE_HASH(Vn_volume(vnp), Vn_id(vnp));
267 vnp->hashNext = VnodeHashTable[newHash];
268 VnodeHashTable[newHash] = vnp;
269 vnp->hashIndex = newHash;
271 Vn_stateFlags(vnp) |= VN_ON_HASH;
276 * delete a vnode from the vnode hash table.
283 * @post vnode removed from hash
285 * @internal vnode package internal use only
288 DeleteFromVnHash(Vnode * vnp)
292 if (Vn_stateFlags(vnp) & VN_ON_HASH) {
293 tvnp = VnodeHashTable[vnp->hashIndex];
295 VnodeHashTable[vnp->hashIndex] = vnp->hashNext;
297 while (tvnp && tvnp->hashNext != vnp)
298 tvnp = tvnp->hashNext;
300 tvnp->hashNext = vnp->hashNext;
303 vnp->hashNext = NULL;
305 Vn_stateFlags(vnp) &= ~(VN_ON_HASH);
311 * invalidate a vnode cache entry.
313 * @param[in] avnode vnode object pointer
317 * @post vnode metadata invalidated.
318 * vnode removed from hash table.
319 * DAFS: vnode state set to VN_STATE_INVALID.
321 * @internal vnode package internal use only
324 VInvalidateVnode_r(struct Vnode *avnode)
326 avnode->changed_newTime = 0; /* don't let it get flushed out again */
327 avnode->changed_oldTime = 0;
328 avnode->delete = 0; /* it isn't deleted, really */
329 avnode->cacheCheck = 0; /* invalid: prevents future vnode searches from working */
330 DeleteFromVnHash(avnode);
331 #ifdef AFS_DEMAND_ATTACH_FS
332 VnChangeState_r(avnode, VN_STATE_INVALID);
338 * initialize vnode cache for a given vnode class.
340 * @param[in] class vnode class
341 * @param[in] nVnodes size of cache
343 * @post vnode cache allocated and initialized
345 * @internal volume package internal use only
347 * @note generally called by VInitVolumePackage_r
349 * @see VInitVolumePackage_r
352 VInitVnodes(VnodeClass class, int nVnodes)
355 struct VnodeClassInfo *vcp = &VnodeClassInfo[class];
357 vcp->allocs = vcp->gets = vcp->reads = vcp->writes = 0;
358 vcp->cacheSize = nVnodes;
361 osi_Assert(CHECKSIZE_SMALLVNODE);
363 vcp->residentSize = SIZEOF_SMALLVNODE;
364 vcp->diskSize = SIZEOF_SMALLDISKVNODE;
365 vcp->magic = SMALLVNODEMAGIC;
369 vcp->residentSize = SIZEOF_LARGEVNODE;
370 vcp->diskSize = SIZEOF_LARGEDISKVNODE;
371 vcp->magic = LARGEVNODEMAGIC;
375 int s = vcp->diskSize - 1;
385 va = (byte *) calloc(nVnodes, vcp->residentSize);
386 osi_Assert(va != NULL);
388 Vnode *vnp = (Vnode *) va;
389 Vn_refcount(vnp) = 0; /* no context switches */
390 Vn_stateFlags(vnp) |= VN_ON_LRU;
391 #ifdef AFS_DEMAND_ATTACH_FS
392 CV_INIT(&Vn_stateCV(vnp), "vnode state", CV_DEFAULT, 0);
393 Vn_state(vnp) = VN_STATE_INVALID;
395 #else /* !AFS_DEMAND_ATTACH_FS */
396 Lock_Init(&vnp->lock);
397 #endif /* !AFS_DEMAND_ATTACH_FS */
398 vnp->changed_oldTime = 0;
399 vnp->changed_newTime = 0;
400 Vn_volume(vnp) = NULL;
401 Vn_cacheCheck(vnp) = 0;
402 vnp->delete = Vn_id(vnp) = 0;
403 #ifdef AFS_PTHREAD_ENV
404 vnp->writer = (pthread_t) 0;
405 #else /* AFS_PTHREAD_ENV */
406 vnp->writer = (PROCESS) 0;
407 #endif /* AFS_PTHREAD_ENV */
411 if (vcp->lruHead == NULL)
412 vcp->lruHead = vnp->lruNext = vnp->lruPrev = vnp;
414 vnp->lruNext = vcp->lruHead;
415 vnp->lruPrev = vcp->lruHead->lruPrev;
416 vcp->lruHead->lruPrev = vnp;
417 vnp->lruPrev->lruNext = vnp;
420 va += vcp->residentSize;
427 * allocate an unused vnode from the lru chain.
429 * @param[in] vcp vnode class info object pointer
430 * @param[in] vp volume pointer
431 * @param[in] vnodeNumber new vnode number that the vnode will be used for
433 * @pre VOL_LOCK is held
435 * @post vnode object is removed from lru
436 * vnode is disassociated with its old volume, and associated with its
438 * vnode is removed from its old vnode hash table, and for DAFS, it is
439 * added to its new hash table
440 * state is set to VN_STATE_INVALID.
441 * inode handle is released.
442 * a reservation is held on the vnode object
444 * @note we traverse backwards along the circular LRU list. It shouldn't
445 * be necessary to specify that nUsers == 0 since if it is in the list,
446 * nUsers should be 0. Things shouldn't be in lruq unless no one is
449 * @warning DAFS: VOL_LOCK is dropped while doing inode handle release
451 * @warning for non-DAFS, the vnode is _not_ hashed on the vnode hash table;
452 * non-DAFS must hash the vnode itself after loading data
454 * @return vnode object pointer
457 VGetFreeVnode_r(struct VnodeClassInfo * vcp, struct Volume *vp,
462 vnp = vcp->lruHead->lruPrev;
463 #ifdef AFS_DEMAND_ATTACH_FS
464 if (Vn_refcount(vnp) != 0 || VnIsExclusiveState(Vn_state(vnp)) ||
465 Vn_readers(vnp) != 0)
466 Abort("VGetFreeVnode_r: in-use vnode in lruq");
468 if (Vn_refcount(vnp) != 0 || CheckLock(&vnp->lock))
469 Abort("VGetFreeVnode_r: locked vnode in lruq");
471 VNLog(1, 2, Vn_id(vnp), (intptr_t)vnp, 0, 0);
474 * it's going to be overwritten soon enough.
475 * remove from LRU, delete hash entry, and
476 * disassociate from old parent volume before
477 * we have a chance to drop the vol glock
479 DeleteFromVnLRU(vcp, vnp);
480 DeleteFromVnHash(vnp);
481 if (Vn_volume(vnp)) {
482 DeleteFromVVnList(vnp);
485 /* we must re-hash the vnp _before_ we drop the glock again; otherwise,
486 * someone else might try to grab the same vnode id, and we'll both alloc
487 * a vnode object for the same vn id, bypassing vnode locking */
488 Vn_id(vnp) = vnodeNumber;
489 VnCreateReservation_r(vnp);
490 AddToVVnList(vp, vnp);
491 #ifdef AFS_DEMAND_ATTACH_FS
495 /* drop the file descriptor */
497 #ifdef AFS_DEMAND_ATTACH_FS
498 VnChangeState_r(vnp, VN_STATE_RELEASING);
501 /* release is, potentially, a highly latent operation due to a couple
503 * - ihandle package lock contention
504 * - closing file descriptor(s) associated with ih
506 * Hence, we perform this outside of the volume package lock in order to
507 * reduce the probability of contention.
509 IH_RELEASE(vnp->handle);
510 #ifdef AFS_DEMAND_ATTACH_FS
515 #ifdef AFS_DEMAND_ATTACH_FS
516 VnChangeState_r(vnp, VN_STATE_INVALID);
524 * lookup a vnode in the vnode cache hash table.
526 * @param[in] vp pointer to volume object
527 * @param[in] vnodeId vnode id
531 * @post matching vnode object or NULL is returned
533 * @return vnode object pointer
534 * @retval NULL no matching vnode object was found in the cache
536 * @internal vnode package internal use only
538 * @note this symbol is exported strictly for fssync debug protocol use
541 VLookupVnode(Volume * vp, VnodeId vnodeId)
544 unsigned int newHash;
546 newHash = VNODE_HASH(vp, vnodeId);
547 for (vnp = VnodeHashTable[newHash];
549 ((Vn_id(vnp) != vnodeId) ||
550 (Vn_volume(vnp) != vp) ||
551 (vp->cacheCheck != Vn_cacheCheck(vnp))));
552 vnp = vnp->hashNext);
559 VAllocVnode(Error * ec, Volume * vp, VnodeType type, VnodeId in_vnode, Unique in_unique)
563 retVal = VAllocVnode_r(ec, vp, type, in_vnode, in_unique);
569 * allocate a new vnode.
571 * @param[out] ec error code return
572 * @param[in] vp volume object pointer
573 * @param[in] type desired vnode type
574 * @param[in] in_vnode desired vnode ID (optional)
575 * @param[in] in_unique desired vnode Unique (optional)
577 * @return vnode object pointer
579 * @pre VOL_LOCK held;
580 * heavyweight ref held on vp
582 * @post vnode allocated and returned
585 VAllocVnode_r(Error * ec, Volume * vp, VnodeType type, VnodeId in_vnode, Unique in_unique)
590 struct VnodeClassInfo *vcp;
593 struct vnodeIndex *index;
596 #ifdef AFS_DEMAND_ATTACH_FS
597 VolState vol_state_save;
602 #ifdef AFS_DEMAND_ATTACH_FS
604 * once a volume has entered an error state, don't permit
605 * further operations to proceed
606 * -- tkeiser 11/21/2007
608 VWaitExclusiveState_r(vp);
609 if (VIsErrorState(V_attachState(vp))) {
610 /* XXX is VSALVAGING acceptable here? */
616 if (programType == fileServer && !V_inUse(vp)) {
617 if (vp->specialStatus) {
618 *ec = vp->specialStatus;
624 class = vnodeTypeToClass(type);
625 vcp = &VnodeClassInfo[class];
627 if (!VolumeWriteable(vp)) {
628 *ec = (bit32) VREADONLY;
632 if (vp->nextVnodeUnique > V_uniquifier(vp)) {
633 VUpdateVolume_r(ec, vp, 0);
638 if (programType == fileServer) {
639 VAddToVolumeUpdateList_r(ec, vp);
645 * If in_vnode and in_unique are specified, we are asked to
646 * allocate a specific vnode slot. Used by RW replication to
647 * keep vnode IDs consistent with the master.
651 unique = vp->nextVnodeUnique++;
653 unique = vp->nextVnodeUnique++;
655 if (vp->nextVnodeUnique > V_uniquifier(vp)) {
656 VUpdateVolume_r(ec, vp, 0);
661 /* Find a slot in the bit map */
662 bitNumber = VAllocBitmapEntry_r(ec, vp, &vp->vnodeIndex[class],
663 VOL_ALLOC_BITMAP_WAIT);
667 vnodeNumber = bitNumberToVnodeNumber(bitNumber, class);
669 index = &vp->vnodeIndex[class];
674 /* Catch us up to where the master is */
675 if (in_unique > vp->nextVnodeUnique)
676 vp->nextVnodeUnique = in_unique+1;
678 if (vp->nextVnodeUnique > V_uniquifier(vp)) {
679 VUpdateVolume_r(ec, vp, 0);
685 bitNumber = vnodeIdToBitNumber(in_vnode);
686 offset = bitNumber >> 3;
688 /* Mark vnode in use. Grow bitmap if needed. */
689 if ((offset >= index->bitmapSize)
690 || ((*(index->bitmap + offset) & (1 << (bitNumber & 0x7))) == 0))
692 /* Should not happen */
693 if (*(index->bitmap + offset) & (1 << (bitNumber & 0x7))) {
698 *(index->bitmap + offset) |= (1 << (bitNumber & 0x7));
699 vnodeNumber = in_vnode;
704 * at this point we should be assured that V_attachState(vp) is non-exclusive
708 VNLog(2, 1, vnodeNumber, 0, 0, 0);
709 /* Prepare to move it to the new hash chain */
710 vnp = VLookupVnode(vp, vnodeNumber);
712 /* slot already exists. May even not be in lruq (consider store file locking a file being deleted)
713 * so we may have to wait for it below */
714 VNLog(3, 2, vnodeNumber, (intptr_t)vnp, 0, 0);
716 VnCreateReservation_r(vnp);
717 if (Vn_refcount(vnp) == 1) {
718 /* we're the only user */
719 /* This won't block */
720 VnLock(vnp, WRITE_LOCK, VOL_LOCK_HELD, WILL_NOT_DEADLOCK);
722 #ifdef AFS_DEMAND_ATTACH_FS
725 * vnode was cached, wait for any existing exclusive ops to finish.
726 * once we have reacquired the lock, re-verify volume state.
728 * note: any vnode error state is related to the old vnode; disregard.
730 VnWaitQuiescent_r(vnp);
731 if (VIsErrorState(V_attachState(vp))) {
732 VnUnlock(vnp, WRITE_LOCK);
733 VnCancelReservation_r(vnp);
739 /* other users present; follow locking hierarchy */
740 VnLock(vnp, WRITE_LOCK, VOL_LOCK_HELD, MIGHT_DEADLOCK);
743 * verify state of the world hasn't changed
745 * (technically, this should never happen because cachecheck
746 * is only updated during a volume attach, which should not
747 * happen when refs are held)
749 if (Vn_volume(vnp)->cacheCheck != Vn_cacheCheck(vnp)) {
750 VnUnlock(vnp, WRITE_LOCK);
751 VnCancelReservation_r(vnp);
756 /* sanity check: vnode should be blank if it was deleted. If it's
757 * not blank, it is still in use somewhere; but the bitmap told us
758 * this vnode number was free, so something is wrong. */
759 if (vnp->disk.type != vNull) {
761 Log("VAllocVnode: addled bitmap or vnode object! (vol %ld, "
762 "vnode %p, number %ld, type %ld)\n", (long)vp->hashid, vnp,
763 (long)Vn_id(vnp), (long)vnp->disk.type);
765 VFreeBitMapEntry_r(&tmp, vp, &vp->vnodeIndex[class], bitNumber,
766 VOL_FREE_BITMAP_WAIT);
767 VInvalidateVnode_r(vnp);
768 VnUnlock(vnp, WRITE_LOCK);
769 VnCancelReservation_r(vnp);
770 #ifdef AFS_DEMAND_ATTACH_FS
771 VRequestSalvage_r(ec, vp, SALVSYNC_ERROR, 0);
773 VForceOffline_r(vp, 0);
779 /* no such vnode in the cache */
781 vnp = VGetFreeVnode_r(vcp, vp, vnodeNumber);
783 /* This will never block (guaranteed by check in VGetFreeVnode_r() */
784 VnLock(vnp, WRITE_LOCK, VOL_LOCK_HELD, WILL_NOT_DEADLOCK);
786 #ifdef AFS_DEMAND_ATTACH_FS
787 VnChangeState_r(vnp, VN_STATE_ALLOC);
790 /* Sanity check: is this vnode really not in use? */
793 IHandle_t *ihP = vp->vnodeIndex[class].handle;
795 afs_foff_t off = vnodeIndexOffset(vcp, vnodeNumber);
798 /* XXX we have a potential race here if two threads
799 * allocate new vnodes at the same time, and they
800 * both decide it's time to extend the index
803 #ifdef AFS_DEMAND_ATTACH_FS
805 * this race has been eliminated for the DAFS case
806 * using exclusive state VOL_STATE_VNODE_ALLOC
808 * if this becomes a bottleneck, there are ways to
809 * improve parallelism for this code path
810 * -- tkeiser 11/28/2007
812 VCreateReservation_r(vp);
813 VWaitExclusiveState_r(vp);
814 vol_state_save = VChangeState_r(vp, VOL_STATE_VNODE_ALLOC);
820 Log("VAllocVnode: can't open index file!\n");
822 goto error_encountered;
824 if ((size = FDH_SIZE(fdP)) < 0) {
825 Log("VAllocVnode: can't stat index file!\n");
827 goto error_encountered;
829 if (off + vcp->diskSize <= size) {
830 if (FDH_PREAD(fdP, &vnp->disk, vcp->diskSize, off) != vcp->diskSize) {
831 Log("VAllocVnode: can't read index file!\n");
833 goto error_encountered;
835 if (vnp->disk.type != vNull) {
836 Log("VAllocVnode: addled bitmap or index!\n");
838 goto error_encountered;
841 /* growing file - grow in a reasonable increment */
842 char *buf = malloc(16 * 1024);
844 Log("VAllocVnode: can't grow vnode index: out of memory\n");
846 goto error_encountered;
848 memset(buf, 0, 16 * 1024);
849 if ((FDH_PWRITE(fdP, buf, 16 * 1024, off)) != 16 * 1024) {
850 Log("VAllocVnode: can't grow vnode index: write failed\n");
853 goto error_encountered;
860 #ifdef AFS_DEMAND_ATTACH_FS
861 VChangeState_r(vp, vol_state_save);
862 VCancelReservation_r(vp);
869 * close the file handle
871 * invalidate the vnode
872 * free up the bitmap entry (although salvager should take care of it)
874 * drop vnode lock and refs
879 VFreeBitMapEntry_r(&tmp, vp, &vp->vnodeIndex[class], bitNumber, 0 /*flags*/);
880 VInvalidateVnode_r(vnp);
881 VnUnlock(vnp, WRITE_LOCK);
882 VnCancelReservation_r(vnp);
883 #ifdef AFS_DEMAND_ATTACH_FS
884 VRequestSalvage_r(ec, vp, SALVSYNC_ERROR, 0);
885 VCancelReservation_r(vp);
887 VForceOffline_r(vp, 0);
892 VNLog(4, 2, vnodeNumber, (intptr_t)vnp, 0, 0);
893 #ifndef AFS_DEMAND_ATTACH_FS
898 VNLog(5, 1, (intptr_t)vnp, 0, 0, 0);
899 memset(&vnp->disk, 0, sizeof(vnp->disk));
900 vnp->changed_newTime = 0; /* set this bit when vnode is updated */
901 vnp->changed_oldTime = 0; /* set this on CopyOnWrite. */
903 vnp->disk.vnodeMagic = vcp->magic;
904 vnp->disk.type = type;
905 vnp->disk.uniquifier = unique;
908 vp->header->diskstuff.filecount++;
909 #ifdef AFS_DEMAND_ATTACH_FS
910 VnChangeState_r(vnp, VN_STATE_EXCLUSIVE);
916 * load a vnode from disk.
918 * @param[out] ec client error code return
919 * @param[in] vp volume object pointer
920 * @param[in] vnp vnode object pointer
921 * @param[in] vcp vnode class info object pointer
922 * @param[in] class vnode class enumeration
924 * @pre vnode is registered in appropriate data structures;
925 * caller holds a ref on vnode; VOL_LOCK is held
927 * @post vnode data is loaded from disk.
928 * vnode state is set to VN_STATE_ONLINE.
929 * on failure, vnode is invalidated.
931 * @internal vnode package internal use only
934 VnLoad(Error * ec, Volume * vp, Vnode * vnp,
935 struct VnodeClassInfo * vcp, VnodeClass class)
937 /* vnode not cached */
941 IHandle_t *ihP = vp->vnodeIndex[class].handle;
948 #ifdef AFS_DEMAND_ATTACH_FS
949 VnChangeState_r(vnp, VN_STATE_LOAD);
952 /* This will never block */
953 VnLock(vnp, WRITE_LOCK, VOL_LOCK_HELD, WILL_NOT_DEADLOCK);
958 Log("VnLoad: can't open index dev=%u, i=%s\n", vp->device,
959 PrintInode(stmp, vp->vnodeIndex[class].handle->ih_ino));
961 goto error_encountered_nolock;
962 } else if ((nBytes = FDH_PREAD(fdP, (char *)&vnp->disk, vcp->diskSize, vnodeIndexOffset(vcp, Vn_id(vnp))))
964 /* Don't take volume off line if the inumber is out of range
965 * or the inode table is full. */
966 if (nBytes == BAD_IGET) {
967 Log("VnLoad: bad inumber %s\n",
968 PrintInode(stmp, vp->vnodeIndex[class].handle->ih_ino));
971 } else if (nBytes == -1 && errno == EIO) {
972 /* disk error; salvage */
973 Log("VnLoad: Couldn't read vnode %u, volume %u (%s); volume needs salvage\n", Vn_id(vnp), V_id(vp), V_name(vp));
975 /* vnode is not allocated */
977 Log("VnLoad: Couldn't read vnode %u, volume %u (%s); read %d bytes, errno %d\n",
978 Vn_id(vnp), V_id(vp), V_name(vp), (int)nBytes, errno);
982 goto error_encountered_nolock;
987 /* Quick check to see that the data is reasonable */
988 if (vnp->disk.vnodeMagic != vcp->magic || vnp->disk.type == vNull) {
989 if (vnp->disk.type == vNull) {
993 struct vnodeIndex *index = &vp->vnodeIndex[class];
994 unsigned int bitNumber = vnodeIdToBitNumber(Vn_id(vnp));
995 unsigned int offset = bitNumber >> 3;
997 #ifdef AFS_DEMAND_ATTACH_FS
998 /* Make sure the volume bitmap isn't getting updated while we are
1000 VWaitExclusiveState_r(vp);
1003 /* Test to see if vnode number is valid. */
1004 if ((offset >= index->bitmapSize)
1005 || ((*(index->bitmap + offset) & (1 << (bitNumber & 0x7)))
1007 Log("VnLoad: Request for unallocated vnode %u, volume %u (%s) denied.\n", Vn_id(vnp), V_id(vp), V_name(vp));
1011 Log("VnLoad: Bad magic number, vnode %u, volume %u (%s); volume needs salvage\n", Vn_id(vnp), V_id(vp), V_name(vp));
1014 goto error_encountered;
1017 IH_INIT(vnp->handle, V_device(vp), V_parentId(vp), VN_GET_INO(vnp));
1018 VnUnlock(vnp, WRITE_LOCK);
1019 #ifdef AFS_DEMAND_ATTACH_FS
1020 VnChangeState_r(vnp, VN_STATE_ONLINE);
1025 error_encountered_nolock:
1027 FDH_REALLYCLOSE(fdP);
1033 #ifdef AFS_DEMAND_ATTACH_FS
1034 VRequestSalvage_r(&error, vp, SALVSYNC_ERROR, 0);
1036 VForceOffline_r(vp, 0);
1043 VInvalidateVnode_r(vnp);
1044 VnUnlock(vnp, WRITE_LOCK);
1048 * store a vnode to disk.
1050 * @param[out] ec error code output
1051 * @param[in] vp volume object pointer
1052 * @param[in] vnp vnode object pointer
1053 * @param[in] vcp vnode class info object pointer
1054 * @param[in] class vnode class enumeration
1056 * @pre VOL_LOCK held.
1057 * caller holds refs to volume and vnode.
1058 * DAFS: caller is responsible for performing state sanity checks.
1060 * @post vnode state is stored to disk.
1062 * @internal vnode package internal use only
1065 VnStore(Error * ec, Volume * vp, Vnode * vnp,
1066 struct VnodeClassInfo * vcp, VnodeClass class)
1070 IHandle_t *ihP = vp->vnodeIndex[class].handle;
1073 #ifdef AFS_DEMAND_ATTACH_FS
1074 VnState vn_state_save;
1079 #ifdef AFS_DEMAND_ATTACH_FS
1080 vn_state_save = VnChangeState_r(vnp, VN_STATE_STORE);
1083 offset = vnodeIndexOffset(vcp, Vn_id(vnp));
1087 Log("VnStore: can't open index file!\n");
1088 goto error_encountered;
1090 nBytes = FDH_PWRITE(fdP, &vnp->disk, vcp->diskSize, offset);
1091 if (nBytes != vcp->diskSize) {
1092 /* Don't force volume offline if the inumber is out of
1093 * range or the inode table is full.
1095 FDH_REALLYCLOSE(fdP);
1096 if (nBytes == BAD_IGET) {
1097 Log("VnStore: bad inumber %s\n",
1099 vp->vnodeIndex[class].handle->ih_ino));
1102 #ifdef AFS_DEMAND_ATTACH_FS
1103 VnChangeState_r(vnp, VN_STATE_ERROR);
1106 Log("VnStore: Couldn't write vnode %u, volume %u (%s) (error %d)\n", Vn_id(vnp), V_id(Vn_volume(vnp)), V_name(Vn_volume(vnp)), (int)nBytes);
1107 #ifdef AFS_DEMAND_ATTACH_FS
1108 goto error_encountered;
1111 VForceOffline_r(vp, 0);
1121 #ifdef AFS_DEMAND_ATTACH_FS
1122 VnChangeState_r(vnp, vn_state_save);
1127 #ifdef AFS_DEMAND_ATTACH_FS
1128 /* XXX instead of dumping core, let's try to request a salvage
1129 * and just fail the putvnode */
1133 VnChangeState_r(vnp, VN_STATE_ERROR);
1134 VRequestSalvage_r(ec, vp, SALVSYNC_ERROR, 0);
1141 * get a handle to a vnode object.
1143 * @param[out] ec error code
1144 * @param[in] vp volume object
1145 * @param[in] vnodeNumber vnode id
1146 * @param[in] locktype type of lock to acquire
1148 * @return vnode object pointer
1153 VGetVnode(Error * ec, Volume * vp, VnodeId vnodeNumber, int locktype)
1154 { /* READ_LOCK or WRITE_LOCK, as defined in lock.h */
1157 retVal = VGetVnode_r(ec, vp, vnodeNumber, locktype);
1163 * get a handle to a vnode object.
1165 * @param[out] ec error code
1166 * @param[in] vp volume object
1167 * @param[in] vnodeNumber vnode id
1168 * @param[in] locktype type of lock to acquire
1170 * @return vnode object pointer
1172 * @internal vnode package internal use only
1174 * @pre VOL_LOCK held.
1175 * heavyweight ref held on volume object.
1178 VGetVnode_r(Error * ec, Volume * vp, VnodeId vnodeNumber, int locktype)
1179 { /* READ_LOCK or WRITE_LOCK, as defined in lock.h */
1182 struct VnodeClassInfo *vcp;
1186 if (vnodeNumber == 0) {
1191 VNLog(100, 1, vnodeNumber, 0, 0, 0);
1193 #ifdef AFS_DEMAND_ATTACH_FS
1195 * once a volume has entered an error state, don't permit
1196 * further operations to proceed
1197 * -- tkeiser 11/21/2007
1199 VWaitExclusiveState_r(vp);
1200 if (VIsErrorState(V_attachState(vp))) {
1201 /* XXX is VSALVAGING acceptable here? */
1207 if (programType == fileServer && !V_inUse(vp)) {
1208 *ec = (vp->specialStatus ? vp->specialStatus : VOFFLINE);
1210 /* If the volume is VBUSY (being cloned or dumped) and this is
1211 * a READ operation, then don't fail.
1213 if ((*ec != VBUSY) || (locktype != READ_LOCK)) {
1218 class = vnodeIdToClass(vnodeNumber);
1219 vcp = &VnodeClassInfo[class];
1220 if (locktype == WRITE_LOCK && !VolumeWriteable(vp)) {
1221 *ec = (bit32) VREADONLY;
1225 if (locktype == WRITE_LOCK && programType == fileServer) {
1226 VAddToVolumeUpdateList_r(ec, vp);
1234 /* See whether the vnode is in the cache. */
1235 vnp = VLookupVnode(vp, vnodeNumber);
1237 /* vnode is in cache */
1239 VNLog(101, 2, vnodeNumber, (intptr_t)vnp, 0, 0);
1240 VnCreateReservation_r(vnp);
1242 #ifdef AFS_DEMAND_ATTACH_FS
1244 * this is the one DAFS case where we may run into contention.
1245 * here's the basic control flow:
1247 * if locktype is READ_LOCK:
1248 * wait until vnode is not exclusive
1249 * set to VN_STATE_READ
1250 * increment read count
1253 * wait until vnode is quiescent
1254 * set to VN_STATE_EXCLUSIVE
1257 if (locktype == READ_LOCK) {
1258 VnWaitExclusiveState_r(vnp);
1260 VnWaitQuiescent_r(vnp);
1263 if (VnIsErrorState(Vn_state(vnp))) {
1264 VnCancelReservation_r(vnp);
1268 #endif /* AFS_DEMAND_ATTACH_FS */
1270 /* vnode not cached */
1272 /* Not in cache; tentatively grab most distantly used one from the LRU
1275 vnp = VGetFreeVnode_r(vcp, vp, vnodeNumber);
1278 vnp->changed_newTime = vnp->changed_oldTime = 0;
1282 * XXX for non-DAFS, there is a serious
1283 * race condition here:
1285 * two threads can race to load a vnode. the net
1286 * result is two struct Vnodes can be allocated
1287 * and hashed, which point to the same underlying
1288 * disk data store. conflicting vnode locks can
1289 * thus be held concurrently.
1291 * for non-DAFS to be safe, VOL_LOCK really shouldn't
1292 * be dropped in VnLoad. Of course, this would likely
1293 * lead to an unacceptable slow-down.
1296 VnLoad(ec, vp, vnp, vcp, class);
1298 VnCancelReservation_r(vnp);
1301 #ifndef AFS_DEMAND_ATTACH_FS
1306 * there is no possibility for contention. we "own" this vnode.
1312 * it is imperative that nothing drop vol lock between here
1313 * and the VnBeginRead/VnChangeState stanza below
1316 VnLock(vnp, locktype, VOL_LOCK_HELD, MIGHT_DEADLOCK);
1318 /* Check that the vnode hasn't been removed while we were obtaining
1320 VNLog(102, 2, vnodeNumber, (intptr_t) vnp, 0, 0);
1321 if ((vnp->disk.type == vNull) || (Vn_cacheCheck(vnp) == 0)) {
1322 VnUnlock(vnp, locktype);
1323 VnCancelReservation_r(vnp);
1325 /* vnode is labelled correctly by now, so we don't have to invalidate it */
1329 #ifdef AFS_DEMAND_ATTACH_FS
1330 if (locktype == READ_LOCK) {
1333 VnChangeState_r(vnp, VN_STATE_EXCLUSIVE);
1337 if (programType == fileServer)
1338 VBumpVolumeUsage_r(Vn_volume(vnp)); /* Hack; don't know where it should be
1339 * called from. Maybe VGetVolume */
1344 int TrustVnodeCacheEntry = 1;
1345 /* This variable is bogus--when it's set to 0, the hash chains fill
1346 up with multiple versions of the same vnode. Should fix this!! */
1348 VPutVnode(Error * ec, Vnode * vnp)
1351 VPutVnode_r(ec, vnp);
1356 * put back a handle to a vnode object.
1358 * @param[out] ec client error code
1359 * @param[in] vnp vnode object pointer
1361 * @pre VOL_LOCK held.
1362 * ref held on vnode.
1364 * @post ref dropped on vnode.
1365 * if vnode was modified or deleted, it is written out to disk
1366 * (assuming a write lock was held).
1368 * @internal volume package internal use only
1371 VPutVnode_r(Error * ec, Vnode * vnp)
1375 struct VnodeClassInfo *vcp;
1378 osi_Assert(Vn_refcount(vnp) != 0);
1379 class = vnodeIdToClass(Vn_id(vnp));
1380 vcp = &VnodeClassInfo[class];
1381 osi_Assert(vnp->disk.vnodeMagic == vcp->magic);
1382 VNLog(200, 2, Vn_id(vnp), (intptr_t) vnp, 0, 0);
1384 #ifdef AFS_DEMAND_ATTACH_FS
1385 writeLocked = (Vn_state(vnp) == VN_STATE_EXCLUSIVE);
1387 writeLocked = WriteLocked(&vnp->lock);
1392 #ifdef AFS_PTHREAD_ENV
1393 pthread_t thisProcess = pthread_self();
1394 #else /* AFS_PTHREAD_ENV */
1395 PROCESS thisProcess;
1396 LWP_CurrentProcess(&thisProcess);
1397 #endif /* AFS_PTHREAD_ENV */
1398 VNLog(201, 2, (intptr_t) vnp,
1399 ((vnp->changed_newTime) << 1) | ((vnp->
1400 changed_oldTime) << 1) | vnp->
1402 if (thisProcess != vnp->writer)
1403 Abort("VPutVnode: Vnode at %"AFS_PTR_FMT" locked by another process!\n",
1407 if (vnp->changed_oldTime || vnp->changed_newTime || vnp->delete) {
1408 Volume *vp = Vn_volume(vnp);
1409 afs_uint32 now = FT_ApproxTime();
1410 osi_Assert(Vn_cacheCheck(vnp) == vp->cacheCheck);
1413 /* No longer any directory entries for this vnode. Free the Vnode */
1414 memset(&vnp->disk, 0, sizeof(vnp->disk));
1415 /* delete flag turned off further down */
1416 VNLog(202, 2, Vn_id(vnp), (intptr_t) vnp, 0, 0);
1417 } else if (vnp->changed_newTime) {
1418 vnp->disk.serverModifyTime = now;
1420 if (vnp->changed_newTime)
1422 V_updateDate(vp) = vp->updateTime = now;
1423 if(V_volUpCounter(vp)< UINT_MAX)
1424 V_volUpCounter(vp)++;
1427 /* The vnode has been changed. Write it out to disk */
1429 #ifdef AFS_DEMAND_ATTACH_FS
1430 VRequestSalvage_r(ec, vp, SALVSYNC_ERROR, 0);
1432 osi_Assert(V_needsSalvaged(vp));
1436 VnStore(ec, vp, vnp, vcp, class);
1438 /* If the vnode is to be deleted, and we wrote the vnode out,
1439 * free its bitmap entry. Do after the vnode is written so we
1440 * don't allocate from bitmap before the vnode is written
1441 * (doing so could cause a "addled bitmap" message).
1443 if (vnp->delete && !*ec) {
1444 if (Vn_volume(vnp)->header->diskstuff.filecount-- < 1)
1445 Vn_volume(vnp)->header->diskstuff.filecount = 0;
1446 VFreeBitMapEntry_r(ec, vp, &vp->vnodeIndex[class],
1447 vnodeIdToBitNumber(Vn_id(vnp)),
1448 VOL_FREE_BITMAP_WAIT);
1452 vnp->changed_newTime = vnp->changed_oldTime = 0;
1454 #ifdef AFS_DEMAND_ATTACH_FS
1455 VnChangeState_r(vnp, VN_STATE_ONLINE);
1457 } else { /* Not write locked */
1458 if (vnp->changed_newTime || vnp->changed_oldTime || vnp->delete)
1460 ("VPutVnode: Change or delete flag for vnode "
1461 "%"AFS_PTR_FMT" is set but vnode is not write locked!\n",
1463 #ifdef AFS_DEMAND_ATTACH_FS
1468 /* Do not look at disk portion of vnode after this point; it may
1469 * have been deleted above */
1471 VnUnlock(vnp, ((writeLocked) ? WRITE_LOCK : READ_LOCK));
1472 VnCancelReservation_r(vnp);
1476 * Make an attempt to convert a vnode lock from write to read.
1477 * Do nothing if the vnode isn't write locked or the vnode has
/* Public wrapper around VVnodeWriteToRead_r().  Only the delegating
 * call is visible in this view; presumably the elided body brackets it
 * with VOL_LOCK/VOL_UNLOCK and returns retVal -- verify upstream. */
1481 VVnodeWriteToRead(Error * ec, Vnode * vnp)
1485 retVal = VVnodeWriteToRead_r(ec, vnp);
1491 * convert vnode handle from mutually exclusive to shared access.
1493 * @param[out] ec client error code
1494 * @param[in] vnp vnode object pointer
1496 * @return unspecified use (see out argument 'ec' for error code return)
1498 * @pre VOL_LOCK held.
1499 * ref held on vnode.
1500 * write lock held on vnode.
1502 * @post read lock held on vnode.
1503 * if vnode was modified, it has been written to disk.
1505 * @internal volume package internal use only
1508 VVnodeWriteToRead_r(Error * ec, Vnode * vnp)
1512 struct VnodeClassInfo *vcp;
/* handle of the current thread (pthread) or LWP process, used below to
 * verify the caller is the thread that actually holds the write lock */
1513 #ifdef AFS_PTHREAD_ENV
1514 pthread_t thisProcess;
1515 #else /* AFS_PTHREAD_ENV */
1516 PROCESS thisProcess;
1517 #endif /* AFS_PTHREAD_ENV */
/* sanity checks: caller must hold a reference, and the cached disk
 * image must carry the magic number of this vnode's class */
1520 osi_Assert(Vn_refcount(vnp) != 0);
1521 class = vnodeIdToClass(Vn_id(vnp));
1522 vcp = &VnodeClassInfo[class];
1523 osi_Assert(vnp->disk.vnodeMagic == vcp->magic);
1524 VNLog(300, 2, Vn_id(vnp), (intptr_t) vnp, 0, 0);
/* write-locked? DAFS tracks this via the vnode state machine rather
 * than the lock structure itself */
1526 #ifdef AFS_DEMAND_ATTACH_FS
1527 writeLocked = (Vn_state(vnp) == VN_STATE_EXCLUSIVE);
1529 writeLocked = WriteLocked(&vnp->lock);
1536 VNLog(301, 2, (intptr_t) vnp,
1537 ((vnp->changed_newTime) << 1) | ((vnp->
1538 changed_oldTime) << 1) | vnp->
/* only the thread that took the write lock may downgrade it */
1542 #ifdef AFS_PTHREAD_ENV
1543 thisProcess = pthread_self();
1544 #else /* AFS_PTHREAD_ENV */
1545 LWP_CurrentProcess(&thisProcess);
1546 #endif /* AFS_PTHREAD_ENV */
1547 if (thisProcess != vnp->writer)
/* BUGFIX: message previously said "VPutVnode" -- a copy/paste from
 * VPutVnode_r -- which misattributed the abort in server logs.  Name
 * the function that is actually aborting. */
1548 Abort("VVnodeWriteToRead_r: Vnode at %"AFS_PTR_FMT
1549 " locked by another process!\n", vnp);
/* flush pending modifications to disk before giving up write access */
1554 if (vnp->changed_oldTime || vnp->changed_newTime) {
1555 Volume *vp = Vn_volume(vnp);
1556 afs_uint32 now = FT_ApproxTime();
1557 osi_Assert(Vn_cacheCheck(vnp) == vp->cacheCheck);
1558 if (vnp->changed_newTime)
1559 vnp->disk.serverModifyTime = now;
1560 if (vnp->changed_newTime)
1561 V_updateDate(vp) = vp->updateTime = now;
1563 /* The vnode has been changed. Write it out to disk */
1565 #ifdef AFS_DEMAND_ATTACH_FS
1566 VRequestSalvage_r(ec, vp, SALVSYNC_ERROR, 0);
1568 osi_Assert(V_needsSalvaged(vp));
1572 VnStore(ec, vp, vnp, vcp, class);
1575 vnp->changed_newTime = vnp->changed_oldTime = 0;
/* DAFS: leave the exclusive state now that the store has completed */
1579 #ifdef AFS_DEMAND_ATTACH_FS
1580 VnChangeState_r(vnp, VN_STATE_ONLINE);
/* downgrade the lock itself from exclusive to shared */
1583 ConvertWriteToReadLock(&vnp->lock);
1589 * initial size of ihandle pointer vector.
1591 * @see VInvalidateVnodesByVolume_r
1593 #define IH_VEC_BASE_SIZE 256
1596 * increment amount for growing ihandle pointer vector.
1598 * @see VInvalidateVnodesByVolume_r
1600 #define IH_VEC_INCREMENT 256
1603 * Compile list of ihandles to be released/reallyclosed at a later time.
1605 * @param[in] vp volume object pointer
1606 * @param[out] vec_out vector of ihandle pointers to be released/reallyclosed
1607 * @param[out] vec_len_out number of valid elements in ihandle vector
1609 * @pre - VOL_LOCK is held
1610 * - volume is in appropriate exclusive state (e.g. VOL_STATE_VNODE_CLOSE,
1611 * VOL_STATE_VNODE_RELEASE)
1613 * @post - all vnodes on VVn list are invalidated
1614 * - ih_vec is populated with all valid ihandles
1616 * @return operation status
1618 * @retval ENOMEM out of memory
1620 * @todo we should handle out of memory conditions more gracefully.
1622 * @internal vnode package internal use only
1625 VInvalidateVnodesByVolume_r(Volume * vp,
1626 IHandle_t *** vec_out,
1627 size_t * vec_len_out)
/* i counts entries appended so far; vec_len is the vector's capacity */
1631 size_t i = 0, vec_len;
1632 IHandle_t **ih_vec, **ih_vec_new;
1634 #ifdef AFS_DEMAND_ATTACH_FS
1636 #endif /* AFS_DEMAND_ATTACH_FS */
/* start with a fixed-size vector, grown by IH_VEC_INCREMENT below.
 * NOTE(review): the malloc NULL check is not visible in this view --
 * confirm the elided lines return ENOMEM on allocation failure. */
1638 vec_len = IH_VEC_BASE_SIZE;
1639 ih_vec = malloc(sizeof(IHandle_t *) * vec_len);
1640 #ifdef AFS_DEMAND_ATTACH_FS
1647 * Traverse the volume's vnode list. Pull all the ihandles out into a
1648 * thread-private array for later asynchronous processing.
1650 #ifdef AFS_DEMAND_ATTACH_FS
1653 for (queue_Scan(&vp->vnode_list, vnp, nvnp, Vnode)) {
1654 if (vnp->handle != NULL) {
/* vector is full: grow it before stashing this ihandle */
1656 #ifdef AFS_DEMAND_ATTACH_FS
1659 vec_len += IH_VEC_INCREMENT;
/* grow through a temporary so ih_vec stays valid if realloc fails */
1660 ih_vec_new = realloc(ih_vec, sizeof(IHandle_t *) * vec_len);
1661 #ifdef AFS_DEMAND_ATTACH_FS
1664 if (ih_vec_new == NULL) {
1668 ih_vec = ih_vec_new;
1669 #ifdef AFS_DEMAND_ATTACH_FS
1671 * Theoretically, the volume's VVn list should not change
1672 * because the volume is in an exclusive state. For the
1673 * sake of safety, we will restart the traversal from the
1674 * beginning (which is not expensive because we're
1675 * deleting the items from the list as we go).
1677 goto restart_traversal;
1680 ih_vec[i++] = vnp->handle;
/* detach the vnode from the volume and invalidate its cache entry */
1683 DeleteFromVVnList(vnp);
1684 VInvalidateVnode_r(vnp);
1694 /* VCloseVnodeFiles - called when a volume is going off line. All open
1695 * files for vnodes in that volume are closed. This might be excessive,
1696 * since we may only be taking one volume of a volume group offline.
1699 VCloseVnodeFiles_r(Volume * vp)
1701 #ifdef AFS_DEMAND_ATTACH_FS
1702 VolState vol_state_save;
1704 IHandle_t ** ih_vec;
/* DAFS: move the volume into an exclusive state so the vnode list
 * cannot change while the ihandles are harvested */
1707 #ifdef AFS_DEMAND_ATTACH_FS
1708 vol_state_save = VChangeState_r(vp, VOL_STATE_VNODE_CLOSE);
1709 #endif /* AFS_DEMAND_ATTACH_FS */
1711 /* XXX need better error handling here */
1712 osi_Assert(VInvalidateVnodesByVolume_r(vp,
1718 * now we drop VOL_LOCK while we perform some potentially very
1719 * expensive operations in the background
1721 #ifdef AFS_DEMAND_ATTACH_FS
/* really-close (force fd closure) then release each harvested ihandle */
1725 for (i = 0; i < vec_len; i++) {
1726 IH_REALLYCLOSE(ih_vec[i]);
1727 IH_RELEASE(ih_vec[i]);
/* DAFS: restore the volume's previous state */
1732 #ifdef AFS_DEMAND_ATTACH_FS
1734 VChangeState_r(vp, vol_state_save);
1735 #endif /* AFS_DEMAND_ATTACH_FS */
1740 * shut down all vnode cache state for a given volume.
1742 * @param[in] vp volume object pointer
1744 * @pre VOL_LOCK is held
1746 * @post all file descriptors closed.
1747 * all inode handles released.
1748 * all vnode cache objects disassociated from volume.
1750 * @note for DAFS, these operations are performed outside the vol glock under
1751 * volume exclusive state VOL_STATE_VNODE_RELEASE. Please further note
1752 * that it would be a bug to acquire and release a volume reservation
1753 * during this exclusive operation. This is due to the fact that we are
1754 * generally called during the refcount 1->0 transition.
1756 * @todo we should handle failures in VInvalidateVnodesByVolume_r more
1759 * @see VInvalidateVnodesByVolume_r
1761 * @internal this routine is internal to the volume package
1764 VReleaseVnodeFiles_r(Volume * vp)
1766 #ifdef AFS_DEMAND_ATTACH_FS
1767 VolState vol_state_save;
1769 IHandle_t ** ih_vec;
/* DAFS: enter an exclusive state so the vnode list stays stable while
 * the ihandles are harvested */
1772 #ifdef AFS_DEMAND_ATTACH_FS
1773 vol_state_save = VChangeState_r(vp, VOL_STATE_VNODE_RELEASE);
1774 #endif /* AFS_DEMAND_ATTACH_FS */
1776 /* XXX need better error handling here */
1777 osi_Assert(VInvalidateVnodesByVolume_r(vp,
1783 * now we drop VOL_LOCK while we perform some potentially very
1784 * expensive operations in the background
1786 #ifdef AFS_DEMAND_ATTACH_FS
/* unlike VCloseVnodeFiles_r, only IH_RELEASE each handle (no
 * IH_REALLYCLOSE here) -- presumably descriptors may stay cached;
 * confirm against ihandle package docs */
1790 for (i = 0; i < vec_len; i++) {
1791 IH_RELEASE(ih_vec[i]);
/* DAFS: restore the saved volume state */
1796 #ifdef AFS_DEMAND_ATTACH_FS
1798 VChangeState_r(vp, vol_state_save);
1799 #endif /* AFS_DEMAND_ATTACH_FS */