2 * Copyright 2000, International Business Machines Corporation and others.
5 * This software has been released under the terms of the IBM Public
6 * License. For details, see the LICENSE file in the top-level source
7 * directory or online at http://www.openafs.org/dl/license10.html
9 * Portions Copyright (c) 2005-2008 Sine Nomine Associates
15 Institution: The Information Technology Center, Carnegie-Mellon University
18 #include <afsconfig.h>
19 #include <afs/param.h>
23 #define MAXINT (~(1<<((sizeof(int)*8)-1)))
30 #include <afs/afs_assert.h>
33 #include "rx/rx_queue.h"
34 #include <afs/afsint.h>
36 #include <afs/errors.h>
39 #include <afs/afssyscalls.h>
43 #include "volume_inline.h"
44 #include "vnode_inline.h"
45 #include "partition.h"
48 #if defined(AFS_SGI_ENV)
49 #include "sys/types.h"
61 #include <sys/fcntl.h>
64 #endif /* AFS_NT40_ENV */
71 struct VnodeClassInfo VnodeClassInfo[nVNODECLASSES];
73 void VNLog(afs_int32 aop, afs_int32 anparms, ... );
80 #define BAD_IGET -1000
82 /* There are two separate vnode queue types defined here:
83 * Each hash conflict chain -- is singly linked, with a single head
84 * pointer. New entries are added at the beginning. Old
85 * entries are removed by linear search, which generally
86 * only occurs after a disk read.
87 * LRU chain -- is doubly linked, single head pointer.
88 * Entries are added at the head, reclaimed from the tail,
89 * or removed from anywhere in the queue.
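/*
 * Editor's illustrative sketch (kept out of the build with #if 0, not part
 * of the original source): the two list disciplines described above, using
 * the hashNext and lruNext/lruPrev links of struct Vnode.  The example_*
 * names are hypothetical.
 */
#if 0
static Vnode *
example_hash_push(Vnode * chainHead, Vnode * vnp)
{
    /* hash conflict chain: singly linked; new entries go at the head */
    vnp->hashNext = chainHead;
    return vnp;			/* vnp is the new chain head */
}

static Vnode *
example_lru_oldest(struct VnodeClassInfo * vcp)
{
    /* LRU chain: circular and doubly linked; the entry just behind the
     * head is the least recently used one (see VGetFreeVnode_r) */
    return vcp->lruHead ? vcp->lruHead->lruPrev : NULL;
}
#endif /* 0 */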
93 /* Vnode hash table. Find hash chain by taking lower bits of
94 * (volume_hash_offset + vnode).
95 * This distributes the root inodes of the volumes over the
96 * hash table entries and also distributes the vnodes of
97 * volumes reasonably fairly. The volume_hash_offset field
98 * for each volume is established as the volume comes on line
99 * by using the VOLUME_HASH_OFFSET macro. This distributes the
100 * volumes fairly among the cache entries, both when servicing
101 * a small number of volumes and when servicing a large number.
104 /* logging stuff for finding bugs */
105 #define THELOGSIZE 5120
106 static afs_int32 theLog[THELOGSIZE];
107 static afs_int32 vnLogPtr = 0;
109 VNLog(afs_int32 aop, afs_int32 anparms, ... )
114 va_start(ap, anparms);
117 anparms = 4; /* do bounds checking */
119 temp = (aop << 16) | anparms;
120 theLog[vnLogPtr++] = temp;
121 if (vnLogPtr >= THELOGSIZE)
123 for (temp = 0; temp < anparms; temp++) {
124 theLog[vnLogPtr++] = va_arg(ap, afs_int32);
125 if (vnLogPtr >= THELOGSIZE)
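/*
 * Editor's illustrative sketch (kept out of the build with #if 0, not part
 * of the original source): how a VNLog record is laid out in theLog[].  Each
 * record is one header word, (aop << 16) | anparms, followed by up to four
 * parameter words; the buffer wraps at THELOGSIZE, so a record may be split
 * across the wrap point.  example_vnlog_walk is hypothetical.
 */
#if 0
static void
example_vnlog_walk(void)
{
    afs_int32 i = 0;

    while (i < THELOGSIZE) {
	afs_int32 header = theLog[i++];
	afs_int32 aop = (header >> 16) & 0xffff;	/* opcode */
	afs_int32 anparms = header & 0xffff;		/* 0..4 parameters */
	afs_int32 j;

	for (j = 0; j < anparms && i < THELOGSIZE; j++, i++) {
	    /* theLog[i] holds parameter j of opcode aop */
	}
	(void)aop;
    }
}
#endif /* 0 */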
131 /* VolumeHashOffset -- returns a new value to be stored in the
132 * vnodeHashOffset of a Volume structure. Called when a
133 * volume is initialized. Sets the volumeHashOffset so that
134 * vnode cache entries are distributed reasonably between
135 * volumes (the root vnodes of the volumes will hash to
136 * different values, and spacing is maintained between volumes
137 * when there are not many volumes represented), and spread
138 * equally amongst vnodes within a single volume.
141 VolumeHashOffset_r(void)
143 static int nextVolumeHashOffset = 0;
144 /* hashindex must be a power of two in size */
146 # define hashMask ((1<<hashShift)-1)
147 static byte hashindex[1 << hashShift] =
148 { 0, 128, 64, 192, 32, 160, 96, 224 };
150 offset = hashindex[nextVolumeHashOffset & hashMask]
151 + (nextVolumeHashOffset >> hashShift);
152 nextVolumeHashOffset++;
156 /* Change hashindex (above) if you change this constant */
157 #define VNODE_HASH_TABLE_SIZE 256
158 private Vnode *VnodeHashTable[VNODE_HASH_TABLE_SIZE];
159 #define VNODE_HASH(volumeptr,vnodenumber)\
160 ((volumeptr->vnodeHashOffset + vnodenumber)&(VNODE_HASH_TABLE_SIZE-1))
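/*
 * Editor's worked example (kept out of the build with #if 0, not part of the
 * original source): with the eight-entry hashindex table above, successive
 * volumes receive vnodeHashOffset values 0, 128, 64, 192, 32, 160, 96, 224,
 * 1, 129, 65, ... (bit-reversed counting), so volume root vnodes land far
 * apart in the 256-entry table.  VNODE_HASH then simply adds the vnode id
 * and masks to the table size.  example_bucket_for is hypothetical.
 */
#if 0
static unsigned int
example_bucket_for(Volume * vp, VnodeId vnodeNumber)
{
    /* e.g. a volume whose vnodeHashOffset is 128 hashes vnode 1 into
     * bucket (128 + 1) & 255 == 129 */
    return VNODE_HASH(vp, vnodeNumber);
}
#endif /* 0 */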
164 * add a vnode to the volume's vnode list.
166 * @param[in] vp volume object pointer
167 * @param[in] vnp vnode object pointer
169 * @note for DAFS, it may seem like we should be acquiring a lightweight ref
170 * on vp, but this would actually break things. Right now, this is ok
171 * because we destroy all vnode cache contents during volume
176 * @internal volume package internal use only
179 AddToVVnList(Volume * vp, Vnode * vnp)
181 if (queue_IsOnQueue(vnp))
185 Vn_cacheCheck(vnp) = vp->cacheCheck;
186 queue_Append(&vp->vnode_list, vnp);
187 Vn_stateFlags(vnp) |= VN_ON_VVN;
191 * delete a vnode from the volume's vnode list.
195 * @internal volume package internal use only
198 DeleteFromVVnList(Vnode * vnp)
200 Vn_volume(vnp) = NULL;
202 if (!queue_IsOnQueue(vnp))
206 Vn_stateFlags(vnp) &= ~(VN_ON_VVN);
210 * add a vnode to the end of the lru.
212 * @param[in] vcp vnode class info object pointer
213 * @param[in] vnp vnode object pointer
215 * @internal vnode package internal use only
218 AddToVnLRU(struct VnodeClassInfo * vcp, Vnode * vnp)
220 if (Vn_stateFlags(vnp) & VN_ON_LRU) {
224 /* Add it to the circular LRU list */
225 if (vcp->lruHead == NULL)
226 Abort("AddToVnLRU: vcp->lruHead==NULL");
228 vnp->lruNext = vcp->lruHead;
229 vnp->lruPrev = vcp->lruHead->lruPrev;
230 vcp->lruHead->lruPrev = vnp;
231 vnp->lruPrev->lruNext = vnp;
235 /* If the vnode was just deleted, put it at the end of the chain so it
236 * will be reused immediately */
238 vcp->lruHead = vnp->lruNext;
240 Vn_stateFlags(vnp) |= VN_ON_LRU;
244 * delete a vnode from the lru.
246 * @param[in] vcp vnode class info object pointer
247 * @param[in] vnp vnode object pointer
249 * @internal vnode package internal use only
252 DeleteFromVnLRU(struct VnodeClassInfo * vcp, Vnode * vnp)
254 if (!(Vn_stateFlags(vnp) & VN_ON_LRU)) {
258 if (vnp == vcp->lruHead)
259 vcp->lruHead = vcp->lruHead->lruNext;
261 if ((vnp == vcp->lruHead) ||
262 (vcp->lruHead == NULL))
263 Abort("DeleteFromVnLRU: lru chain addled!\n");
265 vnp->lruPrev->lruNext = vnp->lruNext;
266 vnp->lruNext->lruPrev = vnp->lruPrev;
268 Vn_stateFlags(vnp) &= ~(VN_ON_LRU);
272 * add a vnode to the vnode hash table.
274 * @param[in] vnp vnode object pointer
278 * @post vnode on hash
280 * @internal vnode package internal use only
283 AddToVnHash(Vnode * vnp)
285 unsigned int newHash;
287 if (!(Vn_stateFlags(vnp) & VN_ON_HASH)) {
288 newHash = VNODE_HASH(Vn_volume(vnp), Vn_id(vnp));
289 vnp->hashNext = VnodeHashTable[newHash];
290 VnodeHashTable[newHash] = vnp;
291 vnp->hashIndex = newHash;
293 Vn_stateFlags(vnp) |= VN_ON_HASH;
298 * delete a vnode from the vnode hash table.
305 * @post vnode removed from hash
307 * @internal vnode package internal use only
310 DeleteFromVnHash(Vnode * vnp)
314 if (Vn_stateFlags(vnp) & VN_ON_HASH) {
315 tvnp = VnodeHashTable[vnp->hashIndex];
317 VnodeHashTable[vnp->hashIndex] = vnp->hashNext;
319 while (tvnp && tvnp->hashNext != vnp)
320 tvnp = tvnp->hashNext;
322 tvnp->hashNext = vnp->hashNext;
325 vnp->hashNext = NULL;
327 Vn_stateFlags(vnp) &= ~(VN_ON_HASH);
333 * invalidate a vnode cache entry.
335 * @param[in] avnode vnode object pointer
339 * @post vnode metadata invalidated.
340 * vnode removed from hash table.
341 * DAFS: vnode state set to VN_STATE_INVALID.
343 * @internal vnode package internal use only
346 VInvalidateVnode_r(struct Vnode *avnode)
348 avnode->changed_newTime = 0; /* don't let it get flushed out again */
349 avnode->changed_oldTime = 0;
350 avnode->delete = 0; /* it isn't deleted, really */
351 avnode->cacheCheck = 0; /* invalid: prevents future vnode searches from working */
352 DeleteFromVnHash(avnode);
353 #ifdef AFS_DEMAND_ATTACH_FS
354 VnChangeState_r(avnode, VN_STATE_INVALID);
360 * initialize vnode cache for a given vnode class.
362 * @param[in] class vnode class
363 * @param[in] nVnodes size of cache
365 * @post vnode cache allocated and initialized
367 * @internal volume package internal use only
369 * @note generally called by VInitVolumePackage_r
371 * @see VInitVolumePackage_r
374 VInitVnodes(VnodeClass class, int nVnodes)
377 struct VnodeClassInfo *vcp = &VnodeClassInfo[class];
379 vcp->allocs = vcp->gets = vcp->reads = vcp->writes = 0;
380 vcp->cacheSize = nVnodes;
383 osi_Assert(CHECKSIZE_SMALLVNODE);
385 vcp->residentSize = SIZEOF_SMALLVNODE;
386 vcp->diskSize = SIZEOF_SMALLDISKVNODE;
387 vcp->magic = SMALLVNODEMAGIC;
391 vcp->residentSize = SIZEOF_LARGEVNODE;
392 vcp->diskSize = SIZEOF_LARGEDISKVNODE;
393 vcp->magic = LARGEVNODEMAGIC;
397 int s = vcp->diskSize - 1;
407 va = (byte *) calloc(nVnodes, vcp->residentSize);
408 osi_Assert(va != NULL);
410 Vnode *vnp = (Vnode *) va;
411 Vn_refcount(vnp) = 0; /* no context switches */
412 Vn_stateFlags(vnp) |= VN_ON_LRU;
413 #ifdef AFS_DEMAND_ATTACH_FS
414 CV_INIT(&Vn_stateCV(vnp), "vnode state", CV_DEFAULT, 0);
415 Vn_state(vnp) = VN_STATE_INVALID;
417 #else /* !AFS_DEMAND_ATTACH_FS */
418 Lock_Init(&vnp->lock);
419 #endif /* !AFS_DEMAND_ATTACH_FS */
420 vnp->changed_oldTime = 0;
421 vnp->changed_newTime = 0;
422 Vn_volume(vnp) = NULL;
423 Vn_cacheCheck(vnp) = 0;
424 vnp->delete = Vn_id(vnp) = 0;
425 #ifdef AFS_PTHREAD_ENV
426 vnp->writer = (pthread_t) 0;
427 #else /* AFS_PTHREAD_ENV */
428 vnp->writer = (PROCESS) 0;
429 #endif /* AFS_PTHREAD_ENV */
433 if (vcp->lruHead == NULL)
434 vcp->lruHead = vnp->lruNext = vnp->lruPrev = vnp;
436 vnp->lruNext = vcp->lruHead;
437 vnp->lruPrev = vcp->lruHead->lruPrev;
438 vcp->lruHead->lruPrev = vnp;
439 vnp->lruPrev->lruNext = vnp;
442 va += vcp->residentSize;
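/*
 * Editor's illustrative sketch (kept out of the build with #if 0, not part
 * of the original source): the caches are sized per vnode class, so package
 * start-up makes one VInitVnodes call per class, along these lines (the
 * cache sizes here are arbitrary; see VInitVolumePackage_r for the real
 * call sites).
 */
#if 0
static void
example_init_vnode_caches(void)
{
    VInitVnodes(vLarge, 200);	/* large (directory) vnode cache */
    VInitVnodes(vSmall, 400);	/* small (file/symlink) vnode cache */
}
#endif /* 0 */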
449 * allocate an unused vnode from the lru chain.
451 * @param[in] vcp vnode class info object pointer
453 * @pre VOL_LOCK is held
455 * @post vnode object is removed from lru, and vnode hash table.
456 * vnode is disassociated from volume object.
457 * state is set to VN_STATE_INVALID.
458 * inode handle is released.
460 * @note we traverse backwards along the circular lru list. It shouldn't
461 * be necessary to specify that nUsers == 0 since if it is in the list,
462 * nUsers should be 0. Things shouldn't be in lruq unless no one is
465 * @warning DAFS: VOL_LOCK is dropped while doing inode handle release
467 * @return vnode object pointer
470 VGetFreeVnode_r(struct VnodeClassInfo * vcp)
474 vnp = vcp->lruHead->lruPrev;
475 #ifdef AFS_DEMAND_ATTACH_FS
476 if (Vn_refcount(vnp) != 0 || VnIsExclusiveState(Vn_state(vnp)) ||
477 Vn_readers(vnp) != 0)
478 Abort("VGetFreeVnode_r: in-use vnode in lruq");
480 if (Vn_refcount(vnp) != 0 || CheckLock(&vnp->lock))
481 Abort("VGetFreeVnode_r: locked vnode in lruq");
483 VNLog(1, 2, Vn_id(vnp), (intptr_t)vnp, 0, 0);
486 * it's going to be overwritten soon enough.
487 * remove from LRU, delete hash entry, and
488 * disassociate from old parent volume before
489 * we have a chance to drop the vol glock
491 DeleteFromVnLRU(vcp, vnp);
492 DeleteFromVnHash(vnp);
493 if (Vn_volume(vnp)) {
494 DeleteFromVVnList(vnp);
497 /* drop the file descriptor */
499 #ifdef AFS_DEMAND_ATTACH_FS
500 VnChangeState_r(vnp, VN_STATE_RELEASING);
503 /* release is, potentially, a highly latent operation due to a couple
505 * - ihandle package lock contention
506 * - closing file descriptor(s) associated with ih
508 * Hence, we perform the release outside of the volume package lock in order to
509 * reduce the probability of contention.
511 IH_RELEASE(vnp->handle);
512 #ifdef AFS_DEMAND_ATTACH_FS
517 #ifdef AFS_DEMAND_ATTACH_FS
518 VnChangeState_r(vnp, VN_STATE_INVALID);
526 * lookup a vnode in the vnode cache hash table.
528 * @param[in] vp pointer to volume object
529 * @param[in] vnodeId vnode id
533 * @post matching vnode object or NULL is returned
535 * @return vnode object pointer
536 * @retval NULL no matching vnode object was found in the cache
538 * @internal vnode package internal use only
540 * @note this symbol is exported strictly for fssync debug protocol use
543 VLookupVnode(Volume * vp, VnodeId vnodeId)
546 unsigned int newHash;
548 newHash = VNODE_HASH(vp, vnodeId);
549 for (vnp = VnodeHashTable[newHash];
551 ((Vn_id(vnp) != vnodeId) ||
552 (Vn_volume(vnp) != vp) ||
553 (vp->cacheCheck != Vn_cacheCheck(vnp))));
554 vnp = vnp->hashNext);
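/*
 * Editor's illustrative sketch (kept out of the build with #if 0, not part
 * of the original source): VLookupVnode returns NULL on a cache miss, so
 * callers fall back to allocating or loading the vnode (compare the miss
 * paths of VAllocVnode_r and VGetVnode_r below).  example_is_cached is
 * hypothetical.
 */
#if 0
static int
example_is_cached(Volume * vp, VnodeId vnodeId)
{
    Vnode *vnp = VLookupVnode(vp, vnodeId);
    return (vnp != NULL);
}
#endif /* 0 */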
561 VAllocVnode(Error * ec, Volume * vp, VnodeType type)
565 retVal = VAllocVnode_r(ec, vp, type);
571 * allocate a new vnode.
573 * @param[out] ec error code return
574 * @param[in] vp volume object pointer
575 * @param[in] type desired vnode type
577 * @return vnode object pointer
579 * @pre VOL_LOCK held;
580 * heavyweight ref held on vp
582 * @post vnode allocated and returned
585 VAllocVnode_r(Error * ec, Volume * vp, VnodeType type)
590 struct VnodeClassInfo *vcp;
593 #ifdef AFS_DEMAND_ATTACH_FS
594 VolState vol_state_save;
599 #ifdef AFS_DEMAND_ATTACH_FS
601 * once a volume has entered an error state, don't permit
602 * further operations to proceed
603 * -- tkeiser 11/21/2007
605 VWaitExclusiveState_r(vp);
606 if (VIsErrorState(V_attachState(vp))) {
607 /* XXX is VSALVAGING acceptable here? */
613 if (programType == fileServer && !V_inUse(vp)) {
614 if (vp->specialStatus) {
615 *ec = vp->specialStatus;
621 class = vnodeTypeToClass(type);
622 vcp = &VnodeClassInfo[class];
624 if (!VolumeWriteable(vp)) {
625 *ec = (bit32) VREADONLY;
629 unique = vp->nextVnodeUnique++;
631 unique = vp->nextVnodeUnique++;
633 if (vp->nextVnodeUnique > V_uniquifier(vp)) {
634 VUpdateVolume_r(ec, vp, 0);
639 if (programType == fileServer) {
640 VAddToVolumeUpdateList_r(ec, vp);
645 /* Find a slot in the bit map */
646 bitNumber = VAllocBitmapEntry_r(ec, vp, &vp->vnodeIndex[class],
647 VOL_ALLOC_BITMAP_WAIT);
650 vnodeNumber = bitNumberToVnodeNumber(bitNumber, class);
654 * at this point we should be assured that V_attachState(vp) is non-exclusive
658 VNLog(2, 1, vnodeNumber, 0, 0, 0);
659 /* Prepare to move it to the new hash chain */
660 vnp = VLookupVnode(vp, vnodeNumber);
662 /* slot already exists. It may not even be in the lruq (consider a store-file
663 * operation locking a file that is being deleted), so we may have to wait for it below */
664 VNLog(3, 2, vnodeNumber, (intptr_t)vnp, 0, 0);
666 VnCreateReservation_r(vnp);
667 if (Vn_refcount(vnp) == 1) {
668 /* we're the only user */
669 /* This won't block */
670 VnLock(vnp, WRITE_LOCK, VOL_LOCK_HELD, WILL_NOT_DEADLOCK);
672 /* other users present; follow locking hierarchy */
673 VnLock(vnp, WRITE_LOCK, VOL_LOCK_HELD, MIGHT_DEADLOCK);
675 #ifdef AFS_DEMAND_ATTACH_FS
678 * vnode was cached, wait for any existing exclusive ops to finish.
679 * once we have reacquired the lock, re-verify volume state.
681 * note: any vnode error state is related to the old vnode; disregard.
683 VnWaitQuiescent_r(vnp);
684 if (VIsErrorState(V_attachState(vp))) {
685 VnUnlock(vnp, WRITE_LOCK);
686 VnCancelReservation_r(vnp);
693 * verify state of the world hasn't changed
695 * (technically, this should never happen because cachecheck
696 * is only updated during a volume attach, which should not
697 * happen when refs are held)
699 if (Vn_volume(vnp)->cacheCheck != Vn_cacheCheck(vnp)) {
700 VnUnlock(vnp, WRITE_LOCK);
701 VnCancelReservation_r(vnp);
707 /* no such vnode in the cache */
709 vnp = VGetFreeVnode_r(vcp);
711 /* Initialize the header fields so no one allocates another
712 * vnode with the same number */
713 Vn_id(vnp) = vnodeNumber;
714 VnCreateReservation_r(vnp);
715 AddToVVnList(vp, vnp);
716 #ifdef AFS_DEMAND_ATTACH_FS
720 /* This will never block (guaranteed by the check in VGetFreeVnode_r()) */
721 VnLock(vnp, WRITE_LOCK, VOL_LOCK_HELD, WILL_NOT_DEADLOCK);
723 #ifdef AFS_DEMAND_ATTACH_FS
724 VnChangeState_r(vnp, VN_STATE_ALLOC);
727 /* Sanity check: is this vnode really not in use? */
730 IHandle_t *ihP = vp->vnodeIndex[class].handle;
732 afs_foff_t off = vnodeIndexOffset(vcp, vnodeNumber);
735 /* XXX we have a potential race here if two threads
736 * allocate new vnodes at the same time, and they
737 * both decide it's time to extend the index
740 #ifdef AFS_DEMAND_ATTACH_FS
742 * this race has been eliminated for the DAFS case
743 * using exclusive state VOL_STATE_VNODE_ALLOC
745 * if this becomes a bottleneck, there are ways to
746 * improve parallelism for this code path
747 * -- tkeiser 11/28/2007
749 VCreateReservation_r(vp);
750 VWaitExclusiveState_r(vp);
751 vol_state_save = VChangeState_r(vp, VOL_STATE_VNODE_ALLOC);
757 Log("VAllocVnode: can't open index file!\n");
759 goto error_encountered;
761 if ((size = FDH_SIZE(fdP)) < 0) {
762 Log("VAllocVnode: can't stat index file!\n");
764 goto error_encountered;
766 if (off + vcp->diskSize <= size) {
767 if (FDH_PREAD(fdP, &vnp->disk, vcp->diskSize, off) != vcp->diskSize) {
768 Log("VAllocVnode: can't read index file!\n");
770 goto error_encountered;
772 if (vnp->disk.type != vNull) {
773 Log("VAllocVnode: addled bitmap or index!\n");
775 goto error_encountered;
778 /* growing file - grow in a reasonable increment */
779 char *buf = (char *)malloc(16 * 1024);
781 Log("VAllocVnode: can't grow vnode index: out of memory\n");
783 goto error_encountered;
785 memset(buf, 0, 16 * 1024);
786 if ((FDH_PWRITE(fdP, buf, 16 * 1024, off)) != 16 * 1024) {
787 Log("VAllocVnode: can't grow vnode index: write failed\n");
790 goto error_encountered;
797 #ifdef AFS_DEMAND_ATTACH_FS
798 VChangeState_r(vp, vol_state_save);
799 VCancelReservation_r(vp);
806 * close the file handle
808 * invalidate the vnode
809 * free up the bitmap entry (although salvager should take care of it)
811 * drop vnode lock and refs
816 VFreeBitMapEntry_r(&tmp, &vp->vnodeIndex[class], bitNumber);
817 VInvalidateVnode_r(vnp);
818 VnUnlock(vnp, WRITE_LOCK);
819 VnCancelReservation_r(vnp);
820 #ifdef AFS_DEMAND_ATTACH_FS
821 VRequestSalvage_r(ec, vp, SALVSYNC_ERROR, 0);
822 VCancelReservation_r(vp);
824 VForceOffline_r(vp, 0);
829 VNLog(4, 2, vnodeNumber, (intptr_t)vnp, 0, 0);
830 #ifndef AFS_DEMAND_ATTACH_FS
835 VNLog(5, 1, (intptr_t)vnp, 0, 0, 0);
836 memset(&vnp->disk, 0, sizeof(vnp->disk));
837 vnp->changed_newTime = 0; /* set this bit when vnode is updated */
838 vnp->changed_oldTime = 0; /* set this on CopyOnWrite. */
840 vnp->disk.vnodeMagic = vcp->magic;
841 vnp->disk.type = type;
842 vnp->disk.uniquifier = unique;
845 vp->header->diskstuff.filecount++;
846 #ifdef AFS_DEMAND_ATTACH_FS
847 VnChangeState_r(vnp, VN_STATE_EXCLUSIVE);
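/*
 * Editor's illustrative sketch (kept out of the build with #if 0, not part
 * of the original source): a typical caller of the unlocked wrapper.  The
 * allocation path above returns the vnode write-locked with vnodeMagic,
 * type and uniquifier already filled in; the caller completes vnp->disk,
 * marks it changed, and hands it back with VPutVnode.
 * example_create_file_vnode is hypothetical and error handling is
 * abbreviated.
 */
#if 0
static void
example_create_file_vnode(Error * ec, Volume * vp)
{
    Vnode *vnp;

    *ec = 0;
    vnp = VAllocVnode(ec, vp, vFile);
    if (!vnp || *ec)
	return;
    /* ... fill in the remaining vnp->disk fields here ... */
    vnp->changed_newTime = 1;	/* "set this bit when vnode is updated" */
    VPutVnode(ec, vnp);		/* writes the vnode out and drops the ref */
}
#endif /* 0 */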
853 * load a vnode from disk.
855 * @param[out] ec client error code return
856 * @param[in] vp volume object pointer
857 * @param[in] vnp vnode object pointer
858 * @param[in] vcp vnode class info object pointer
859 * @param[in] class vnode class enumeration
861 * @pre vnode is registered in appropriate data structures;
862 * caller holds a ref on vnode; VOL_LOCK is held
864 * @post vnode data is loaded from disk.
865 * vnode state is set to VN_STATE_ONLINE.
866 * on failure, vnode is invalidated.
868 * @internal vnode package internal use only
871 VnLoad(Error * ec, Volume * vp, Vnode * vnp,
872 struct VnodeClassInfo * vcp, VnodeClass class)
874 /* vnode not cached */
878 IHandle_t *ihP = vp->vnodeIndex[class].handle;
885 #ifdef AFS_DEMAND_ATTACH_FS
886 VnChangeState_r(vnp, VN_STATE_LOAD);
889 /* This will never block */
890 VnLock(vnp, WRITE_LOCK, VOL_LOCK_HELD, WILL_NOT_DEADLOCK);
895 Log("VnLoad: can't open index dev=%u, i=%s\n", vp->device,
896 PrintInode(stmp, vp->vnodeIndex[class].handle->ih_ino));
898 goto error_encountered_nolock;
899 } else if ((nBytes = FDH_PREAD(fdP, (char *)&vnp->disk, vcp->diskSize, vnodeIndexOffset(vcp, Vn_id(vnp))))
901 /* Don't take volume off line if the inumber is out of range
902 * or the inode table is full. */
903 if (nBytes == BAD_IGET) {
904 Log("VnLoad: bad inumber %s\n",
905 PrintInode(stmp, vp->vnodeIndex[class].handle->ih_ino));
908 } else if (nBytes == -1 && errno == EIO) {
909 /* disk error; salvage */
910 Log("VnLoad: Couldn't read vnode %u, volume %u (%s); volume needs salvage\n", Vn_id(vnp), V_id(vp), V_name(vp));
912 /* vnode is not allocated */
914 Log("VnLoad: Couldn't read vnode %u, volume %u (%s); read %d bytes, errno %d\n",
915 Vn_id(vnp), V_id(vp), V_name(vp), (int)nBytes, errno);
919 goto error_encountered_nolock;
924 /* Quick check to see that the data is reasonable */
925 if (vnp->disk.vnodeMagic != vcp->magic || vnp->disk.type == vNull) {
926 if (vnp->disk.type == vNull) {
930 struct vnodeIndex *index = &vp->vnodeIndex[class];
931 unsigned int bitNumber = vnodeIdToBitNumber(Vn_id(vnp));
932 unsigned int offset = bitNumber >> 3;
934 /* Test to see if vnode number is valid. */
935 if ((offset >= index->bitmapSize)
936 || ((*(index->bitmap + offset) & (1 << (bitNumber & 0x7)))
938 Log("VnLoad: Request for unallocated vnode %u, volume %u (%s) denied.\n", Vn_id(vnp), V_id(vp), V_name(vp));
942 Log("VnLoad: Bad magic number, vnode %u, volume %u (%s); volume needs salvage\n", Vn_id(vnp), V_id(vp), V_name(vp));
945 goto error_encountered;
948 IH_INIT(vnp->handle, V_device(vp), V_parentId(vp), VN_GET_INO(vnp));
949 VnUnlock(vnp, WRITE_LOCK);
950 #ifdef AFS_DEMAND_ATTACH_FS
951 VnChangeState_r(vnp, VN_STATE_ONLINE);
956 error_encountered_nolock:
958 FDH_REALLYCLOSE(fdP);
964 #ifdef AFS_DEMAND_ATTACH_FS
965 VRequestSalvage_r(&error, vp, SALVSYNC_ERROR, 0);
967 VForceOffline_r(vp, 0);
974 VInvalidateVnode_r(vnp);
975 VnUnlock(vnp, WRITE_LOCK);
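/*
 * Editor's illustrative sketch (kept out of the build with #if 0, not part
 * of the original source): the allocation check used in the vNull branch
 * above, pulled out into a stand-alone predicate.  A vnode id maps to a bit
 * in the per-class bitmap; the bit lives in byte (bitNumber >> 3) and is
 * tested with mask 1 << (bitNumber & 0x7).  example_vnode_is_allocated is
 * hypothetical.
 */
#if 0
static int
example_vnode_is_allocated(struct vnodeIndex * index, VnodeId vnodeId)
{
    unsigned int bitNumber = vnodeIdToBitNumber(vnodeId);
    unsigned int offset = bitNumber >> 3;

    if (offset >= index->bitmapSize)
	return 0;		/* beyond the bitmap: never allocated */
    return (*(index->bitmap + offset) & (1 << (bitNumber & 0x7))) != 0;
}
#endif /* 0 */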
979 * store a vnode to disk.
981 * @param[out] ec error code output
982 * @param[in] vp volume object pointer
983 * @param[in] vnp vnode object pointer
984 * @param[in] vcp vnode class info object pointer
985 * @param[in] class vnode class enumeration
987 * @pre VOL_LOCK held.
988 * caller holds refs to volume and vnode.
989 * DAFS: caller is responsible for performing state sanity checks.
991 * @post vnode state is stored to disk.
993 * @internal vnode package internal use only
996 VnStore(Error * ec, Volume * vp, Vnode * vnp,
997 struct VnodeClassInfo * vcp, VnodeClass class)
1001 IHandle_t *ihP = vp->vnodeIndex[class].handle;
1004 #ifdef AFS_DEMAND_ATTACH_FS
1005 VnState vn_state_save;
1010 #ifdef AFS_DEMAND_ATTACH_FS
1011 vn_state_save = VnChangeState_r(vnp, VN_STATE_STORE);
1014 offset = vnodeIndexOffset(vcp, Vn_id(vnp));
1018 Log("VnStore: can't open index file!\n");
1019 goto error_encountered;
1021 nBytes = FDH_PWRITE(fdP, &vnp->disk, vcp->diskSize, offset);
1022 if (nBytes != vcp->diskSize) {
1023 /* Don't force volume offline if the inumber is out of
1024 * range or the inode table is full.
1026 FDH_REALLYCLOSE(fdP);
1027 if (nBytes == BAD_IGET) {
1028 Log("VnStore: bad inumber %s\n",
1030 vp->vnodeIndex[class].handle->ih_ino));
1033 #ifdef AFS_DEMAND_ATTACH_FS
1034 VnChangeState_r(vnp, VN_STATE_ERROR);
1037 Log("VnStore: Couldn't write vnode %u, volume %u (%s) (error %d)\n", Vn_id(vnp), V_id(Vn_volume(vnp)), V_name(Vn_volume(vnp)), (int)nBytes);
1038 #ifdef AFS_DEMAND_ATTACH_FS
1039 goto error_encountered;
1042 VForceOffline_r(vp, 0);
1052 #ifdef AFS_DEMAND_ATTACH_FS
1053 VnChangeState_r(vnp, vn_state_save);
1058 #ifdef AFS_DEMAND_ATTACH_FS
1059 /* XXX instead of dumping core, let's try to request a salvage
1060 * and just fail the putvnode */
1064 VnChangeState_r(vnp, VN_STATE_ERROR);
1065 VRequestSalvage_r(ec, vp, SALVSYNC_ERROR, 0);
1072 * get a handle to a vnode object.
1074 * @param[out] ec error code
1075 * @param[in] vp volume object
1076 * @param[in] vnodeNumber vnode id
1077 * @param[in] locktype type of lock to acquire
1079 * @return vnode object pointer
1084 VGetVnode(Error * ec, Volume * vp, VnodeId vnodeNumber, int locktype)
1085 { /* READ_LOCK or WRITE_LOCK, as defined in lock.h */
1088 retVal = VGetVnode_r(ec, vp, vnodeNumber, locktype);
1094 * get a handle to a vnode object.
1096 * @param[out] ec error code
1097 * @param[in] vp volume object
1098 * @param[in] vnodeNumber vnode id
1099 * @param[in] locktype type of lock to acquire
1101 * @return vnode object pointer
1103 * @internal vnode package internal use only
1105 * @pre VOL_LOCK held.
1106 * heavyweight ref held on volume object.
1109 VGetVnode_r(Error * ec, Volume * vp, VnodeId vnodeNumber, int locktype)
1110 { /* READ_LOCK or WRITE_LOCK, as defined in lock.h */
1113 struct VnodeClassInfo *vcp;
1117 if (vnodeNumber == 0) {
1122 VNLog(100, 1, vnodeNumber, 0, 0, 0);
1124 #ifdef AFS_DEMAND_ATTACH_FS
1126 * once a volume has entered an error state, don't permit
1127 * further operations to proceed
1128 * -- tkeiser 11/21/2007
1130 VWaitExclusiveState_r(vp);
1131 if (VIsErrorState(V_attachState(vp))) {
1132 /* XXX is VSALVAGING acceptable here? */
1138 if (programType == fileServer && !V_inUse(vp)) {
1139 *ec = (vp->specialStatus ? vp->specialStatus : VOFFLINE);
1141 /* If the volume is VBUSY (being cloned or dumped) and this is
1142 * a READ operation, then don't fail.
1144 if ((*ec != VBUSY) || (locktype != READ_LOCK)) {
1149 class = vnodeIdToClass(vnodeNumber);
1150 vcp = &VnodeClassInfo[class];
1151 if (locktype == WRITE_LOCK && !VolumeWriteable(vp)) {
1152 *ec = (bit32) VREADONLY;
1156 if (locktype == WRITE_LOCK && programType == fileServer) {
1157 VAddToVolumeUpdateList_r(ec, vp);
1165 /* See whether the vnode is in the cache. */
1166 vnp = VLookupVnode(vp, vnodeNumber);
1168 /* vnode is in cache */
1170 VNLog(101, 2, vnodeNumber, (intptr_t)vnp, 0, 0);
1171 VnCreateReservation_r(vnp);
1173 #ifdef AFS_DEMAND_ATTACH_FS
1175 * this is the one DAFS case where we may run into contention.
1176 * here's the basic control flow:
1178 * if locktype is READ_LOCK:
1179 * wait until vnode is not exclusive
1180 * set to VN_STATE_READ
1181 * increment read count
1184 * wait until vnode is quiescent
1185 * set to VN_STATE_EXCLUSIVE
1188 if (locktype == READ_LOCK) {
1189 VnWaitExclusiveState_r(vnp);
1191 VnWaitQuiescent_r(vnp);
1194 if (VnIsErrorState(Vn_state(vnp))) {
1195 VnCancelReservation_r(vnp);
1199 #endif /* AFS_DEMAND_ATTACH_FS */
1201 /* vnode not cached */
1203 /* Not in cache; tentatively grab most distantly used one from the LRU
1206 vnp = VGetFreeVnode_r(vcp);
1209 vnp->changed_newTime = vnp->changed_oldTime = 0;
1211 Vn_id(vnp) = vnodeNumber;
1212 VnCreateReservation_r(vnp);
1213 AddToVVnList(vp, vnp);
1214 #ifdef AFS_DEMAND_ATTACH_FS
1219 * XXX for non-DAFS, there is a serious
1220 * race condition here:
1222 * two threads can race to load a vnode. the net
1223 * result is two struct Vnodes can be allocated
1224 * and hashed, which point to the same underlying
1225 * disk data store. conflicting vnode locks can
1226 * thus be held concurrently.
1228 * for non-DAFS to be safe, VOL_LOCK really shouldn't
1229 * be dropped in VnLoad. Of course, this would likely
1230 * lead to an unacceptable slow-down.
1233 VnLoad(ec, vp, vnp, vcp, class);
1235 VnCancelReservation_r(vnp);
1238 #ifndef AFS_DEMAND_ATTACH_FS
1243 * there is no possibility for contention. we "own" this vnode.
1249 * it is imperative that nothing drop vol lock between here
1250 * and the VnBeginRead/VnChangeState stanza below
1253 VnLock(vnp, locktype, VOL_LOCK_HELD, MIGHT_DEADLOCK);
1255 /* Check that the vnode hasn't been removed while we were obtaining
1257 VNLog(102, 2, vnodeNumber, (intptr_t) vnp, 0, 0);
1258 if ((vnp->disk.type == vNull) || (Vn_cacheCheck(vnp) == 0)) {
1259 VnUnlock(vnp, locktype);
1260 VnCancelReservation_r(vnp);
1262 /* vnode is labelled correctly by now, so we don't have to invalidate it */
1266 #ifdef AFS_DEMAND_ATTACH_FS
1267 if (locktype == READ_LOCK) {
1270 VnChangeState_r(vnp, VN_STATE_EXCLUSIVE);
1274 if (programType == fileServer)
1275 VBumpVolumeUsage_r(Vn_volume(vnp)); /* Hack; don't know where it should be
1276 * called from. Maybe VGetVolume */
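/*
 * Editor's illustrative sketch (kept out of the build with #if 0, not part
 * of the original source): the canonical fetch/use/release pattern for the
 * handle returned above.  A read-only caller passes READ_LOCK; a caller
 * that modifies the vnode passes WRITE_LOCK, marks it changed, and lets
 * VPutVnode write it back.  example_touch_vnode is hypothetical.
 */
#if 0
static void
example_touch_vnode(Error * ec, Volume * vp, VnodeId vnodeNumber)
{
    Vnode *vnp;

    *ec = 0;
    vnp = VGetVnode(ec, vp, vnodeNumber, WRITE_LOCK);
    if (!vnp || *ec)
	return;
    /* ... examine or update vnp->disk ... */
    vnp->changed_newTime = 1;	/* mark modified so VPutVnode writes it back */
    VPutVnode(ec, vnp);		/* flushes the change and drops the ref */
}
#endif /* 0 */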
1281 int TrustVnodeCacheEntry = 1;
1282 /* This variable is bogus--when it's set to 0, the hash chains fill
1283 up with multiple versions of the same vnode. Should fix this!! */
1285 VPutVnode(Error * ec, Vnode * vnp)
1288 VPutVnode_r(ec, vnp);
1293 * put back a handle to a vnode object.
1295 * @param[out] ec client error code
1296 * @param[in] vnp vnode object pointer
1298 * @pre VOL_LOCK held.
1299 * ref held on vnode.
1301 * @post ref dropped on vnode.
1302 * if vnode was modified or deleted, it is written out to disk
1303 * (assuming a write lock was held).
1305 * @internal volume package internal use only
1308 VPutVnode_r(Error * ec, Vnode * vnp)
1312 struct VnodeClassInfo *vcp;
1315 osi_Assert(Vn_refcount(vnp) != 0);
1316 class = vnodeIdToClass(Vn_id(vnp));
1317 vcp = &VnodeClassInfo[class];
1318 osi_Assert(vnp->disk.vnodeMagic == vcp->magic);
1319 VNLog(200, 2, Vn_id(vnp), (intptr_t) vnp, 0, 0);
1321 #ifdef AFS_DEMAND_ATTACH_FS
1322 writeLocked = (Vn_state(vnp) == VN_STATE_EXCLUSIVE);
1324 writeLocked = WriteLocked(&vnp->lock);
1329 #ifdef AFS_PTHREAD_ENV
1330 pthread_t thisProcess = pthread_self();
1331 #else /* AFS_PTHREAD_ENV */
1332 PROCESS thisProcess;
1333 LWP_CurrentProcess(&thisProcess);
1334 #endif /* AFS_PTHREAD_ENV */
1335 VNLog(201, 2, (intptr_t) vnp,
1336 ((vnp->changed_newTime) << 2) | ((vnp->changed_oldTime) << 1) | vnp->
1339 if (thisProcess != vnp->writer)
1340 Abort("VPutVnode: Vnode at %"AFS_PTR_FMT" locked by another process!\n",
1344 if (vnp->changed_oldTime || vnp->changed_newTime || vnp->delete) {
1345 Volume *vp = Vn_volume(vnp);
1346 afs_uint32 now = FT_ApproxTime();
1347 osi_Assert(Vn_cacheCheck(vnp) == vp->cacheCheck);
1350 /* No longer any directory entries for this vnode. Free the Vnode */
1351 memset(&vnp->disk, 0, sizeof(vnp->disk));
1352 /* delete flag turned off further down */
1353 VNLog(202, 2, Vn_id(vnp), (intptr_t) vnp, 0, 0);
1354 } else if (vnp->changed_newTime) {
1355 vnp->disk.serverModifyTime = now;
1357 if (vnp->changed_newTime)
1359 V_updateDate(vp) = vp->updateTime = now;
1360 if (V_volUpCounter(vp) < MAXINT)
1361 V_volUpCounter(vp)++;
1364 /* The vnode has been changed. Write it out to disk */
1366 #ifdef AFS_DEMAND_ATTACH_FS
1367 VRequestSalvage_r(ec, vp, SALVSYNC_ERROR, 0);
1369 osi_Assert(V_needsSalvaged(vp));
1373 VnStore(ec, vp, vnp, vcp, class);
1375 /* If the vnode is to be deleted, and we wrote the vnode out,
1376 * free its bitmap entry. Do after the vnode is written so we
1377 * don't allocate from bitmap before the vnode is written
1378 * (doing so could cause an "addled bitmap" message).
1380 if (vnp->delete && !*ec) {
1381 if (Vn_volume(vnp)->header->diskstuff.filecount-- < 1)
1382 Vn_volume(vnp)->header->diskstuff.filecount = 0;
1383 VFreeBitMapEntry_r(ec, &vp->vnodeIndex[class],
1384 vnodeIdToBitNumber(Vn_id(vnp)));
1388 vnp->changed_newTime = vnp->changed_oldTime = 0;
1390 #ifdef AFS_DEMAND_ATTACH_FS
1391 VnChangeState_r(vnp, VN_STATE_ONLINE);
1393 } else { /* Not write locked */
1394 if (vnp->changed_newTime || vnp->changed_oldTime || vnp->delete)
1396 ("VPutVnode: Change or delete flag for vnode "
1397 "%"AFS_PTR_FMT" is set but vnode is not write locked!\n",
1399 #ifdef AFS_DEMAND_ATTACH_FS
1404 /* Do not look at disk portion of vnode after this point; it may
1405 * have been deleted above */
1407 VnUnlock(vnp, ((writeLocked) ? WRITE_LOCK : READ_LOCK));
1408 VnCancelReservation_r(vnp);
1412 * Make an attempt to convert a vnode lock from write to read.
1413 * Do nothing if the vnode isn't write locked or the vnode has
1417 VVnodeWriteToRead(Error * ec, Vnode * vnp)
1421 retVal = VVnodeWriteToRead_r(ec, vnp);
1427 * convert vnode handle from mutually exclusive to shared access.
1429 * @param[out] ec client error code
1430 * @param[in] vnp vnode object pointer
1432 * @return unspecified use (see out argument 'ec' for error code return)
1434 * @pre VOL_LOCK held.
1435 * ref held on vnode.
1436 * write lock held on vnode.
1438 * @post read lock held on vnode.
1439 * if vnode was modified, it has been written to disk.
1441 * @internal volume package internal use only
1444 VVnodeWriteToRead_r(Error * ec, Vnode * vnp)
1448 struct VnodeClassInfo *vcp;
1449 #ifdef AFS_PTHREAD_ENV
1450 pthread_t thisProcess;
1451 #else /* AFS_PTHREAD_ENV */
1452 PROCESS thisProcess;
1453 #endif /* AFS_PTHREAD_ENV */
1456 osi_Assert(Vn_refcount(vnp) != 0);
1457 class = vnodeIdToClass(Vn_id(vnp));
1458 vcp = &VnodeClassInfo[class];
1459 osi_Assert(vnp->disk.vnodeMagic == vcp->magic);
1460 VNLog(300, 2, Vn_id(vnp), (intptr_t) vnp, 0, 0);
1462 #ifdef AFS_DEMAND_ATTACH_FS
1463 writeLocked = (Vn_state(vnp) == VN_STATE_EXCLUSIVE);
1465 writeLocked = WriteLocked(&vnp->lock);
1472 VNLog(301, 2, (intptr_t) vnp,
1473 ((vnp->changed_newTime) << 2) | ((vnp->changed_oldTime) << 1) | vnp->
1478 #ifdef AFS_PTHREAD_ENV
1479 thisProcess = pthread_self();
1480 #else /* AFS_PTHREAD_ENV */
1481 LWP_CurrentProcess(&thisProcess);
1482 #endif /* AFS_PTHREAD_ENV */
1483 if (thisProcess != vnp->writer)
1484 Abort("VVnodeWriteToRead_r: Vnode at %"AFS_PTR_FMT
1485 " locked by another process!\n", vnp);
1490 if (vnp->changed_oldTime || vnp->changed_newTime) {
1491 Volume *vp = Vn_volume(vnp);
1492 afs_uint32 now = FT_ApproxTime();
1493 osi_Assert(Vn_cacheCheck(vnp) == vp->cacheCheck);
1494 if (vnp->changed_newTime)
1495 vnp->disk.serverModifyTime = now;
1496 if (vnp->changed_newTime)
1497 V_updateDate(vp) = vp->updateTime = now;
1499 /* The vnode has been changed. Write it out to disk */
1501 #ifdef AFS_DEMAND_ATTACH_FS
1502 VRequestSalvage_r(ec, vp, SALVSYNC_ERROR, 0);
1504 osi_Assert(V_needsSalvaged(vp));
1508 VnStore(ec, vp, vnp, vcp, class);
1511 vnp->changed_newTime = vnp->changed_oldTime = 0;
1515 #ifdef AFS_DEMAND_ATTACH_FS
1516 VnChangeState_r(vnp, VN_STATE_ONLINE);
1519 ConvertWriteToReadLock(&vnp->lock);
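/*
 * Editor's illustrative sketch (kept out of the build with #if 0, not part
 * of the original source): a writer that still needs read access after its
 * update can downgrade in place instead of doing a put/get cycle; the vnode
 * must currently be held with WRITE_LOCK.  example_downgrade is
 * hypothetical.
 */
#if 0
static void
example_downgrade(Error * ec, Vnode * vnp)
{
    /* vnp was obtained with VGetVnode(..., WRITE_LOCK) and modified */
    vnp->changed_newTime = 1;
    VVnodeWriteToRead(ec, vnp);	/* writes the vnode out, keeps a read lock */
    /* ... continue reading vnp->disk under the read lock ... */
    VPutVnode(ec, vnp);		/* drops the read lock and the ref */
}
#endif /* 0 */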
1525 * initial size of ihandle pointer vector.
1527 * @see VInvalidateVnodesByVolume_r
1529 #define IH_VEC_BASE_SIZE 256
1532 * increment amount for growing ihandle pointer vector.
1534 * @see VInvalidateVnodesByVolume_r
1536 #define IH_VEC_INCREMENT 256
1539 * Compile a list of ihandles to be released/reallyclosed at a later time.
1541 * @param[in] vp volume object pointer
1542 * @param[out] vec_out vector of ihandle pointers to be released/reallyclosed
1543 * @param[out] vec_len_out number of valid elements in ihandle vector
1545 * @pre - VOL_LOCK is held
1546 * - volume is in appropriate exclusive state (e.g. VOL_STATE_VNODE_CLOSE,
1547 * VOL_STATE_VNODE_RELEASE)
1549 * @post - all vnodes on VVn list are invalidated
1550 * - ih_vec is populated with all valid ihandles
1552 * @return operation status
1554 * @retval ENOMEM out of memory
1556 * @todo we should handle out of memory conditions more gracefully.
1558 * @internal vnode package internal use only
1561 VInvalidateVnodesByVolume_r(Volume * vp,
1562 IHandle_t *** vec_out,
1563 size_t * vec_len_out)
1567 size_t i = 0, vec_len;
1568 IHandle_t **ih_vec, **ih_vec_new;
1570 #ifdef AFS_DEMAND_ATTACH_FS
1572 #endif /* AFS_DEMAND_ATTACH_FS */
1574 vec_len = IH_VEC_BASE_SIZE;
1575 ih_vec = malloc(sizeof(IHandle_t *) * vec_len);
1576 #ifdef AFS_DEMAND_ATTACH_FS
1583 * Traverse the volume's vnode list. Pull all the ihandles out into a
1584 * thread-private array for later asynchronous processing.
1586 #ifdef AFS_DEMAND_ATTACH_FS
1589 for (queue_Scan(&vp->vnode_list, vnp, nvnp, Vnode)) {
1590 if (vnp->handle != NULL) {
1592 #ifdef AFS_DEMAND_ATTACH_FS
1595 vec_len += IH_VEC_INCREMENT;
1596 ih_vec_new = realloc(ih_vec, sizeof(IHandle_t *) * vec_len);
1597 #ifdef AFS_DEMAND_ATTACH_FS
1600 if (ih_vec_new == NULL) {
1604 ih_vec = ih_vec_new;
1605 #ifdef AFS_DEMAND_ATTACH_FS
1607 * Theoretically, the volume's VVn list should not change
1608 * because the volume is in an exclusive state. For the
1609 * sake of safety, we will restart the traversal from the
1610 * beginning (which is not expensive because we're
1611 * deleting the items from the list as we go).
1613 goto restart_traversal;
1616 ih_vec[i++] = vnp->handle;
1619 DeleteFromVVnList(vnp);
1620 VInvalidateVnode_r(vnp);
1630 /* VCloseVnodeFiles - called when a volume is going off line. All open
1631 * files for vnodes in that volume are closed. This might be excessive,
1632 * since we may only be taking one volume of a volume group offline.
1635 VCloseVnodeFiles_r(Volume * vp)
1637 #ifdef AFS_DEMAND_ATTACH_FS
1638 VolState vol_state_save;
1640 IHandle_t ** ih_vec;
1643 #ifdef AFS_DEMAND_ATTACH_FS
1644 vol_state_save = VChangeState_r(vp, VOL_STATE_VNODE_CLOSE);
1645 #endif /* AFS_DEMAND_ATTACH_FS */
1647 /* XXX need better error handling here */
1648 osi_Assert(VInvalidateVnodesByVolume_r(vp,
1654 * now we drop VOL_LOCK while we perform some potentially very
1655 * expensive operations in the background
1657 #ifdef AFS_DEMAND_ATTACH_FS
1661 for (i = 0; i < vec_len; i++) {
1662 IH_REALLYCLOSE(ih_vec[i]);
1667 #ifdef AFS_DEMAND_ATTACH_FS
1669 VChangeState_r(vp, vol_state_save);
1670 #endif /* AFS_DEMAND_ATTACH_FS */
1675 * shut down all vnode cache state for a given volume.
1677 * @param[in] vp volume object pointer
1679 * @pre VOL_LOCK is held
1681 * @post all file descriptors closed.
1682 * all inode handles released.
1683 * all vnode cache objects disassociated from volume.
1685 * @note for DAFS, these operations are performed outside the vol glock under
1686 * volume exclusive state VOL_STATE_VNODE_RELEASE. Please further note
1687 * that it would be a bug to acquire and release a volume reservation
1688 * during this exclusive operation. This is due to the fact that we are
1689 * generally called during the refcount 1->0 transition.
1691 * @todo we should handle failures in VInvalidateVnodesByVolume_r more
1694 * @see VInvalidateVnodesByVolume_r
1696 * @internal this routine is internal to the volume package
1699 VReleaseVnodeFiles_r(Volume * vp)
1701 #ifdef AFS_DEMAND_ATTACH_FS
1702 VolState vol_state_save;
1704 IHandle_t ** ih_vec;
1707 #ifdef AFS_DEMAND_ATTACH_FS
1708 vol_state_save = VChangeState_r(vp, VOL_STATE_VNODE_RELEASE);
1709 #endif /* AFS_DEMAND_ATTACH_FS */
1711 /* XXX need better error handling here */
1712 osi_Assert(VInvalidateVnodesByVolume_r(vp,
1718 * now we drop VOL_LOCK while we perform some potentially very
1719 * expensive operations in the background
1721 #ifdef AFS_DEMAND_ATTACH_FS
1725 for (i = 0; i < vec_len; i++) {
1726 IH_RELEASE(ih_vec[i]);
1731 #ifdef AFS_DEMAND_ATTACH_FS
1733 VChangeState_r(vp, vol_state_save);
1734 #endif /* AFS_DEMAND_ATTACH_FS */