/*
 * Copyright 2000, International Business Machines Corporation and others.
 * All Rights Reserved.
 *
 * This software has been released under the terms of the IBM Public
 * License.  For details, see the LICENSE file in the top-level source
 * directory or online at http://www.openafs.org/dl/license10.html
 *
 * Portions Copyright (c) 2005-2008 Sine Nomine Associates
 */
/* 1/1/89: NB: this stuff is all going to be replaced. Don't take it too seriously */

/*
	Institution:	The Information Technology Center, Carnegie-Mellon University
 */
#include <afsconfig.h>
#include <afs/param.h>

#include <afs/afsint.h>

#include <sys/param.h>
#if !defined(AFS_SGI_ENV)
#else /* AFS_OSF_ENV */
#ifdef AFS_VFSINCL_ENV
#include <sys/fs/ufs_fs.h>
#if defined(AFS_DARWIN_ENV) || defined(AFS_XBSD_ENV)
#include <ufs/ufs/dinode.h>
#include <ufs/ffs/fs.h>
#else /* AFS_VFSINCL_ENV */
#if !defined(AFS_AIX_ENV) && !defined(AFS_LINUX20_ENV) && !defined(AFS_XBSD_ENV)
#endif /* AFS_VFSINCL_ENV */
#endif /* AFS_OSF_ENV */
#endif /* AFS_SGI_ENV */
#endif /* AFS_NT40_ENV */

#if defined(AFS_SUN_ENV) || defined(AFS_SUN5_ENV)
#include <sys/mnttab.h>
#include <sys/mntent.h>
#if defined(AFS_SGI_ENV)
#ifndef AFS_LINUX20_ENV
#include <fstab.h>		/* Need to find in libc 5, present in libc 6 */
#endif /* AFS_SGI_ENV */
#endif /* AFS_HPUX_ENV */
#include <netinet/in.h>
#include <sys/time.h>
#endif /* ITIMER_REAL */
#endif /* AFS_NT40_ENV */
#if defined(AFS_SUN5_ENV) || defined(AFS_NT40_ENV) || defined(AFS_LINUX20_ENV)
#include <afs/errors.h>
#include <afs/afssyscalls.h>
#include <afs/afsutil.h>
#include "daemon_com.h"
#include "salvsync.h"
#include "partition.h"
#include "volume_inline.h"
#ifdef AFS_PTHREAD_ENV
#else /* AFS_PTHREAD_ENV */
#include "afs/assert.h"
#endif /* AFS_PTHREAD_ENV */
#if !defined(offsetof)
#include <stddef.h>
#endif

#ifdef O_LARGEFILE
#define afs_stat	stat64
#define afs_fstat	fstat64
#define afs_open	open64
#else /* !O_LARGEFILE */
#define afs_stat	stat
#define afs_fstat	fstat
#define afs_open	open
#endif /* !O_LARGEFILE */
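/*
 * Usage sketch (illustrative only, not compiled; it mirrors the attach
 * path later in this file).  The variable "path" is a stand-in for a
 * real volume header path; the wrappers give large-file capable I/O
 * with one spelling:
 */
#if 0
{
    struct afs_stat status;
    int fd = afs_open(path, O_RDONLY);	/* open64() where O_LARGEFILE exists */
    if (fd == -1 || afs_fstat(fd, &status) == -1)
	Log("VAttachVolume: Failed to open %s (errno %d)\n", path, errno);
}
#endif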
#ifdef AFS_PTHREAD_ENV
pthread_mutex_t vol_glock_mutex;
pthread_mutex_t vol_trans_mutex;
pthread_cond_t vol_put_volume_cond;
pthread_cond_t vol_sleep_cond;
int vol_attach_threads = 1;
#endif /* AFS_PTHREAD_ENV */

#ifdef AFS_DEMAND_ATTACH_FS
pthread_mutex_t vol_salvsync_mutex;
#endif /* AFS_DEMAND_ATTACH_FS */

extern void *calloc(), *realloc();

/*@printflike@*/ extern void Log(const char *format, ...);
/* Forward declarations */
static Volume *attach2(Error * ec, VolId vid, char *path,
		       register struct VolumeHeader *header,
		       struct DiskPartition64 *partp, Volume * vp,
		       int isbusy, int mode);
static void ReallyFreeVolume(Volume * vp);
#ifdef AFS_DEMAND_ATTACH_FS
static void FreeVolume(Volume * vp);
#else /* !AFS_DEMAND_ATTACH_FS */
#define FreeVolume(vp) ReallyFreeVolume(vp)
static void VScanUpdateList(void);
#endif /* !AFS_DEMAND_ATTACH_FS */
static void VInitVolumeHeaderCache(afs_uint32 howMany);
static int GetVolumeHeader(register Volume * vp);
static void ReleaseVolumeHeader(register struct volHeader *hd);
static void FreeVolumeHeader(register Volume * vp);
static void AddVolumeToHashTable(register Volume * vp, int hashid);
static void DeleteVolumeFromHashTable(register Volume * vp);
static int VHold(Volume * vp);
static int VHold_r(Volume * vp);
static void VGetBitmap_r(Error * ec, Volume * vp, VnodeClass class);
static void GetVolumePath(Error * ec, VolId volumeId, char **partitionp,
			  char **namep);
static void VReleaseVolumeHandles_r(Volume * vp);
static void VCloseVolumeHandles_r(Volume * vp);
static void LoadVolumeHeader(Error * ec, Volume * vp);
static int VCheckOffline(register Volume * vp);
static int VCheckDetach(register Volume * vp);
static Volume * GetVolume(Error * ec, Error * client_ec, VolId volumeId, Volume * hint, int flags);
static int VolumeExternalName_r(VolumeId volumeId, char * name, size_t len);
int LogLevel;			/* Vice loglevel--not defined as extern so that it will be
				 * defined when not linked with vice, XXXX */
ProgramType programType;	/* The type of program using the package */
/* extended volume package statistics */
VolPkgStats VStats;
#ifdef VOL_LOCK_DEBUG
pthread_t vol_glock_holder = 0;
#endif /* VOL_LOCK_DEBUG */
#define VOLUME_BITMAP_GROWSIZE	16	/* bytes, => 128 vnodes */
					/* Must be a multiple of 4 (1 word)!! */
/* this parameter needs to be tunable at runtime.
 * 128 was really inadequate for largish servers -- at 16384 volumes this
 * puts average chain length at 128, thus an average 65 derefs to find a volptr.
 * talk about bad spatial locality...
 *
 * an AVL or splay tree might work a lot better, but we'll just increase
 * the default hash table size for now
 */
#define DEFAULT_VOLUME_HASH_SIZE 256	/* Must be a power of 2!! */
#define DEFAULT_VOLUME_HASH_MASK (DEFAULT_VOLUME_HASH_SIZE-1)
#define VOLUME_HASH(volumeId) (volumeId&(VolumeHashTable.Mask))
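/*
 * Worked numbers for the comment above: with DEFAULT_VOLUME_HASH_SIZE at
 * 256, those same 16384 volumes hash to an average chain length of
 * 16384/256 = 64, i.e. roughly 32 dereferences for an average lookup.
 */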
/*
 * turn volume hash chains into partially ordered lists.
 * when the threshold is exceeded between two adjacent elements,
 * perform a chain rebalancing operation.
 *
 * keep the threshold high in order to keep cache line invalidates
 * low "enough" on SMPs
 */
#define VOLUME_HASH_REORDER_THRESHOLD 200
/*
 * when possible, don't just reorder single elements, but reorder
 * entire chains of elements at once.  a chain of elements that
 * exceed the element previous to the pivot by at least CHAIN_THRESH
 * accesses are moved in front of the chain whose elements have at
 * least CHAIN_THRESH less accesses than the pivot element
 */
#define VOLUME_HASH_REORDER_CHAIN_THRESH (VOLUME_HASH_REORDER_THRESHOLD / 2)
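/*
 * Illustrative sketch (not compiled; it assumes the demand-attach
 * per-volume lookup counter vp->stats.hash_lookups): the reorder trigger
 * fires when an element vp has grown sufficiently "hotter" than its
 * predecessor pp in the chain, at which point VReorderHash_r (declared
 * below) moves it forward:
 */
#if 0
if ((vp->stats.hash_lookups - pp->stats.hash_lookups) >
    VOLUME_HASH_REORDER_THRESHOLD) {
    VReorderHash_r(head, pp, vp);
}
#endif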
#include "rx/rx_queue.h"

VolumeHashTable_t VolumeHashTable = {
    DEFAULT_VOLUME_HASH_SIZE,
    DEFAULT_VOLUME_HASH_MASK,
    NULL
};

static void VInitVolumeHash(void);
#ifndef AFS_HAVE_FFS
/* This macro is used where an ffs() call does not exist. Was in util/ffs.c */
ffs(x)
{
    afs_int32 ffs_i;
    afs_int32 ffs_tmp = x;
    if (ffs_tmp == 0)
	return (-1);
    else
	for (ffs_i = 1;; ffs_i++) {
	    if (ffs_tmp & 1)
		return (ffs_i);
	    else
		ffs_tmp >>= 1;
	}
}
#endif /* !AFS_HAVE_FFS */
#ifdef AFS_PTHREAD_ENV
typedef struct diskpartition_queue_t {
    struct rx_queue queue;
    struct DiskPartition64 * diskP;
} diskpartition_queue_t;
typedef struct vinitvolumepackage_thread_t {
    struct rx_queue queue;
    pthread_cond_t thread_done_cv;
    int n_threads_complete;
} vinitvolumepackage_thread_t;
static void * VInitVolumePackageThread(void * args);
#endif /* AFS_PTHREAD_ENV */

static int VAttachVolumesByPartition(struct DiskPartition64 *diskP,
				     int * nAttached, int * nUnattached);
#ifdef AFS_DEMAND_ATTACH_FS
/* demand attach fileserver extensions */

/*
 * in the future we will support serialization of VLRU state into the fs_state
 * dump
 *
 * these structures are the beginning of that effort
 */
struct VLRU_DiskHeader {
    struct versionStamp stamp;	/* magic and structure version number */
    afs_uint32 mtime;		/* time of dump to disk */
    afs_uint32 num_records;	/* number of VLRU_DiskEntry records */
};

struct VLRU_DiskEntry {
    afs_uint32 vid;		/* volume ID */
    afs_uint32 idx;		/* generation */
    afs_uint32 last_get;	/* timestamp of last get */
};

struct VLRU_StartupQueue {
    struct VLRU_DiskEntry * entry;
    int num_entries;
    int next_idx;
};
typedef struct vshutdown_thread_t {
    struct rx_queue q;
    pthread_mutex_t lock;
    pthread_cond_t cv;
    pthread_cond_t master_cv;
    int n_threads;
    int n_threads_complete;
    int vol_remaining;
    int schedule_version;
    int pass;
    byte n_parts;
    byte n_parts_done_pass;
    byte part_thread_target[VOLMAXPARTS+1];
    byte part_done_pass[VOLMAXPARTS+1];
    struct rx_queue * part_pass_head[VOLMAXPARTS+1];
    int stats[4][VOLMAXPARTS+1];
} vshutdown_thread_t;
static void * VShutdownThread(void * args);

static Volume * VAttachVolumeByVp_r(Error * ec, Volume * vp, int mode);
static int VCheckFree(Volume * vp);

static void AddVolumeToVByPList_r(Volume * vp);
static void DeleteVolumeFromVByPList_r(Volume * vp);
static void VVByPListBeginExclusive_r(struct DiskPartition64 * dp);
static void VVByPListEndExclusive_r(struct DiskPartition64 * dp);
static void VVByPListWait_r(struct DiskPartition64 * dp);

/* online salvager */
static int VCheckSalvage(register Volume * vp);
static int VUpdateSalvagePriority_r(Volume * vp);
static int VScheduleSalvage_r(Volume * vp);
static int VCancelSalvage_r(Volume * vp, int reason);

/* Volume hash table */
static void VReorderHash_r(VolumeHashChainHead * head, Volume * pp, Volume * vp);
static void VHashBeginExclusive_r(VolumeHashChainHead * head);
static void VHashEndExclusive_r(VolumeHashChainHead * head);
static void VHashWait_r(VolumeHashChainHead * head);

static int ShutdownVByPForPass_r(struct DiskPartition64 * dp, int pass);
static int ShutdownVolumeWalk_r(struct DiskPartition64 * dp, int pass,
				struct rx_queue ** idx);
static void ShutdownController(vshutdown_thread_t * params);
static void ShutdownCreateSchedule(vshutdown_thread_t * params);

static void VLRU_ComputeConstants(void);
static void VInitVLRU(void);
static void VLRU_Init_Node_r(volatile Volume * vp);
static void VLRU_Add_r(volatile Volume * vp);
static void VLRU_Delete_r(volatile Volume * vp);
static void VLRU_UpdateAccess_r(volatile Volume * vp);
static void * VLRU_ScannerThread(void * args);
static void VLRU_Scan_r(int idx);
static void VLRU_Promote_r(int idx);
static void VLRU_Demote_r(int idx);
static void VLRU_SwitchQueues(volatile Volume * vp, int new_idx, int append);

static int VCheckSoftDetach(volatile Volume * vp, afs_uint32 thresh);
static int VCheckSoftDetachCandidate(volatile Volume * vp, afs_uint32 thresh);
static int VSoftDetachVolume_r(volatile Volume * vp, afs_uint32 thresh);
#endif /* AFS_DEMAND_ATTACH_FS */
struct Lock vol_listLock;	/* Lock obtained when listing volumes:
				 * prevents a volume from being missed
				 * if the volume is attached during a
				 * list of volumes */

static int TimeZoneCorrection;	/* Number of seconds west of GMT */
/* Common message used when the volume goes off line */
char *VSalvageMessage =
    "Files in this volume are currently unavailable; call operations";

int VInit;			/* 0 - uninitialized,
				 * 1 - initialized but not all volumes have been attached,
				 * 2 - initialized and all volumes have been attached,
				 * 3 - initialized, all volumes have been attached, and
				 * VConnectFS() has completed. */
bit32 VolumeCacheCheck;		/* Incremented every time a volume goes on line--
				 * used to stamp volume headers and in-core
				 * vnodes.  When the volume goes on-line the
				 * vnode will be invalidated
				 * access only with VOL_LOCK held */
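/*
 * Sketch (assumed consumer logic, not from this file): a cached vnode
 * stamped with an older cacheCheck than its volume is stale, roughly:
 *
 *     if (vnp->cacheCheck != vp->cacheCheck)
 *         reload the vnode -- it predates the volume's last attach
 */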
/***************************************************/
/* Startup routines                                */
/***************************************************/
int
VInitVolumePackage(ProgramType pt, afs_uint32 nLargeVnodes, afs_uint32 nSmallVnodes,
		   int connect, afs_uint32 volcache)
{
    int errors = 0;		/* Number of errors while finding vice partitions. */
    struct timeval tv;
    struct timezone tz;

    programType = pt;

    memset(&VStats, 0, sizeof(VStats));
    VStats.hdr_cache_size = 200;

    VInitPartitionPackage();

#ifdef AFS_DEMAND_ATTACH_FS
    if (programType == fileServer) {
	VLRU_ComputeConstants();
	VInitVLRU();
    } else {
	VLRU_SetOptions(VLRU_SET_ENABLED, 0);
    }
#endif

#ifdef AFS_PTHREAD_ENV
    assert(pthread_mutex_init(&vol_glock_mutex, NULL) == 0);
    assert(pthread_mutex_init(&vol_trans_mutex, NULL) == 0);
    assert(pthread_cond_init(&vol_put_volume_cond, NULL) == 0);
    assert(pthread_cond_init(&vol_sleep_cond, NULL) == 0);
#else /* AFS_PTHREAD_ENV */
    IOMGR_Initialize();
#endif /* AFS_PTHREAD_ENV */
    Lock_Init(&vol_listLock);

    srandom(time(0));		/* For VGetVolumeInfo */
    gettimeofday(&tv, &tz);
    TimeZoneCorrection = tz.tz_minuteswest * 60;
#ifdef AFS_DEMAND_ATTACH_FS
    assert(pthread_mutex_init(&vol_salvsync_mutex, NULL) == 0);
#endif /* AFS_DEMAND_ATTACH_FS */

    /* Ok, we have done enough initialization that fileserver can
     * start accepting calls, even though the volumes may not be
     * available just yet.
     */
    VInit = 1;

#if defined(AFS_DEMAND_ATTACH_FS) && defined(SALVSYNC_BUILD_SERVER)
    if (programType == salvageServer) {
	SALVSYNC_salvInit();
    }
#endif /* AFS_DEMAND_ATTACH_FS */
#ifdef FSSYNC_BUILD_SERVER
    if (programType == fileServer) {
	FSYNC_fsInit();
    }
#endif
#if defined(AFS_DEMAND_ATTACH_FS) && defined(SALVSYNC_BUILD_CLIENT)
    if (programType == fileServer) {
	/* establish a connection to the salvager at this point */
	assert(VConnectSALV() != 0);
    }
#endif /* AFS_DEMAND_ATTACH_FS */

    if (volcache > VStats.hdr_cache_size)
	VStats.hdr_cache_size = volcache;
    VInitVolumeHeaderCache(VStats.hdr_cache_size);

    VInitVnodes(vLarge, nLargeVnodes);
    VInitVnodes(vSmall, nSmallVnodes);

    errors = VAttachPartitions();
    if (errors)
	return -1;
    if (programType == fileServer) {
	struct DiskPartition64 *diskP;
#ifdef AFS_PTHREAD_ENV
	struct vinitvolumepackage_thread_t params;
	struct diskpartition_queue_t * dpq;
	int i, threads, parts;
	pthread_t tid;
	pthread_attr_t attrs;

	assert(pthread_cond_init(&params.thread_done_cv, NULL) == 0);
	queue_Init(&params);
	params.n_threads_complete = 0;

	/* create partition work queue */
	for (parts=0, diskP = DiskPartitionList; diskP; diskP = diskP->next, parts++) {
	    dpq = (diskpartition_queue_t *) malloc(sizeof(struct diskpartition_queue_t));
	    assert(dpq != NULL);
	    dpq->diskP = diskP;
	    queue_Append(&params, dpq);
	}

	threads = MIN(parts, vol_attach_threads);

	if (threads > 1) {
	    /* spawn off a bunch of initialization threads */
	    assert(pthread_attr_init(&attrs) == 0);
	    assert(pthread_attr_setdetachstate(&attrs, PTHREAD_CREATE_DETACHED) == 0);

	    Log("VInitVolumePackage: beginning parallel fileserver startup\n");
#ifdef AFS_DEMAND_ATTACH_FS
	    Log("VInitVolumePackage: using %d threads to pre-attach volumes on %d partitions\n",
		threads, parts);
#else /* AFS_DEMAND_ATTACH_FS */
	    Log("VInitVolumePackage: using %d threads to attach volumes on %d partitions\n",
		threads, parts);
#endif /* AFS_DEMAND_ATTACH_FS */

	    for (i=0; i < threads; i++) {
		assert(pthread_create
		       (&tid, &attrs, &VInitVolumePackageThread,
			&params) == 0);
	    }

	    VOL_LOCK;
	    while(params.n_threads_complete < threads) {
		VOL_CV_WAIT(&params.thread_done_cv);
	    }
	    VOL_UNLOCK;

	    assert(pthread_attr_destroy(&attrs) == 0);
	} else {
	    /* if we're only going to run one init thread, don't bother creating
	     * a pool */
	    Log("VInitVolumePackage: beginning single-threaded fileserver startup\n");
#ifdef AFS_DEMAND_ATTACH_FS
	    Log("VInitVolumePackage: using 1 thread to pre-attach volumes on %d partition(s)\n",
		parts);
#else /* AFS_DEMAND_ATTACH_FS */
	    Log("VInitVolumePackage: using 1 thread to attach volumes on %d partition(s)\n",
		parts);
#endif /* AFS_DEMAND_ATTACH_FS */

	    VInitVolumePackageThread(&params);
	}

	assert(pthread_cond_destroy(&params.thread_done_cv) == 0);
#else /* AFS_PTHREAD_ENV */

	/* Attach all the volumes in this partition */
	for (diskP = DiskPartitionList; diskP; diskP = diskP->next) {
	    int nAttached = 0, nUnattached = 0;
	    assert(VAttachVolumesByPartition(diskP, &nAttached, &nUnattached) == 0);
	}
#endif /* AFS_PTHREAD_ENV */
    }

    VInit = 2;			/* Initialized, and all volumes have been attached */
#ifdef FSSYNC_BUILD_CLIENT
    if (programType == volumeUtility && connect) {
	if (!VConnectFS()) {
	    Log("Unable to connect to file server; aborted\n");
	    exit(1);
	}
    }
#ifdef AFS_DEMAND_ATTACH_FS
    else if (programType == salvageServer) {
	if (!VConnectFS()) {
	    Log("Unable to connect to file server; aborted\n");
	    exit(1);
	}
    }
#endif /* AFS_DEMAND_ATTACH_FS */
#endif /* FSSYNC_BUILD_CLIENT */
    return 0;
}
#ifdef AFS_PTHREAD_ENV
static void *
VInitVolumePackageThread(void * args) {
    int errors = 0;		/* Number of errors while finding vice partitions. */

    struct DiskPartition64 *diskP;
    struct vinitvolumepackage_thread_t * params;
    struct diskpartition_queue_t * dpq;

    params = (vinitvolumepackage_thread_t *) args;

    VOL_LOCK;

    /* Attach all the volumes in this partition */
    while (queue_IsNotEmpty(params)) {
	int nAttached = 0, nUnattached = 0;

	dpq = queue_First(params, diskpartition_queue_t);
	queue_Remove(dpq);

	diskP = dpq->diskP;
	free(dpq);

	VOL_UNLOCK;

	assert(VAttachVolumesByPartition(diskP, &nAttached, &nUnattached) == 0);

	VOL_LOCK;
    }

    params->n_threads_complete++;
    pthread_cond_signal(&params->thread_done_cv);
    VOL_UNLOCK;

    return NULL;
}
#endif /* AFS_PTHREAD_ENV */
/*
 * attach all volumes on a given disk partition
 */
static int
VAttachVolumesByPartition(struct DiskPartition64 *diskP, int * nAttached, int * nUnattached)
{
    DIR * dirp;
    struct dirent * dp;
    int ret = 0;

    Log("Partition %s: attaching volumes\n", diskP->name);
    dirp = opendir(VPartitionPath(diskP));
    if (!dirp) {
	Log("opendir on Partition %s failed!\n", diskP->name);
	return 1;
    }

    while ((dp = readdir(dirp))) {
	char *p;
	p = strrchr(dp->d_name, '.');
	if (p != NULL && strcmp(p, VHDREXT) == 0) {
	    Error error;
	    Volume *vp;
#ifdef AFS_DEMAND_ATTACH_FS
	    vp = VPreAttachVolumeByName(&error, diskP->name, dp->d_name);
#else /* AFS_DEMAND_ATTACH_FS */
	    vp = VAttachVolumeByName(&error, diskP->name, dp->d_name,
				     V_VOLUPD);
#endif /* AFS_DEMAND_ATTACH_FS */
	    (*(vp ? nAttached : nUnattached))++;
	    if (error == VOFFLINE)
		Log("Volume %d stays offline (/vice/offline/%s exists)\n", VolumeNumber(dp->d_name), dp->d_name);
	    else if (LogLevel >= 5) {
		Log("Partition %s: attached volume %d (%s)\n",
		    diskP->name, VolumeNumber(dp->d_name),
		    dp->d_name);
	    }
#if !defined(AFS_DEMAND_ATTACH_FS)
	    if (vp) {
		VPutVolume(vp);
	    }
#endif /* AFS_DEMAND_ATTACH_FS */
	}
    }

    Log("Partition %s: attached %d volumes; %d volumes not attached\n", diskP->name, *nAttached, *nUnattached);
    closedir(dirp);
    return ret;
}
/***************************************************/
/* Shutdown routines                               */
/***************************************************/
/*
 * highly multithreaded volume package shutdown
 *
 * with the demand attach fileserver extensions,
 * VShutdown has been modified to be multithreaded.
 * In order to achieve optimal use of many threads,
 * the shutdown code involves one control thread and
 * n shutdown worker threads.  The control thread
 * periodically examines the number of volumes available
 * for shutdown on each partition, and produces a worker
 * thread allocation schedule.  The idea is to eliminate
 * redundant scheduling computation on the workers by
 * having a single master scheduler.
 *
 * The scheduler's objectives are:
 * (1) fairness
 *   each partition with volumes remaining gets allocated
 *   at least 1 thread (assuming sufficient threads)
 * (2) performance
 *   threads are allocated proportional to the number of
 *   volumes remaining to be offlined.  This ensures that
 *   the OS I/O scheduler has many requests to elevator
 *   seek on partitions that will (presumably) take the
 *   longest amount of time (from now) to finish shutdown
 * (3) keep threads busy
 *   when there are extra threads, they are assigned to
 *   partitions using a simple round-robin algorithm
 *
 * In the future, we may wish to add the ability to adapt
 * to the relative performance patterns of each disk
 * partition.
 */

/*
 * multi-step shutdown process
 *
 * demand attach shutdown is a four-step process.  Each
 * shutdown "pass" shuts down increasingly more difficult
 * volumes.  The main purpose is to achieve better cache
 * utilization during shutdown.
 *
 * pass 0
 *   shutdown volumes in the unattached, pre-attached
 *   and error states
 * pass 1
 *   shutdown attached volumes with cached volume headers
 * pass 2
 *   shutdown all volumes in non-exclusive states
 * pass 3
 *   shutdown all remaining volumes
 */
void
VShutdown_r(void)
{
    int i;
    register Volume *vp, *np;
    register afs_int32 code;
#ifdef AFS_DEMAND_ATTACH_FS
    struct DiskPartition64 * diskP;
    struct diskpartition_queue_t * dpq;
    vshutdown_thread_t params;
    pthread_t tid;
    pthread_attr_t attrs;

    memset(&params, 0, sizeof(vshutdown_thread_t));

    for (params.n_parts=0, diskP = DiskPartitionList;
	 diskP; diskP = diskP->next, params.n_parts++);

    Log("VShutdown: shutting down on-line volumes on %d partition%s...\n",
	params.n_parts, params.n_parts > 1 ? "s" : "");
    if (vol_attach_threads > 1) {
	/* prepare for parallel shutdown */
	params.n_threads = vol_attach_threads;
	assert(pthread_mutex_init(&params.lock, NULL) == 0);
	assert(pthread_cond_init(&params.cv, NULL) == 0);
	assert(pthread_cond_init(&params.master_cv, NULL) == 0);
	assert(pthread_attr_init(&attrs) == 0);
	assert(pthread_attr_setdetachstate(&attrs, PTHREAD_CREATE_DETACHED) == 0);
	queue_Init(&params);

	/* setup the basic partition information structures for
	 * parallel shutdown */
	for (diskP = DiskPartitionList; diskP; diskP = diskP->next) {
	    struct rx_queue * qp, * nqp;
	    Volume * vp;
	    int count = 0;

	    VVByPListWait_r(diskP);
	    VVByPListBeginExclusive_r(diskP);

	    for (queue_Scan(&diskP->vol_list, qp, nqp, rx_queue)) {
		vp = (Volume *)((char *)qp - offsetof(Volume, vol_list));
		if (vp->header)
		    count++;
	    }
	    Log("VShutdown: partition %s has %d volumes with attached headers\n",
		VPartitionPath(diskP), count);

	    /* build up the pass 0 shutdown work queue */
	    dpq = (struct diskpartition_queue_t *) malloc(sizeof(struct diskpartition_queue_t));
	    assert(dpq != NULL);
	    dpq->diskP = diskP;
	    queue_Prepend(&params, dpq);

	    params.part_pass_head[diskP->device] = queue_First(&diskP->vol_list, rx_queue);
	}

	Log("VShutdown: beginning parallel fileserver shutdown\n");
	Log("VShutdown: using %d threads to offline volumes on %d partition%s\n",
	    vol_attach_threads, params.n_parts, params.n_parts > 1 ? "s" : "" );
	/* do pass 0 shutdown */
	assert(pthread_mutex_lock(&params.lock) == 0);
	for (i=0; i < params.n_threads; i++) {
	    assert(pthread_create
		   (&tid, &attrs, &VShutdownThread,
		    &params) == 0);
	}

	/* wait for all the pass 0 shutdowns to complete */
	while (params.n_threads_complete < params.n_threads) {
	    assert(pthread_cond_wait(&params.master_cv, &params.lock) == 0);
	}
	params.n_threads_complete = 0;
	params.pass = 1;
	assert(pthread_cond_broadcast(&params.cv) == 0);
	assert(pthread_mutex_unlock(&params.lock) == 0);

	Log("VShutdown: pass 0 completed using the 1 thread per partition algorithm\n");
	Log("VShutdown: starting passes 1 through 3 using finely-granular mp-fast algorithm\n");

	/* run the parallel shutdown scheduler. it will drop the glock internally */
	ShutdownController(&params);

	/* wait for all the workers to finish pass 3 and terminate */
	while (params.pass < 4) {
	    VOL_CV_WAIT(&params.cv);
	}

	assert(pthread_attr_destroy(&attrs) == 0);
	assert(pthread_cond_destroy(&params.cv) == 0);
	assert(pthread_cond_destroy(&params.master_cv) == 0);
	assert(pthread_mutex_destroy(&params.lock) == 0);

	/* drop the VByPList exclusive reservations */
	for (diskP = DiskPartitionList; diskP; diskP = diskP->next) {
	    VVByPListEndExclusive_r(diskP);
	    Log("VShutdown: %s stats : (pass[0]=%d, pass[1]=%d, pass[2]=%d, pass[3]=%d)\n",
		VPartitionPath(diskP),
		params.stats[0][diskP->device],
		params.stats[1][diskP->device],
		params.stats[2][diskP->device],
		params.stats[3][diskP->device]);
	}

	Log("VShutdown: shutdown finished using %d threads\n", params.n_threads);
    } else {
	/* if we're only going to run one shutdown thread, don't bother creating
	 * any other threads */
	Log("VShutdown: beginning single-threaded fileserver shutdown\n");

	for (diskP = DiskPartitionList; diskP; diskP = diskP->next) {
	    VShutdownByPartition_r(diskP);
	}
    }

    Log("VShutdown: complete.\n");
#else /* AFS_DEMAND_ATTACH_FS */
    Log("VShutdown: shutting down on-line volumes...\n");
    for (i = 0; i < VolumeHashTable.Size; i++) {
	/* try to hold first volume in the hash table */
	for (queue_Scan(&VolumeHashTable.Table[i], vp, np, Volume)) {
	    code = VHold_r(vp);
	    if (code == 0) {
		if (LogLevel >= 5)
		    Log("VShutdown: Attempting to take volume %u offline.\n",
			vp->hashid);

		/* next, take the volume offline (drops reference count) */
		VOffline_r(vp, "File server was shut down");
	    }
	}
    }
    Log("VShutdown: complete.\n");
#endif /* AFS_DEMAND_ATTACH_FS */
}

void
VShutdown(void)
{
    VOL_LOCK;
    VShutdown_r();
    VOL_UNLOCK;
}
#ifdef AFS_DEMAND_ATTACH_FS
/*
 * shutdown control thread
 */
static void
ShutdownController(vshutdown_thread_t * params)
{
    struct DiskPartition64 * diskP;
    Device id;
    vshutdown_thread_t shadow;

    ShutdownCreateSchedule(params);

    while ((params->pass < 4) &&
	   (params->n_threads_complete < params->n_threads)) {
	/* recompute schedule once per second */
	sleep(1);

	memcpy(&shadow, params, sizeof(vshutdown_thread_t));

	Log("ShutdownController: schedule version=%d, vol_remaining=%d, pass=%d\n",
	    shadow.schedule_version, shadow.vol_remaining, shadow.pass);
	Log("ShutdownController: n_threads_complete=%d, n_parts_done_pass=%d\n",
	    shadow.n_threads_complete, shadow.n_parts_done_pass);
	for (diskP = DiskPartitionList; diskP; diskP=diskP->next) {
	    id = diskP->device;
	    Log("ShutdownController: part[%d] : (len=%d, thread_target=%d, done_pass=%d, pass_head=%p)\n",
		id,
		diskP->vol_list.len,
		shadow.part_thread_target[id],
		shadow.part_done_pass[id],
		shadow.part_pass_head[id]);
	}

	ShutdownCreateSchedule(params);
    }
}
/* create the shutdown thread work schedule.
 * this scheduler tries to implement fairness
 * by allocating at least 1 thread to each
 * partition with volumes to be shutdown,
 * and then it attempts to allocate remaining
 * threads based upon the amount of work left
 */
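/*
 * Worked example (illustrative numbers only): with n_threads = 8 and
 * three partitions holding 120, 60 and 20 volumes, sum = 200 and
 * thr_workload = ceil(200/8) = 25.  The fairness step hands each
 * partition one thread (5 left); the length-weighted step then adds
 * len/thr_workload - 1 threads, i.e. +3 and +1 for the first two
 * partitions (the third would go negative and gets nothing), leaving
 * one thread and targets of 4/2/1.  The residues are 120-100=20,
 * 60-50=10 and 20-25<0, so the last thread goes to the partition with
 * the highest residue, for a final schedule of 5/2/1.
 */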
static void
ShutdownCreateSchedule(vshutdown_thread_t * params)
{
    struct DiskPartition64 * diskP;
    int sum, thr_workload, thr_left;
    int part_residue[VOLMAXPARTS+1];
    Device id;

    /* compute the total number of outstanding volumes */
    sum = 0;
    for (diskP = DiskPartitionList; diskP; diskP = diskP->next) {
	sum += diskP->vol_list.len;
    }

    params->schedule_version++;
    params->vol_remaining = sum;

    if (!sum)
	return;

    /* compute average per-thread workload */
    thr_workload = sum / params->n_threads;
    if (sum % params->n_threads)
	thr_workload++;

    thr_left = params->n_threads;
    memset(&part_residue, 0, sizeof(part_residue));

    /* for fairness, give every partition with volumes remaining
     * at least one thread */
    for (diskP = DiskPartitionList; diskP && thr_left; diskP = diskP->next) {
	id = diskP->device;
	if (diskP->vol_list.len) {
	    params->part_thread_target[id] = 1;
	    thr_left--;
	} else {
	    params->part_thread_target[id] = 0;
	}
    }

    if (thr_left && thr_workload) {
	/* compute length-weighted workloads */
	int delta;

	for (diskP = DiskPartitionList; diskP && thr_left; diskP = diskP->next) {
	    id = diskP->device;
	    delta = (diskP->vol_list.len / thr_workload) -
		params->part_thread_target[id];
	    if (delta < 0) {
		continue;
	    }
	    if (delta < thr_left) {
		params->part_thread_target[id] += delta;
		thr_left -= delta;
	    } else {
		params->part_thread_target[id] += thr_left;
		thr_left = 0;
		break;
	    }
	}
    }

    if (thr_left) {
	/* try to assign any leftover threads to partitions that
	 * had volume lengths closer to needing thread_target+1 */
	int max_residue, max_id;

	/* compute the residues */
	for (diskP = DiskPartitionList; diskP; diskP = diskP->next) {
	    id = diskP->device;
	    part_residue[id] = diskP->vol_list.len -
		(params->part_thread_target[id] * thr_workload);
	}

	/* now try to allocate remaining threads to partitions with the
	 * highest residues */
	max_id = 0;
	while (thr_left) {
	    max_residue = 0;
	    for (diskP = DiskPartitionList; diskP; diskP = diskP->next) {
		id = diskP->device;
		if (part_residue[id] > max_residue) {
		    max_residue = part_residue[id];
		    max_id = id;
		}
	    }

	    if (!max_residue) {
		break;
	    }

	    params->part_thread_target[max_id]++;
	    thr_left--;
	    part_residue[max_id] = 0;
	}
    }

    if (thr_left) {
	/* punt and give any remaining threads equally to each partition */
	int alloc;
	if (thr_left >= params->n_parts) {
	    alloc = thr_left / params->n_parts;
	    for (diskP = DiskPartitionList; diskP; diskP = diskP->next) {
		id = diskP->device;
		params->part_thread_target[id] += alloc;
	    }
	    thr_left -= (alloc * params->n_parts);
	}

	/* finish off the last of the threads */
	for (diskP = DiskPartitionList; thr_left && diskP; diskP = diskP->next) {
	    id = diskP->device;
	    params->part_thread_target[id]++;
	    thr_left--;
	}
    }
}
/* worker thread for parallel shutdown */
static void *
VShutdownThread(void * args)
{
    struct rx_queue *qp;
    Volume * vp;
    vshutdown_thread_t * params;
    int part, code, found, pass, schedule_version_save, count;
    struct DiskPartition64 *diskP;
    struct diskpartition_queue_t * dpq;
    Device id;

    params = (vshutdown_thread_t *) args;

    /* acquire the shutdown pass 0 lock */
    assert(pthread_mutex_lock(&params->lock) == 0);

    /* if there's still pass 0 work to be done,
     * get a work entry, and do a pass 0 shutdown */
    if (queue_IsNotEmpty(params)) {
	dpq = queue_First(params, diskpartition_queue_t);
	queue_Remove(dpq);
	assert(pthread_mutex_unlock(&params->lock) == 0);
	diskP = dpq->diskP;
	free(dpq);
	id = diskP->device;

	count = 0;
	while (ShutdownVolumeWalk_r(diskP, 0, &params->part_pass_head[id]))
	    count++;
	params->stats[0][diskP->device] = count;
	assert(pthread_mutex_lock(&params->lock) == 0);
    }

    params->n_threads_complete++;
    if (params->n_threads_complete == params->n_threads) {
	/* notify control thread that all workers have completed pass 0 */
	assert(pthread_cond_signal(&params->master_cv) == 0);
    }
    while (params->pass == 0) {
	assert(pthread_cond_wait(&params->cv, &params->lock) == 0);
    }

    /* switch locks */
    assert(pthread_mutex_unlock(&params->lock) == 0);
    VOL_LOCK;

    pass = params->pass;
    assert(pass > 0);

    /* now escalate through the more complicated shutdowns */
    while (pass <= 3) {
	schedule_version_save = params->schedule_version;
	found = 0;
	/* find a disk partition to work on */
	for (diskP = DiskPartitionList; diskP; diskP = diskP->next) {
	    id = diskP->device;
	    if (params->part_thread_target[id] && !params->part_done_pass[id]) {
		params->part_thread_target[id]--;
		found = 1;
		break;
	    }
	}

	if (!found) {
	    /* hmm. for some reason the controller thread couldn't find anything for
	     * us to do. let's see if there's anything we can do */
	    for (diskP = DiskPartitionList; diskP; diskP = diskP->next) {
		id = diskP->device;
		if (diskP->vol_list.len && !params->part_done_pass[id]) {
		    found = 1;
		    break;
		} else if (!params->part_done_pass[id]) {
		    params->part_done_pass[id] = 1;
		    params->n_parts_done_pass++;
		    if (pass == 3) {
			Log("VShutdown: done shutting down volumes on partition %s.\n",
			    VPartitionPath(diskP));
		    }
		}
	    }
	}

	/* do work on this partition until either the controller
	 * creates a new schedule, or we run out of things to do
	 * on this partition */
	if (found) {
	    count = 0;
	    while (!params->part_done_pass[id] &&
		   (schedule_version_save == params->schedule_version)) {
		/* ShutdownVolumeWalk_r will drop the glock internally */
		if (!ShutdownVolumeWalk_r(diskP, pass, &params->part_pass_head[id])) {
		    if (!params->part_done_pass[id]) {
			params->part_done_pass[id] = 1;
			params->n_parts_done_pass++;
			if (pass == 3) {
			    Log("VShutdown: done shutting down volumes on partition %s.\n",
				VPartitionPath(diskP));
			}
		    }
		    break;
		}
		count++;
	    }

	    params->stats[pass][id] += count;
	} else {
	    /* ok, everyone is done this pass, proceed */

	    /* barrier lock */
	    params->n_threads_complete++;
	    while (params->pass == pass) {
		if (params->n_threads_complete == params->n_threads) {
		    /* we are the last thread to complete, so we will
		     * reinitialize worker pool state for the next pass */
		    params->n_threads_complete = 0;
		    params->n_parts_done_pass = 0;
		    params->pass++;
		    for (diskP = DiskPartitionList; diskP; diskP = diskP->next) {
			id = diskP->device;
			params->part_done_pass[id] = 0;
			params->part_pass_head[id] = queue_First(&diskP->vol_list, rx_queue);
		    }

		    /* compute a new thread schedule before releasing all the workers */
		    ShutdownCreateSchedule(params);

		    /* wake up all the workers */
		    assert(pthread_cond_broadcast(&params->cv) == 0);

		    VOL_UNLOCK;
		    Log("VShutdown: pass %d completed using %d threads on %d partitions\n",
			pass, params->n_threads, params->n_parts);
		    VOL_LOCK;
		} else {
		    VOL_CV_WAIT(&params->cv);
		}
	    }
	    pass = params->pass;
	}
    }

    VOL_UNLOCK;

    return NULL;
}
/* shut down all volumes on a given disk partition
 *
 * note that this function will not allow mp-fast
 * shutdown of a partition */
static void
VShutdownByPartition_r(struct DiskPartition64 * dp)
{
    int pass, total;
    int pass_stats[4];

    /* wait for other exclusive ops to finish */
    VVByPListWait_r(dp);

    /* begin exclusive access */
    VVByPListBeginExclusive_r(dp);

    /* pick the low-hanging fruit first,
     * then do the complicated ones last
     * (has the advantage of keeping
     * in-use volumes up until the bitter end) */
    for (pass = 0, total=0; pass < 4; pass++) {
	pass_stats[pass] = ShutdownVByPForPass_r(dp, pass);
	total += pass_stats[pass];
    }

    /* end exclusive access */
    VVByPListEndExclusive_r(dp);

    Log("VShutdownByPartition: shut down %d volumes on %s (pass[0]=%d, pass[1]=%d, pass[2]=%d, pass[3]=%d)\n",
	total, VPartitionPath(dp), pass_stats[0], pass_stats[1], pass_stats[2], pass_stats[3]);
}
/* internal shutdown functionality
 *
 * for multi-pass shutdown:
 * 0 to only "shutdown" {pre,un}attached and error state volumes
 * 1 to also shutdown attached volumes w/ volume header loaded
 * 2 to also shutdown attached volumes w/o volume header loaded
 * 3 to also shutdown exclusive state volumes
 *
 * caller MUST hold exclusive access on the hash chain
 * because we drop vol_glock_mutex internally
 *
 * this function is reentrant for passes 1--3
 * (e.g. multiple threads can cooperate to
 * shutdown a partition mp-fast)
 *
 * pass 0 is not scaleable because the volume state data is
 * synchronized by vol_glock mutex, and the locking overhead
 * is too high to drop the lock long enough to do linked list
 * traversals
 */
static int
ShutdownVByPForPass_r(struct DiskPartition64 * dp, int pass)
{
    struct rx_queue * q = queue_First(&dp->vol_list, rx_queue);
    register int i = 0;

    while (ShutdownVolumeWalk_r(dp, pass, &q))
	i++;

    return i;
}
/* conditionally shutdown one volume on partition dp
 * returns 1 if a volume was shutdown in this pass,
 * 0 otherwise */
static int
ShutdownVolumeWalk_r(struct DiskPartition64 * dp, int pass,
		     struct rx_queue ** idx)
{
    struct rx_queue *qp, *nqp;
    Volume * vp;

    qp = *idx;
    for (queue_ScanFrom(&dp->vol_list, qp, qp, nqp, rx_queue)) {
	vp = (Volume *) (((char *)qp) - offsetof(Volume, vol_list));

	switch (pass) {
	case 0:
	    if ((V_attachState(vp) != VOL_STATE_UNATTACHED) &&
		(V_attachState(vp) != VOL_STATE_ERROR) &&
		(V_attachState(vp) != VOL_STATE_PREATTACHED)) {
		break;
	    }
	case 1:
	    if ((V_attachState(vp) == VOL_STATE_ATTACHED) &&
		(vp->header == NULL)) {
		break;
	    }
	case 2:
	    if (VIsExclusiveState(V_attachState(vp))) {
		break;
	    }
	case 3:
	    *idx = nqp;
	    DeleteVolumeFromVByPList_r(vp);
	    VShutdownVolume_r(vp);
	    vp = NULL;
	    return 1;
	}
    }

    return 0;
}
/*
 * shutdown a specific volume
 */
/* caller MUST NOT hold a heavyweight ref on vp */
int
VShutdownVolume_r(Volume * vp)
{
    int code;

    VCreateReservation_r(vp);

    if (LogLevel >= 5) {
	Log("VShutdownVolume_r: vid=%u, device=%d, state=%hu\n",
	    vp->hashid, vp->partition->device, V_attachState(vp));
    }

    /* wait for other blocking ops to finish */
    VWaitExclusiveState_r(vp);

    assert(VIsValidState(V_attachState(vp)));

    switch(V_attachState(vp)) {
    case VOL_STATE_SALVAGING:
	/* make sure salvager knows we don't want
	 * the volume back */
	VCancelSalvage_r(vp, SALVSYNC_SHUTDOWN);
    case VOL_STATE_PREATTACHED:
    case VOL_STATE_ERROR:
	VChangeState_r(vp, VOL_STATE_UNATTACHED);
    case VOL_STATE_UNATTACHED:
	break;

    case VOL_STATE_GOING_OFFLINE:
    case VOL_STATE_SHUTTING_DOWN:
    case VOL_STATE_ATTACHED:
	code = VHold_r(vp);
	if (!code) {
	    if (LogLevel >= 5)
		Log("VShutdown: Attempting to take volume %u offline.\n",
		    vp->hashid);

	    /* take the volume offline (drops reference count) */
	    VOffline_r(vp, "File server was shut down");
	}
	break;
    }

    VCancelReservation_r(vp);
    vp = NULL;
    return 0;
}
#endif /* AFS_DEMAND_ATTACH_FS */


/***************************************************/
/* Header I/O routines                             */
/***************************************************/
/* open a descriptor for the inode (h),
 * read in an on-disk structure into buffer (to) of size (size),
 * verify versionstamp in structure has magic (magic) and
 * optionally verify version (version) if (version) is nonzero
 */
static void
ReadHeader(Error * ec, IHandle_t * h, char *to, int size, bit32 magic,
	   bit32 version)
{
    struct versionStamp *vsn;
    FdHandle_t *fdP;

    *ec = 0;
    if (h == NULL) {
	*ec = VSALVAGE;
	return;
    }

    fdP = IH_OPEN(h);
    if (fdP == NULL) {
	*ec = VSALVAGE;
	return;
    }

    if (FDH_SEEK(fdP, 0, SEEK_SET) < 0) {
	*ec = VSALVAGE;
	FDH_REALLYCLOSE(fdP);
	return;
    }
    vsn = (struct versionStamp *)to;
    if (FDH_READ(fdP, to, size) != size || vsn->magic != magic) {
	*ec = VSALVAGE;
	FDH_REALLYCLOSE(fdP);
	return;
    }
    FDH_CLOSE(fdP);

    /* Check is conditional, in case caller wants to inspect version himself */
    if (version && vsn->version != version) {
	*ec = VSALVAGE;
    }
}
void
WriteVolumeHeader_r(Error * ec, Volume * vp)
{
    IHandle_t *h = V_diskDataHandle(vp);
    FdHandle_t *fdP;

    *ec = 0;

    fdP = IH_OPEN(h);
    if (fdP == NULL) {
	*ec = VSALVAGE;
	return;
    }
    if (FDH_SEEK(fdP, 0, SEEK_SET) < 0) {
	*ec = VSALVAGE;
	FDH_REALLYCLOSE(fdP);
	return;
    }
    if (FDH_WRITE(fdP, (char *)&V_disk(vp), sizeof(V_disk(vp)))
	!= sizeof(V_disk(vp))) {
	*ec = VSALVAGE;
	FDH_REALLYCLOSE(fdP);
	return;
    }
    FDH_CLOSE(fdP);
}
/* VolumeHeaderToDisk
 * Allows for storing 64 bit inode numbers in on-disk volume header
 * file.
 */
/* convert in-memory representation of a volume header to the
 * on-disk representation of a volume header */
void
VolumeHeaderToDisk(VolumeDiskHeader_t * dh, VolumeHeader_t * h)
{
    memset((char *)dh, 0, sizeof(VolumeDiskHeader_t));
    dh->stamp = h->stamp;
    dh->id = h->id;
    dh->parent = h->parent;

#ifdef AFS_64BIT_IOPS_ENV
    dh->volumeInfo_lo = (afs_int32) h->volumeInfo & 0xffffffff;
    dh->volumeInfo_hi = (afs_int32) (h->volumeInfo >> 32) & 0xffffffff;
    dh->smallVnodeIndex_lo = (afs_int32) h->smallVnodeIndex & 0xffffffff;
    dh->smallVnodeIndex_hi =
	(afs_int32) (h->smallVnodeIndex >> 32) & 0xffffffff;
    dh->largeVnodeIndex_lo = (afs_int32) h->largeVnodeIndex & 0xffffffff;
    dh->largeVnodeIndex_hi =
	(afs_int32) (h->largeVnodeIndex >> 32) & 0xffffffff;
    dh->linkTable_lo = (afs_int32) h->linkTable & 0xffffffff;
    dh->linkTable_hi = (afs_int32) (h->linkTable >> 32) & 0xffffffff;
#else
    dh->volumeInfo_lo = h->volumeInfo;
    dh->smallVnodeIndex_lo = h->smallVnodeIndex;
    dh->largeVnodeIndex_lo = h->largeVnodeIndex;
    dh->linkTable_lo = h->linkTable;
#endif /* AFS_64BIT_IOPS_ENV */
}
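/*
 * Illustrative round-trip (not compiled): how a 64-bit inode number
 * survives the lo/hi split above.  Casting the lo half through
 * afs_uint32 when rejoining avoids sign-extending a value whose high
 * bit is set; the constant below is an arbitrary example.
 */
#if 0
{
    Inode ino = ((Inode) 0x1234 << 32) | (Inode) 0x89abcdef;
    afs_int32 lo = (afs_int32) (ino & 0xffffffff);
    afs_int32 hi = (afs_int32) ((ino >> 32) & 0xffffffff);
    Inode back = (Inode) (afs_uint32) lo | ((Inode) hi << 32);
    assert(back == ino);
}
#endif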
/* DiskToVolumeHeader
 * Converts an on-disk representation of a volume header to
 * the in-memory representation of a volume header.
 *
 * Makes the assumption that AFS has *always*
 * zero'd the volume header file so that high parts of inode
 * numbers are 0 in older (SGI EFS) volume header files.
 */
void
DiskToVolumeHeader(VolumeHeader_t * h, VolumeDiskHeader_t * dh)
{
    memset((char *)h, 0, sizeof(VolumeHeader_t));
    h->stamp = dh->stamp;
    h->id = dh->id;
    h->parent = dh->parent;

#ifdef AFS_64BIT_IOPS_ENV
    h->volumeInfo =
	(Inode) dh->volumeInfo_lo | ((Inode) dh->volumeInfo_hi << 32);

    h->smallVnodeIndex =
	(Inode) dh->smallVnodeIndex_lo |
	((Inode) dh->smallVnodeIndex_hi << 32);

    h->largeVnodeIndex =
	(Inode) dh->largeVnodeIndex_lo |
	((Inode) dh->largeVnodeIndex_hi << 32);

    h->linkTable =
	(Inode) dh->linkTable_lo | ((Inode) dh->linkTable_hi << 32);
#else
    h->volumeInfo = dh->volumeInfo_lo;
    h->smallVnodeIndex = dh->smallVnodeIndex_lo;
    h->largeVnodeIndex = dh->largeVnodeIndex_lo;
    h->linkTable = dh->linkTable_lo;
#endif /* AFS_64BIT_IOPS_ENV */
}
/***************************************************/
/* Volume Attachment routines                      */
/***************************************************/

#ifdef AFS_DEMAND_ATTACH_FS
/**
 * pre-attach a volume given its path.
 *
 * @param[out] ec         outbound error code
 * @param[in]  partition  partition path string
 * @param[in]  name       volume id string
 *
 * @return volume object pointer
 *
 * @note A pre-attached volume will only have its partition
 *       and hashid fields initialized.  At first call to
 *       VGetVolume, the volume will be fully attached.
 */
Volume *
VPreAttachVolumeByName(Error * ec, char *partition, char *name)
{
    Volume * vp;

    VOL_LOCK;
    vp = VPreAttachVolumeByName_r(ec, partition, name);
    VOL_UNLOCK;

    return vp;
}
/**
 * pre-attach a volume given its path.
 *
 * @param[out] ec         outbound error code
 * @param[in]  partition  path to vice partition
 * @param[in]  name       volume id string
 *
 * @return volume object pointer
 *
 * @pre VOL_LOCK held
 *
 * @internal volume package internal use only.
 */
Volume *
VPreAttachVolumeByName_r(Error * ec, char *partition, char *name)
{
    return VPreAttachVolumeById_r(ec,
				  partition,
				  VolumeNumber(name));
}
/**
 * pre-attach a volume given its path and numeric volume id.
 *
 * @param[out] ec          error code return
 * @param[in]  partition   path to vice partition
 * @param[in]  volumeId    numeric volume id
 *
 * @return volume object pointer
 *
 * @pre VOL_LOCK held
 *
 * @internal volume package internal use only.
 */
Volume *
VPreAttachVolumeById_r(Error * ec,
		       char * partition,
		       VolId volumeId)
{
    Volume *vp;
    struct DiskPartition64 *partp;

    *ec = 0;

    assert(programType == fileServer);

    if (!(partp = VGetPartition_r(partition, 0))) {
	*ec = VNOVOL;
	Log("VPreAttachVolumeById_r: Error getting partition (%s)\n", partition);
	return NULL;
    }

    vp = VLookupVolume_r(ec, volumeId, NULL);
    if (*ec) {
	return NULL;
    }

    return VPreAttachVolumeByVp_r(ec, partp, vp, volumeId);
}
/**
 * preattach a volume.
 *
 * @param[out] ec      outbound error code
 * @param[in]  partp   pointer to partition object
 * @param[in]  vp      pointer to volume object
 * @param[in]  vid     volume id
 *
 * @return volume object pointer
 *
 * @pre VOL_LOCK is held.
 *
 * @warning Returned volume object pointer does not have to
 *          equal the pointer passed in as argument vp.  There
 *          are potential race conditions which can result in
 *          the pointers having different values.  It is up to
 *          the caller to make sure that references are handled
 *          properly in this case.
 *
 * @note If there is already a volume object registered with
 *       the same volume id, its pointer MUST be passed as
 *       argument vp.  Failure to do so will result in a silent
 *       failure to preattach.
 *
 * @internal volume package internal use only.
 */
Volume *
VPreAttachVolumeByVp_r(Error * ec,
		       struct DiskPartition64 * partp,
		       Volume * vp,
		       VolId vid)
{
    Volume *nvp = NULL;

    *ec = 0;

    /* check to see if pre-attach already happened */
    if (vp &&
	(V_attachState(vp) != VOL_STATE_UNATTACHED) &&
	(V_attachState(vp) != VOL_STATE_PREATTACHED) &&
	!VIsErrorState(V_attachState(vp))) {
	/*
	 * pre-attach is a no-op in all but the following cases:
	 *
	 *   - volume is unattached
	 *   - volume is in an error state
	 *   - volume is pre-attached
	 */
	Log("VPreattachVolumeByVp_r: volume %u not in quiescent state\n", vid);
	goto done;
    } else if (vp) {
	/* we're re-attaching a volume; clear out some old state */
	memset(&vp->salvage, 0, sizeof(struct VolumeOnlineSalvage));

	if (V_partition(vp) != partp) {
	    /* XXX potential race */
	    DeleteVolumeFromVByPList_r(vp);
	}
    } else {
	/* if we need to allocate a new Volume struct,
	 * go ahead and drop the vol glock, otherwise
	 * do the basic setup synchronised, as it's
	 * probably not worth dropping the lock */
	VOL_UNLOCK;

	/* allocate the volume structure */
	vp = nvp = (Volume *) malloc(sizeof(Volume));
	assert(vp != NULL);
	memset(vp, 0, sizeof(Volume));
	queue_Init(&vp->vnode_list);
	assert(pthread_cond_init(&V_attachCV(vp), NULL) == 0);
    }

    /* link the volume with its associated vice partition */
    vp->device = partp->device;
    vp->partition = partp;
    vp->hashid = vid;

    /* if we dropped the lock, reacquire the lock,
     * check for pre-attach races, and then add
     * the volume to the hash table */
    if (nvp) {
	VOL_LOCK;
	nvp = VLookupVolume_r(ec, vid, NULL);
	if (*ec) {
	    free(vp);
	    vp = NULL;
	    goto done;
	} else if (nvp) {	/* race detected */
	    free(vp);
	    vp = nvp;
	    goto done;
	} else {
	    /* hack to make up for VChangeState_r() decrementing
	     * the old state counter */
	    VStats.state_levels[0]++;
	}
    }

    /* put pre-attached volume onto the hash table
     * and bring it up to the pre-attached state */
    AddVolumeToHashTable(vp, vp->hashid);
    AddVolumeToVByPList_r(vp);
    VLRU_Init_Node_r(vp);
    VChangeState_r(vp, VOL_STATE_PREATTACHED);

    if (LogLevel >= 5) {
	Log("VPreAttachVolumeByVp_r: volume %u pre-attached\n", vp->hashid);
    }

  done:
    if (*ec)
	return NULL;
    else
	return vp;
}
#endif /* AFS_DEMAND_ATTACH_FS */
/* Attach an existing volume, given its pathname, and return a
   pointer to the volume header information.  The volume also
   normally goes online at this time.  An offline volume
   must be reattached to make it go online */
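/*
 * Example (illustrative only, not compiled): how a volume utility might
 * attach a volume read-only by name.  The partition "/vicepa" and the
 * header file name are made-up values; VHDREXT names of the form
 * V<volid>.vol are produced by VolumeExternalName().
 */
#if 0
{
    Error error;
    Volume *vp = VAttachVolumeByName(&error, "/vicepa", "V0536870912.vol",
				     V_READONLY);
    if (vp) {
	/* ... read from the volume ... */
	VDetachVolume(&error, vp);
    }
}
#endif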
Volume *
VAttachVolumeByName(Error * ec, char *partition, char *name, int mode)
{
    Volume *retVal;
    VOL_LOCK;
    retVal = VAttachVolumeByName_r(ec, partition, name, mode);
    VOL_UNLOCK;
    return retVal;
}

Volume *
VAttachVolumeByName_r(Error * ec, char *partition, char *name, int mode)
{
    register Volume *vp = NULL, *svp = NULL;
    int fd, n;
    struct afs_stat status;
    struct VolumeDiskHeader diskHeader;
    struct VolumeHeader iheader;
    struct DiskPartition64 *partp;
    char path[64];
    int isbusy = 0;
    VolId volumeId;
#ifdef AFS_DEMAND_ATTACH_FS
    VolumeStats stats_save;
#endif /* AFS_DEMAND_ATTACH_FS */

    *ec = 0;

    volumeId = VolumeNumber(name);

    if (!(partp = VGetPartition_r(partition, 0))) {
	*ec = VNOVOL;
	Log("VAttachVolume: Error getting partition (%s)\n", partition);
	goto done;
    }

    if (programType == volumeUtility) {
	assert(VInit == 3);
	VLockPartition_r(partition);
    } else if (programType == fileServer) {
#ifdef AFS_DEMAND_ATTACH_FS
	/* lookup the volume in the hash table */
	vp = VLookupVolume_r(ec, volumeId, NULL);
	if (*ec) {
	    return NULL;
	}

	if (vp) {
	    /* save any counters that are supposed to
	     * be monotonically increasing over the
	     * lifetime of the fileserver */
	    memcpy(&stats_save, &vp->stats, sizeof(VolumeStats));
	} else {
	    memset(&stats_save, 0, sizeof(VolumeStats));
	}

	/* if there's something in the hash table, and it's not
	 * in the pre-attach state, then we may need to detach
	 * it before proceeding */
	if (vp && (V_attachState(vp) != VOL_STATE_PREATTACHED)) {
	    VCreateReservation_r(vp);
	    VWaitExclusiveState_r(vp);

	    /* at this point state must be one of:
	     *   - UNATTACHED
	     *   - ATTACHED
	     *   - SHUTTING_DOWN
	     *   - GOING_OFFLINE
	     *   - SALVAGING
	     *   - ERROR
	     */

	    if (vp->specialStatus == VBUSY)
		isbusy = 1;

	    /* if it's already attached, see if we can return it */
	    if (V_attachState(vp) == VOL_STATE_ATTACHED) {
		VGetVolumeByVp_r(ec, vp);
		if (V_inUse(vp) == fileServer) {
		    VCancelReservation_r(vp);
		    return vp;
		}

		/* otherwise, we need to detach, and attempt to re-attach */
		VDetachVolume_r(ec, vp);
		if (*ec) {
		    Log("VAttachVolume: Error detaching old volume instance (%s)\n", name);
		}
	    } else {
		/* if it isn't fully attached, delete from the hash tables,
		   and let the refcounter handle the rest */
		DeleteVolumeFromHashTable(vp);
		DeleteVolumeFromVByPList_r(vp);
	    }

	    VCancelReservation_r(vp);
	    vp = NULL;
	}

	/* pre-attach volume if it hasn't been done yet */
	if (!vp ||
	    (V_attachState(vp) == VOL_STATE_UNATTACHED) ||
	    (V_attachState(vp) == VOL_STATE_ERROR)) {
	    svp = vp;
	    vp = VPreAttachVolumeByVp_r(ec, partp, vp, volumeId);
	    if (*ec) {
		return NULL;
	    }
	}

	/* handle pre-attach races
	 *
	 * multiple threads can race to pre-attach a volume,
	 * but we can't let them race beyond that
	 *
	 * our solution is to let the first thread to bring
	 * the volume into an exclusive state win; the other
	 * threads just wait until it finishes bringing the
	 * volume online, and then they do a vgetvolumebyvp
	 */
	if (svp && (svp != vp)) {
	    /* wait for other exclusive ops to finish */
	    VCreateReservation_r(vp);
	    VWaitExclusiveState_r(vp);

	    /* get a heavyweight ref, kill the lightweight ref, and return */
	    VGetVolumeByVp_r(ec, vp);
	    VCancelReservation_r(vp);
	    return vp;
	}

	/* at this point, we are chosen as the thread to do
	 * demand attachment for this volume. all other threads
	 * doing a getvolume on vp->hashid will block until we finish */

	/* make sure any old header cache entries are invalidated
	 * before proceeding */
	FreeVolumeHeader(vp);

	VChangeState_r(vp, VOL_STATE_ATTACHING);

	/* restore any saved counters */
	memcpy(&vp->stats, &stats_save, sizeof(VolumeStats));
#else /* AFS_DEMAND_ATTACH_FS */
	vp = VGetVolume_r(ec, volumeId);
	if (vp) {
	    if (V_inUse(vp) == fileServer)
		return vp;
	    if (vp->specialStatus == VBUSY)
		isbusy = 1;
	    VDetachVolume_r(ec, vp);
	    if (*ec) {
		Log("VAttachVolume: Error detaching volume (%s)\n", name);
	    }
	    vp = NULL;
	}
#endif /* AFS_DEMAND_ATTACH_FS */
    }
    *ec = 0;
    strcpy(path, VPartitionPath(partp));

    VOL_UNLOCK;

    strcat(path, "/");
    strcat(path, name);

    if ((fd = afs_open(path, O_RDONLY)) == -1 || afs_fstat(fd, &status) == -1) {
	Log("VAttachVolume: Failed to open %s (errno %d)\n", path, errno);
	if (fd > -1)
	    close(fd);
	*ec = VNOVOL;
	VOL_LOCK;
	goto done;
    }
    n = read(fd, &diskHeader, sizeof(diskHeader));
    close(fd);
    if (n != sizeof(diskHeader)
	|| diskHeader.stamp.magic != VOLUMEHEADERMAGIC) {
	Log("VAttachVolume: Error reading volume header %s\n", path);
	*ec = VSALVAGE;
	VOL_LOCK;
	goto done;
    }
    if (diskHeader.stamp.version != VOLUMEHEADERVERSION) {
	Log("VAttachVolume: Volume %s, version number is incorrect; volume needs salvaged\n", path);
	*ec = VSALVAGE;
	VOL_LOCK;
	goto done;
    }

    DiskToVolumeHeader(&iheader, &diskHeader);
#ifdef FSSYNC_BUILD_CLIENT
    if (programType == volumeUtility && mode != V_SECRETLY && mode != V_PEEK) {
	VOL_LOCK;
	if (FSYNC_VolOp(iheader.id, partition, FSYNC_VOL_NEEDVOLUME, mode, NULL)
	    != SYNC_OK) {
	    Log("VAttachVolume: attach of volume %u apparently denied by file server\n", iheader.id);
	    *ec = VNOVOL;	/* XXXX */
	    goto done;
	}
	VOL_UNLOCK;
    }
#endif

    if (!vp) {
	vp = (Volume *) calloc(1, sizeof(Volume));
	assert(vp != NULL);
	vp->device = partp->device;
	vp->partition = partp;
	queue_Init(&vp->vnode_list);
#ifdef AFS_DEMAND_ATTACH_FS
	assert(pthread_cond_init(&V_attachCV(vp), NULL) == 0);
#endif /* AFS_DEMAND_ATTACH_FS */
    }

    /* attach2 is entered without any locks, and returns
     * with vol_glock_mutex held */
    vp = attach2(ec, volumeId, path, &iheader, partp, vp, isbusy, mode);
    if (programType == volumeUtility && vp) {
	if ((mode == V_VOLUPD) || (VolumeWriteable(vp) && (mode == V_CLONE))) {
	    /* mark volume header as in use so that volser crashes lead to a
	     * salvage attempt */
	    VUpdateVolume_r(ec, vp, 0);
	}
#ifdef AFS_DEMAND_ATTACH_FS
	/* for dafs, we should tell the fileserver, except for V_PEEK
	 * where we know it is not necessary */
	if (mode == V_PEEK) {
	    vp->needsPutBack = 0;
	} else {
	    vp->needsPutBack = 1;
	}
#else /* !AFS_DEMAND_ATTACH_FS */
	/* duplicate computation in fssync.c about whether the server
	 * takes the volume offline or not.  If the volume isn't
	 * offline, we must not return it when we detach the volume,
	 * or the server will abort */
	if (mode == V_READONLY || mode == V_PEEK
	    || (!VolumeWriteable(vp) && (mode == V_CLONE || mode == V_DUMP)))
	    vp->needsPutBack = 0;
	else
	    vp->needsPutBack = 1;
#endif /* !AFS_DEMAND_ATTACH_FS */
    }
    /* OK, there's a problem here, but one that I don't know how to
     * fix right now, and that I don't think should arise often.
     * Basically, we should only put back this volume to the server if
     * it was given to us by the server, but since we don't have a vp,
     * we can't run the VolumeWriteable function to find out as we do
     * above when computing vp->needsPutBack.  So we send it back, but
     * there's a path in VAttachVolume on the server which may abort
     * if this volume doesn't have a header.  Should be pretty rare
     * for all of that to happen, but if it does, probably the right
     * fix is for the server to allow the return of readonly volumes
     * that it doesn't think are really checked out. */
#ifdef FSSYNC_BUILD_CLIENT
    if (programType == volumeUtility && vp == NULL &&
	mode != V_SECRETLY && mode != V_PEEK) {
	FSYNC_VolOp(iheader.id, partition, FSYNC_VOL_ON, 0, NULL);
    }
#endif
    if (programType == fileServer && vp) {
#ifdef AFS_DEMAND_ATTACH_FS
	/*
	 * we can get here in cases where we don't "own"
	 * the volume (e.g. volume owned by a utility).
	 * short circuit around potential disk header races.
	 */
	if (V_attachState(vp) != VOL_STATE_ATTACHED) {
	    goto done;
	}
#endif
	V_needsCallback(vp) = 0;
#ifdef	notdef
	if (VInit >= 2 && V_BreakVolumeCallbacks) {
	    Log("VAttachVolume: Volume %u was changed externally; breaking callbacks\n", V_id(vp));
	    (*V_BreakVolumeCallbacks) (V_id(vp));
	}
#endif
	VUpdateVolume_r(ec, vp, 0);
	if (*ec) {
	    Log("VAttachVolume: Error updating volume\n");
	    if (vp)
		VPutVolume_r(vp);
	    goto done;
	}
	if (VolumeWriteable(vp) && V_dontSalvage(vp) == 0) {
#ifndef AFS_DEMAND_ATTACH_FS
	    /* This is a hack: by temporarily setting the incore
	     * dontSalvage flag ON, the volume will be put back on the
	     * Update list (with dontSalvage OFF again).  It will then
	     * come back in N minutes with DONT_SALVAGE eventually
	     * set.  This is the way that volumes that have never had
	     * it set get it set; or that volumes that have been
	     * offline without DONT SALVAGE having been set also
	     * eventually get it set */
	    V_dontSalvage(vp) = DONT_SALVAGE;
#endif /* !AFS_DEMAND_ATTACH_FS */
	    VAddToVolumeUpdateList_r(ec, vp);
	    if (*ec) {
		Log("VAttachVolume: Error adding volume to update list\n");
		if (vp)
		    VPutVolume_r(vp);
		goto done;
	    }
	}
	if (LogLevel)
	    Log("VOnline: volume %u (%s) attached and online\n", V_id(vp),
		V_name(vp));
    }
  done:
    if (programType == volumeUtility) {
	VUnlockPartition_r(partition);
    }
    if (*ec) {
#ifdef AFS_DEMAND_ATTACH_FS
	/* attach failed; make sure we're in error state */
	if (vp && !VIsErrorState(V_attachState(vp))) {
	    VChangeState_r(vp, VOL_STATE_ERROR);
	}
#endif /* AFS_DEMAND_ATTACH_FS */
	return NULL;
    } else {
	return vp;
    }
}
#ifdef AFS_DEMAND_ATTACH_FS
/* VAttachVolumeByVp_r
 *
 * finish attaching a volume that is
 * in a less than fully attached state
 */
/* caller MUST hold a ref count on vp */
static Volume *
VAttachVolumeByVp_r(Error * ec, Volume * vp, int mode)
{
    char name[VMAXPATHLEN];
    int fd, n, reserve = 0;
    struct afs_stat status;
    struct VolumeDiskHeader diskHeader;
    struct VolumeHeader iheader;
    struct DiskPartition64 *partp;
    char path[64];
    int isbusy = 0;
    VolId volumeId;
    Volume * nvp = NULL;
    VolumeStats stats_save;

    *ec = 0;

    /* volume utility should never call AttachByVp */
    assert(programType == fileServer);

    volumeId = vp->hashid;
    partp = vp->partition;
    VolumeExternalName_r(volumeId, name, sizeof(name));
    /* if another thread is performing a blocking op, wait */
    VWaitExclusiveState_r(vp);

    memcpy(&stats_save, &vp->stats, sizeof(VolumeStats));

    /* if it's already attached, see if we can return it */
    if (V_attachState(vp) == VOL_STATE_ATTACHED) {
	VGetVolumeByVp_r(ec, vp);
	if (V_inUse(vp) == fileServer) {
	    return vp;
	} else {
	    if (vp->specialStatus == VBUSY)
		isbusy = 1;
	    VDetachVolume_r(ec, vp);
	    if (*ec) {
		Log("VAttachVolume: Error detaching volume (%s)\n", name);
	    }
	    vp = NULL;
	}
    }

    /* pre-attach volume if it hasn't been done yet */
    if (!vp ||
	(V_attachState(vp) == VOL_STATE_UNATTACHED) ||
	(V_attachState(vp) == VOL_STATE_ERROR)) {
	nvp = VPreAttachVolumeByVp_r(ec, partp, vp, volumeId);
	if (*ec) {
	    return NULL;
	}
	if (nvp != vp) {
	    reserve = 1;
	    VCreateReservation_r(nvp);
	    vp = nvp;
	}
    }

    VChangeState_r(vp, VOL_STATE_ATTACHING);

    /* restore monotonically increasing stats */
    memcpy(&vp->stats, &stats_save, sizeof(VolumeStats));
    /* compute path to disk header,
     * read in header,
     * and verify magic and version stamps */
    strcpy(path, VPartitionPath(partp));

    VOL_UNLOCK;

    strcat(path, "/");
    strcat(path, name);

    if ((fd = afs_open(path, O_RDONLY)) == -1 || afs_fstat(fd, &status) == -1) {
	Log("VAttachVolume: Failed to open %s (errno %d)\n", path, errno);
	if (fd > -1)
	    close(fd);
	*ec = VNOVOL;
	VOL_LOCK;
	goto done;
    }
    n = read(fd, &diskHeader, sizeof(diskHeader));
    close(fd);
    if (n != sizeof(diskHeader)
	|| diskHeader.stamp.magic != VOLUMEHEADERMAGIC) {
	Log("VAttachVolume: Error reading volume header %s\n", path);
	*ec = VSALVAGE;
	VOL_LOCK;
	goto done;
    }
    if (diskHeader.stamp.version != VOLUMEHEADERVERSION) {
	Log("VAttachVolume: Volume %s, version number is incorrect; volume needs salvaged\n", path);
	*ec = VSALVAGE;
	VOL_LOCK;
	goto done;
    }

    /* convert on-disk header format to in-memory header format */
    DiskToVolumeHeader(&iheader, &diskHeader);
    /*
     * NOTE: attach2 is entered without any locks, and returns
     * with vol_glock_mutex held */
    vp = attach2(ec, volumeId, path, &iheader, partp, vp, isbusy, mode);

    /*
     * the event that an error was encountered, or
     * the volume was not brought to an attached state
     * for any reason, skip to the end.  We cannot
     * safely call VUpdateVolume unless we "own" it.
     */
    if (*ec ||
	(vp == NULL) ||
	(V_attachState(vp) != VOL_STATE_ATTACHED)) {
	goto done;
    }

    V_needsCallback(vp) = 0;
    VUpdateVolume_r(ec, vp, 0);
    if (*ec) {
	Log("VAttachVolume: Error updating volume %u\n", vp->hashid);
	VPutVolume_r(vp);
	goto done;
    }
    if (VolumeWriteable(vp) && V_dontSalvage(vp) == 0) {
#ifndef AFS_DEMAND_ATTACH_FS
	/* This is a hack: by temporarily setting the incore
	 * dontSalvage flag ON, the volume will be put back on the
	 * Update list (with dontSalvage OFF again).  It will then
	 * come back in N minutes with DONT_SALVAGE eventually
	 * set.  This is the way that volumes that have never had
	 * it set get it set; or that volumes that have been
	 * offline without DONT SALVAGE having been set also
	 * eventually get it set */
	V_dontSalvage(vp) = DONT_SALVAGE;
#endif /* !AFS_DEMAND_ATTACH_FS */
	VAddToVolumeUpdateList_r(ec, vp);
	if (*ec) {
	    Log("VAttachVolume: Error adding volume %u to update list\n", vp->hashid);
	    VPutVolume_r(vp);
	    goto done;
	}
    }
    if (LogLevel)
	Log("VOnline: volume %u (%s) attached and online\n", V_id(vp),
	    V_name(vp));
  done:
    if (reserve) {
	VCancelReservation_r(nvp);
	reserve = 0;
    }
    if (*ec && (*ec != VOFFLINE) && (*ec != VSALVAGE)) {
	if (vp && !VIsErrorState(V_attachState(vp))) {
	    VChangeState_r(vp, VOL_STATE_ERROR);
	}
	return NULL;
    } else {
	return vp;
    }
}
#endif /* AFS_DEMAND_ATTACH_FS */
/*
 * called without any locks held
 * returns with vol_glock_mutex held
 */
private Volume *
attach2(Error * ec, VolId volumeId, char *path, register struct VolumeHeader * header,
	struct DiskPartition64 * partp, register Volume * vp, int isbusy, int mode)
{
    vp->specialStatus = (byte) (isbusy ? VBUSY : 0);
    IH_INIT(vp->vnodeIndex[vLarge].handle, partp->device, header->parent,
	    header->largeVnodeIndex);
    IH_INIT(vp->vnodeIndex[vSmall].handle, partp->device, header->parent,
	    header->smallVnodeIndex);
    IH_INIT(vp->diskDataHandle, partp->device, header->parent,
	    header->volumeInfo);
    IH_INIT(vp->linkHandle, partp->device, header->parent, header->linkTable);
    vp->shuttingDown = 0;
    vp->goingOffline = 0;
#ifdef AFS_DEMAND_ATTACH_FS
    vp->stats.last_attach = FT_ApproxTime();
    vp->stats.attaches++;
#endif

    VOL_LOCK;
    IncUInt64(&VStats.attaches);
    vp->cacheCheck = ++VolumeCacheCheck;
    /* just in case this ever rolls over */
    if (!vp->cacheCheck)
	vp->cacheCheck = ++VolumeCacheCheck;
    GetVolumeHeader(vp);
    VOL_UNLOCK;
#if defined(AFS_DEMAND_ATTACH_FS) && defined(FSSYNC_BUILD_CLIENT)
    /* demand attach changes the V_PEEK mechanism
     *
     * we can now suck the current disk data structure over
     * the fssync interface without going to disk
     *
     * (technically, we don't need to restrict this feature
     * to demand attach fileservers.  However, I'm trying
     * to limit the number of common code changes)
     */
    if (programType != fileServer && mode == V_PEEK) {
	SYNC_response res;
	res.payload.len = sizeof(VolumeDiskData);
	res.payload.buf = &vp->header->diskstuff;

	if (FSYNC_VolOp(volumeId,
			VPartitionPath(partp),
			FSYNC_VOL_QUERY_HDR,
			FSYNC_WHATEVER,
			&res) == SYNC_OK) {
	    goto disk_header_loaded;
	}
    }
#endif /* AFS_DEMAND_ATTACH_FS && FSSYNC_BUILD_CLIENT */
    (void)ReadHeader(ec, V_diskDataHandle(vp), (char *)&V_disk(vp),
		     sizeof(V_disk(vp)), VOLUMEINFOMAGIC, VOLUMEINFOVERSION);

#ifdef AFS_DEMAND_ATTACH_FS
    /* update stats */
    VOL_LOCK;
    IncUInt64(&VStats.hdr_loads);
    IncUInt64(&vp->stats.hdr_loads);
    VOL_UNLOCK;
#endif /* AFS_DEMAND_ATTACH_FS */

    if (*ec) {
	Log("VAttachVolume: Error reading diskDataHandle vol header %s; error=%u\n", path, *ec);
    }
2312 #ifdef AFS_DEMAND_ATTACH_FS
2315 /* check for pending volume operations */
2316 if (vp->pending_vol_op) {
2317 /* see if the pending volume op requires exclusive access */
2318 if (!VVolOpLeaveOnline_r(vp, vp->pending_vol_op)) {
2319 /* mark the volume down */
2321 VChangeState_r(vp, VOL_STATE_UNATTACHED);
2322 if (V_offlineMessage(vp)[0] == '\0')
2323 strlcpy(V_offlineMessage(vp),
2324 "A volume utility is running.",
2325 sizeof(V_offlineMessage(vp)));
2326 V_offlineMessage(vp)[sizeof(V_offlineMessage(vp)) - 1] = '\0';
2328 /* check to see if we should set the specialStatus flag */
2329 if (VVolOpSetVBusy_r(vp, vp->pending_vol_op)) {
2330 vp->specialStatus = VBUSY;
2335 V_attachFlags(vp) |= VOL_HDR_LOADED;
2336 vp->stats.last_hdr_load = vp->stats.last_attach;
2338 #endif /* AFS_DEMAND_ATTACH_FS */
2341 struct IndexFileHeader iHead;
2343 #if OPENAFS_VOL_STATS
2345 * We just read in the diskstuff part of the header. If the detailed
2346 * volume stats area has not yet been initialized, we should bzero the
2347 * area and mark it as initialized.
2349 if (!(V_stat_initialized(vp))) {
2350 memset((char *)(V_stat_area(vp)), 0, VOL_STATS_BYTES);
2351 V_stat_initialized(vp) = 1;
2353 #endif /* OPENAFS_VOL_STATS */
2355 (void)ReadHeader(ec, vp->vnodeIndex[vSmall].handle,
2356 (char *)&iHead, sizeof(iHead),
2357 SMALLINDEXMAGIC, SMALLINDEXVERSION);
2360 Log("VAttachVolume: Error reading smallVnode vol header %s; error=%u\n", path, *ec);
2365 struct IndexFileHeader iHead;
2367 (void)ReadHeader(ec, vp->vnodeIndex[vLarge].handle,
2368 (char *)&iHead, sizeof(iHead),
2369 LARGEINDEXMAGIC, LARGEINDEXVERSION);
2372 Log("VAttachVolume: Error reading largeVnode vol header %s; error=%u\n", path, *ec);
2376 #ifdef AFS_NAMEI_ENV
2378 struct versionStamp stamp;
2380 (void)ReadHeader(ec, V_linkHandle(vp), (char *)&stamp,
2381 sizeof(stamp), LINKTABLEMAGIC, LINKTABLEVERSION);
2384 Log("VAttachVolume: Error reading namei vol header %s; error=%u\n", path, *ec);
2387 #endif /* AFS_NAMEI_ENV */
2389 #if defined(AFS_DEMAND_ATTACH_FS)
2390 if (*ec && ((*ec != VOFFLINE) || (V_attachState(vp) != VOL_STATE_UNATTACHED))) {
2392 if (programType == fileServer) {
2393 VRequestSalvage_r(ec, vp, SALVSYNC_ERROR, VOL_SALVAGE_INVALIDATE_HEADER);
2396 Log("VAttachVolume: Error attaching volume %s; volume needs salvage; error=%u\n", path, *ec);
2402 /* volume operation in progress */
2406 #else /* AFS_DEMAND_ATTACH_FS */
2408 Log("VAttachVolume: Error attaching volume %s; volume needs salvage; error=%u\n", path, *ec);
2413 #endif /* AFS_DEMAND_ATTACH_FS */
2415 if (V_needsSalvaged(vp)) {
2416 if (vp->specialStatus)
2417 vp->specialStatus = 0;
2419 #if defined(AFS_DEMAND_ATTACH_FS)
2420 if (programType == fileServer) {
2421 VRequestSalvage_r(ec, vp, SALVSYNC_NEEDED, VOL_SALVAGE_INVALIDATE_HEADER);
2424 Log("VAttachVolume: volume salvage flag is ON for %s; volume needs salvage\n", path);
2428 #else /* AFS_DEMAND_ATTACH_FS */
2431 #endif /* AFS_DEMAND_ATTACH_FS */
2436 if (programType == fileServer) {
2437 #ifndef FAST_RESTART
2438 if (V_inUse(vp) && VolumeWriteable(vp)) {
2439 if (!V_needsSalvaged(vp)) {
2440 V_needsSalvaged(vp) = 1;
2441 VUpdateVolume_r(ec, vp, 0);
2443 #if defined(AFS_DEMAND_ATTACH_FS)
2444 VRequestSalvage_r(ec, vp, SALVSYNC_NEEDED, VOL_SALVAGE_INVALIDATE_HEADER);
2446 #else /* AFS_DEMAND_ATTACH_FS */
2447 Log("VAttachVolume: volume %s needs to be salvaged; not attached.\n", path);
2450 #endif /* AFS_DEMAND_ATTACH_FS */
2453 #endif /* FAST_RESTART */
2455 if (V_destroyMe(vp) == DESTROY_ME) {
2456 #if defined(AFS_DEMAND_ATTACH_FS)
2457 /* schedule a salvage so the volume goes away on disk */
2458 VRequestSalvage_r(ec, vp, SALVSYNC_ERROR, VOL_SALVAGE_INVALIDATE_HEADER);
2459 VChangeState_r(vp, VOL_STATE_ERROR);
2461 #endif /* AFS_DEMAND_ATTACH_FS */
2463 Log("VAttachVolume: volume %s is junk; it should be destroyed at next salvage\n", path);
2469 vp->nextVnodeUnique = V_uniquifier(vp);
2470 vp->vnodeIndex[vSmall].bitmap = vp->vnodeIndex[vLarge].bitmap = NULL;
2471 #ifndef BITMAP_LATER
2472 if (programType == fileServer && VolumeWriteable(vp)) {
2474 for (i = 0; i < nVNODECLASSES; i++) {
2475 VGetBitmap_r(ec, vp, i);
2477 #ifdef AFS_DEMAND_ATTACH_FS
2478 VRequestSalvage_r(ec, vp, SALVSYNC_ERROR, VOL_SALVAGE_INVALIDATE_HEADER);
2480 #else /* AFS_DEMAND_ATTACH_FS */
2482 #endif /* AFS_DEMAND_ATTACH_FS */
2483 Log("VAttachVolume: error getting bitmap for volume (%s)\n",
2489 #endif /* BITMAP_LATER */
2491 if (programType == fileServer) {
2492 if (vp->specialStatus)
2493 vp->specialStatus = 0;
2494 if (V_blessed(vp) && V_inService(vp) && !V_needsSalvaged(vp)) {
2495 V_inUse(vp) = fileServer;
2496 V_offlineMessage(vp)[0] = '\0';
2499 V_inUse(vp) = programType;
2500 V_checkoutMode(vp) = mode;
2503 AddVolumeToHashTable(vp, V_id(vp));
2504 #ifdef AFS_DEMAND_ATTACH_FS
2505 AddVolumeToVByPList_r(vp);
2507 if ((programType != fileServer) ||
2508 (V_inUse(vp) == fileServer)) {
2509 VChangeState_r(vp, VOL_STATE_ATTACHED);
2511 VChangeState_r(vp, VOL_STATE_UNATTACHED);
2517 /* Attach an existing volume.
2518 The volume also normally goes online at this time.
2519 An offline volume must be reattached to make it go online.
2523 VAttachVolume(Error * ec, VolumeId volumeId, int mode)
2527 retVal = VAttachVolume_r(ec, volumeId, mode);
2533 VAttachVolume_r(Error * ec, VolumeId volumeId, int mode)
2536 GetVolumePath(ec, volumeId, &part, &name);
2538 register Volume *vp;
2540 vp = VGetVolume_r(&error, volumeId);
2542 assert(V_inUse(vp) == 0);
2543 VDetachVolume_r(ec, vp);
2547 return VAttachVolumeByName_r(ec, part, name, mode);
2550 /* Increment a reference count to a volume, sans context swaps. This may
2551 * require reading the volume header in from the disk, since there's
2552 * an invariant in the volume package that nUsers>0 ==> vp->header is valid.
2554 * N.B. This call can fail if we can't read in the header!! In this case
2555 * we still guarantee we won't context swap, but the ref count won't be
2556 * incremented (otherwise we'd violate the invariant).
2558 /* NOTE: with the demand attach fileserver extensions, the global lock
2559 * is dropped within VHold */
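/* Illustrative sketch of the hold/put pairing (assumes VOL_LOCK/VOL_UNLOCK
 * are the package's global lock macros; VHold_r returns 0 on success):
 *
 *     VOL_LOCK;
 *     if (VHold_r(vp) == 0) {
 *         ... vp->header is valid here, per the invariant above ...
 *         VPutVolume_r(vp);
 *     }
 *     VOL_UNLOCK;
 */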
2560 #ifdef AFS_DEMAND_ATTACH_FS
2562 VHold_r(register Volume * vp)
2566 VCreateReservation_r(vp);
2567 VWaitExclusiveState_r(vp);
2569 LoadVolumeHeader(&error, vp);
2571 VCancelReservation_r(vp);
2575 VCancelReservation_r(vp);
2578 #else /* AFS_DEMAND_ATTACH_FS */
2580 VHold_r(register Volume * vp)
2584 LoadVolumeHeader(&error, vp);
2590 #endif /* AFS_DEMAND_ATTACH_FS */
2593 VHold(register Volume * vp)
2597 retVal = VHold_r(vp);
2603 /***************************************************/
2604 /* get and put volume routines */
2605 /***************************************************/
2608 * put back a heavyweight reference to a volume object.
2610 * @param[in] vp volume object pointer
2612 * @pre VOL_LOCK held
2614 * @post heavyweight volume reference put back.
2615 * depending on state, volume may have been taken offline,
2616 * detached, salvaged, freed, etc.
2618 * @internal volume package internal use only
2621 VPutVolume_r(register Volume * vp)
2623 assert(--vp->nUsers >= 0);
2624 if (vp->nUsers == 0) {
2626 ReleaseVolumeHeader(vp->header);
2627 #ifdef AFS_DEMAND_ATTACH_FS
2628 if (!VCheckDetach(vp)) {
2632 #else /* AFS_DEMAND_ATTACH_FS */
2634 #endif /* AFS_DEMAND_ATTACH_FS */
2639 VPutVolume(register Volume * vp)
2647 /* Get a pointer to an attached volume. The pointer is returned regardless
2648 of whether or not the volume is in service or on/off line. An error
2649 code, however, is returned with an indication of the volume's status */
2651 VGetVolume(Error * ec, Error * client_ec, VolId volumeId)
2655 retVal = GetVolume(ec, client_ec, volumeId, NULL, 0);
2661 VGetVolume_r(Error * ec, VolId volumeId)
2663 return GetVolume(ec, NULL, volumeId, NULL, 0);
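/* Illustrative sketch of the usual lookup/release pairing (error values are
 * examples only; see GetVolume below for the full status logic):
 *
 *     Error ec, client_ec;
 *     Volume *tvp = VGetVolume(&ec, &client_ec, volumeId);
 *     if (tvp) {
 *         ... serve requests against tvp ...
 *         VPutVolume(tvp);
 *     } else {
 *         ... hand client_ec (e.g. VNOVOL, VOFFLINE, VBUSY) to the client ...
 *     }
 */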
2666 /* try to get a volume we've previously looked up */
2667 /* for demand attach fs, caller MUST NOT hold a ref count on vp */
2669 VGetVolumeByVp_r(Error * ec, Volume * vp)
2671 return GetVolume(ec, NULL, vp->hashid, vp, 0);
2674 /* private interface for getting a volume handle
2675 * volumeId must be provided.
2676 * hint is an optional parameter to speed up hash lookups
2677 * flags is not used at this time
2679 /* for demand attach fs, caller MUST NOT hold a ref count on hint */
2681 GetVolume(Error * ec, Error * client_ec, VolId volumeId, Volume * hint, int flags)
2684 /* pull this profiling/debugging code out of regular builds */
2686 #define VGET_CTR_INC(x) x++
2687 unsigned short V0 = 0, V1 = 0, V2 = 0, V3 = 0, V5 = 0, V6 =
2688 0, V7 = 0, V8 = 0, V9 = 0;
2689 unsigned short V10 = 0, V11 = 0, V12 = 0, V13 = 0, V14 = 0, V15 = 0;
2691 #define VGET_CTR_INC(x)
2693 #ifdef AFS_DEMAND_ATTACH_FS
2694 Volume *avp, * rvp = hint;
2697 #ifdef AFS_DEMAND_ATTACH_FS
2699 VCreateReservation_r(rvp);
2701 #endif /* AFS_DEMAND_ATTACH_FS */
2709 vp = VLookupVolume_r(ec, volumeId, vp);
2715 #ifdef AFS_DEMAND_ATTACH_FS
2716 if (rvp && (rvp != vp)) {
2717 /* break reservation on old vp */
2718 VCancelReservation_r(rvp);
2721 #endif /* AFS_DEMAND_ATTACH_FS */
2727 /* Until we have reached an initialization level of 2
2728 * we don't know whether this volume exists or not.
2729 * We can't sleep and retry later because before a volume
2730 * is attached, the caller tries to get it first. Just
2731 * return VOFFLINE and the caller can choose whether to
2732 * retry the command or not. */
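/* e.g., a caller sketch for the pre-initialization case described above:
 *
 *     vp = VGetVolume_r(&ec, volumeId);
 *     if (!vp && ec == VOFFLINE && VInit < 2) {
 *         ... attachment is still in progress; retry later or fail ...
 *     }
 */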
2742 IncUInt64(&VStats.hdr_gets);
2744 #ifdef AFS_DEMAND_ATTACH_FS
2745 /* block if someone else is performing an exclusive op on this volume */
2748 VCreateReservation_r(rvp);
2750 VWaitExclusiveState_r(vp);
2752 /* short circuit with VNOVOL in the following circumstances:
2755 * VOL_STATE_SHUTTING_DOWN
2757 if ((V_attachState(vp) == VOL_STATE_ERROR) ||
2758 (V_attachState(vp) == VOL_STATE_SHUTTING_DOWN)) {
2765 * short circuit with VOFFLINE in the following circumstances:
2767 * VOL_STATE_UNATTACHED
2769 if (V_attachState(vp) == VOL_STATE_UNATTACHED) {
2775 /* allowable states:
2783 if (vp->salvage.requested) {
2784 VUpdateSalvagePriority_r(vp);
2787 if (V_attachState(vp) == VOL_STATE_PREATTACHED) {
2788 avp = VAttachVolumeByVp_r(ec, vp, 0);
2791 /* VAttachVolumeByVp_r can return a pointer
2792 * != the vp passed to it under certain
2793 * conditions; make sure we don't leak
2794 * reservations if that happens */
2796 VCancelReservation_r(rvp);
2798 VCreateReservation_r(rvp);
2808 if (!vp->pending_vol_op) {
2823 if ((V_attachState(vp) == VOL_STATE_SALVAGING) ||
2824 (*ec == VSALVAGING)) {
2826 /* see CheckVnode() in afsfileprocs.c for an explanation
2827 * of this error code logic */
2828 afs_uint32 now = FT_ApproxTime();
2829 if ((vp->stats.last_salvage + (10 * 60)) >= now) {
2832 *client_ec = VRESTARTING;
2841 LoadVolumeHeader(ec, vp);
2844 /* Only log the error if it was totally unexpected. A missing inode,
2845 * for example, most likely just means the volume has been deleted */
2846 if (errno != ENXIO || LogLevel)
2847 Log("Volume %u: couldn't reread volume header\n",
2849 #ifdef AFS_DEMAND_ATTACH_FS
2850 if (programType == fileServer) {
2851 VRequestSalvage_r(ec, vp, SALVSYNC_ERROR, VOL_SALVAGE_INVALIDATE_HEADER);
2856 #else /* AFS_DEMAND_ATTACH_FS */
2859 #endif /* AFS_DEMAND_ATTACH_FS */
2863 #ifdef AFS_DEMAND_ATTACH_FS
2865 * this test MUST happen after the volume header is loaded
2867 if (vp->pending_vol_op && !VVolOpLeaveOnline_r(vp, vp->pending_vol_op)) {
2869 /* see CheckVnode() in afsfileprocs.c for an explanation
2870 * of this error code logic */
2871 afs_uint32 now = FT_ApproxTime();
2872 if ((vp->stats.last_vol_op + (10 * 60)) >= now) {
2875 *client_ec = VRESTARTING;
2879 ReleaseVolumeHeader(vp->header);
2883 #endif /* AFS_DEMAND_ATTACH_FS */
2886 if (vp->shuttingDown) {
2893 if (programType == fileServer) {
2895 if (vp->goingOffline) {
2897 #ifdef AFS_DEMAND_ATTACH_FS
2898 /* wait for the volume to go offline */
2899 if (V_attachState(vp) == VOL_STATE_GOING_OFFLINE) {
2900 VWaitStateChange_r(vp);
2902 #elif defined(AFS_PTHREAD_ENV)
2903 VOL_CV_WAIT(&vol_put_volume_cond);
2904 #else /* AFS_PTHREAD_ENV */
2905 LWP_WaitProcess(VPutVolume);
2906 #endif /* AFS_PTHREAD_ENV */
2909 if (vp->specialStatus) {
2911 *ec = vp->specialStatus;
2912 } else if (V_inService(vp) == 0 || V_blessed(vp) == 0) {
2915 } else if (V_inUse(vp) == 0) {
2926 #ifdef AFS_DEMAND_ATTACH_FS
2927 /* if no error, bump nUsers */
2930 VLRU_UpdateAccess_r(vp);
2933 VCancelReservation_r(rvp);
2936 if (client_ec && !*client_ec) {
2939 #else /* AFS_DEMAND_ATTACH_FS */
2940 /* if no error, bump nUsers */
2947 #endif /* AFS_DEMAND_ATTACH_FS */
2954 /***************************************************/
2955 /* Volume offline/detach routines */
2956 /***************************************************/
2958 /* caller MUST hold a heavyweight ref on vp */
2959 #ifdef AFS_DEMAND_ATTACH_FS
2961 VTakeOffline_r(register Volume * vp)
2965 assert(vp->nUsers > 0);
2966 assert(programType == fileServer);
2968 VCreateReservation_r(vp);
2969 VWaitExclusiveState_r(vp);
2971 vp->goingOffline = 1;
2972 V_needsSalvaged(vp) = 1;
2974 VRequestSalvage_r(&error, vp, SALVSYNC_ERROR, 0);
2975 VCancelReservation_r(vp);
2977 #else /* AFS_DEMAND_ATTACH_FS */
2979 VTakeOffline_r(register Volume * vp)
2981 assert(vp->nUsers > 0);
2982 assert(programType == fileServer);
2984 vp->goingOffline = 1;
2985 V_needsSalvaged(vp) = 1;
2987 #endif /* AFS_DEMAND_ATTACH_FS */
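/* Caller-side sketch: VTakeOffline_r requires VOL_LOCK and a heavyweight
 * reference, so a fileserver thread that detects damage might do:
 *
 *     VOL_LOCK;
 *     vp = VGetVolume_r(&ec, volId);      heavyweight ref; nUsers > 0
 *     if (vp) {
 *         VTakeOffline_r(vp);             flags the volume for salvage
 *         VPutVolume_r(vp);               final put lets it go offline
 *     }
 *     VOL_UNLOCK;
 */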
2990 VTakeOffline(register Volume * vp)
2998 * force a volume offline.
3000 * @param[in] vp volume object pointer
3001 * @param[in] flags flags (see note below)
3003 * @note the flag VOL_FORCEOFF_NOUPDATE is a recursion control flag
3004 * used when VUpdateVolume_r needs to call VForceOffline_r
3005 * (which in turn would normally call VUpdateVolume_r)
3007 * @see VUpdateVolume_r
3009 * @pre VOL_LOCK must be held.
3010 * for DAFS, caller must hold ref.
3012 * @note for DAFS, it _is safe_ to call this function from an
3015 * @post needsSalvaged flag is set.
3016 * for DAFS, salvage is requested.
3017 * no further references to the volume through the volume
3018 * package will be honored.
3019 * all file descriptor and vnode caches are invalidated.
3021 * @warning this is a heavy-handed interface. it results in
3022 * a volume going offline regardless of the current
3023 * reference count state.
3025 * @internal volume package internal use only
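/* The recursion control described above forms a one-hop cycle; each side
 * passes the flag that suppresses the reverse call:
 *
 *     VUpdateVolume_r(ec, vp, 0)
 *         on header write failure -> VForceOffline_r(vp, VOL_FORCEOFF_NOUPDATE)
 *     VForceOffline_r(vp, 0)
 *         to persist needsSalvaged -> VUpdateVolume_r(&e, vp, VOL_UPDATE_NOFORCEOFF)
 */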
3028 VForceOffline_r(Volume * vp, int flags)
3032 #ifdef AFS_DEMAND_ATTACH_FS
3033 VChangeState_r(vp, VOL_STATE_ERROR);
3038 strcpy(V_offlineMessage(vp),
3039 "Forced offline due to internal error: volume needs to be salvaged");
3040 Log("Volume %u forced offline: it needs salvaging!\n", V_id(vp));
3043 vp->goingOffline = 0;
3044 V_needsSalvaged(vp) = 1;
3045 if (!(flags & VOL_FORCEOFF_NOUPDATE)) {
3046 VUpdateVolume_r(&error, vp, VOL_UPDATE_NOFORCEOFF);
3049 #ifdef AFS_DEMAND_ATTACH_FS
3050 VRequestSalvage_r(&error, vp, SALVSYNC_ERROR, VOL_SALVAGE_INVALIDATE_HEADER);
3051 #endif /* AFS_DEMAND_ATTACH_FS */
3053 #ifdef AFS_PTHREAD_ENV
3054 assert(pthread_cond_broadcast(&vol_put_volume_cond) == 0);
3055 #else /* AFS_PTHREAD_ENV */
3056 LWP_NoYieldSignal(VPutVolume);
3057 #endif /* AFS_PTHREAD_ENV */
3059 VReleaseVolumeHandles_r(vp);
3063 * force a volume offline.
3065 * @param[in] vp volume object pointer
3067 * @see VForceOffline_r
3070 VForceOffline(Volume * vp)
3073 VForceOffline_r(vp, 0);
3077 /* The opposite of VAttachVolume. The volume header is written to disk, with
3078 the inUse bit turned off. A copy of the header is maintained in memory,
3079 however (which is why this is VOffline, not VDetach).
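/* Sketch of the distinction:
 *
 *     VOffline(vp, "taken offline for maintenance");
 *         header written with inUse off; the in-memory header (and thus
 *         the offline message) survives for clients to see.
 *
 *     VDetachVolume(&ec, vp);
 *         the in-memory state is torn down as refcounts drain; nothing
 *         is kept around to answer for the volume.
 */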
3082 VOffline_r(Volume * vp, char *message)
3085 VolumeId vid = V_id(vp);
3087 assert(programType != volumeUtility);
3092 if (V_offlineMessage(vp)[0] == '\0')
3093 strncpy(V_offlineMessage(vp), message, sizeof(V_offlineMessage(vp)));
3094 V_offlineMessage(vp)[sizeof(V_offlineMessage(vp)) - 1] = '\0';
3096 vp->goingOffline = 1;
3097 #ifdef AFS_DEMAND_ATTACH_FS
3098 VChangeState_r(vp, VOL_STATE_GOING_OFFLINE);
3099 VCreateReservation_r(vp);
3102 /* wait for the volume to go offline */
3103 if (V_attachState(vp) == VOL_STATE_GOING_OFFLINE) {
3104 VWaitStateChange_r(vp);
3106 VCancelReservation_r(vp);
3107 #else /* AFS_DEMAND_ATTACH_FS */
3109 vp = VGetVolume_r(&error, vid); /* Wait for it to go offline */
3110 if (vp) /* In case it was reattached... */
3112 #endif /* AFS_DEMAND_ATTACH_FS */
3116 VOffline(Volume * vp, char *message)
3119 VOffline_r(vp, message);
3123 /* This gets used for the most part by utility routines that don't want
3124 * to keep all the volume headers around. Generally, the file server won't
3125 * call this routine, because then the offline message in the volume header
3126 * (or other information) won't be available to clients. For NAMEI, also
3127 * close the file handles. However, the fileserver does call this during
3128 * an attach following a volume operation.
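/* Summary of the FSSYNC "checkin" modes chosen below when a volume utility
 * detaches:
 *
 *     FSYNC_VOL_ON         normal case: ask the fileserver to put it online
 *     FSYNC_VOL_DONE       V_destroyMe == DESTROY_ME: volume is junk
 *     FSYNC_VOL_LEAVE_OFF  (DAFS only) not blessed or not in service
 */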
3131 VDetachVolume_r(Error * ec, Volume * vp)
3134 struct DiskPartition64 *tpartp;
3135 int notifyServer, useDone = FSYNC_VOL_ON;
3137 *ec = 0; /* always "succeeds" */
3138 if (programType == volumeUtility) {
3139 notifyServer = vp->needsPutBack;
3140 if (V_destroyMe(vp) == DESTROY_ME)
3141 useDone = FSYNC_VOL_DONE;
3142 #ifdef AFS_DEMAND_ATTACH_FS
3143 else if (!V_blessed(vp) || !V_inService(vp))
3144 useDone = FSYNC_VOL_LEAVE_OFF;
3147 tpartp = vp->partition;
3149 DeleteVolumeFromHashTable(vp);
3150 vp->shuttingDown = 1;
3151 #ifdef AFS_DEMAND_ATTACH_FS
3152 DeleteVolumeFromVByPList_r(vp);
3154 VChangeState_r(vp, VOL_STATE_SHUTTING_DOWN);
3155 #endif /* AFS_DEMAND_ATTACH_FS */
3157 /* Will be detached sometime in the future--this is OK since the volume is offline */
3159 /* XXX the following code should really be moved to VCheckDetach() since the volume
3160 * is not technically detached until the refcounts reach zero
3162 #ifdef FSSYNC_BUILD_CLIENT
3163 if (programType == volumeUtility && notifyServer) {
3165 * Note: The server is not notified in the case of a bogus volume
3166 * explicitly to make it possible to create a volume, do a partial
3167 * restore, then abort the operation without ever putting the volume
3168 * online. This is essential in the case of a volume move operation
3169 * between two partitions on the same server. In that case, there
3170 * would be two instances of the same volume, one of them bogus,
3171 * which the file server would attempt to bring online
3173 FSYNC_VolOp(volume, tpartp->name, useDone, 0, NULL);
3174 /* XXX this code path is only hit by volume utilities, thus
3175 * V_BreakVolumeCallbacks will always be NULL. if we really
3176 * want to break callbacks in this path we need to use FSYNC_VolOp() */
3178 /* Detaching it, so break all callbacks on it */
3179 if (V_BreakVolumeCallbacks) {
3180 Log("volume %u detached; breaking all call backs\n", volume);
3181 (*V_BreakVolumeCallbacks) (volume);
3185 #endif /* FSSYNC_BUILD_CLIENT */
3189 VDetachVolume(Error * ec, Volume * vp)
3192 VDetachVolume_r(ec, vp);
3197 /***************************************************/
3198 /* Volume fd/inode handle closing routines */
3199 /***************************************************/
3201 /* For VDetachVolume, we close all cached file descriptors, but keep
3202 * the Inode handles in case we need to read from a busy volume.
3204 /* for demand attach, caller MUST hold ref count on vp */
3206 VCloseVolumeHandles_r(Volume * vp)
3208 #ifdef AFS_DEMAND_ATTACH_FS
3209 VolState state_save;
3211 state_save = VChangeState_r(vp, VOL_STATE_OFFLINING);
3216 * XXX need to investigate whether we can perform
3217 * DFlushVolume outside of vol_glock_mutex...
3219 * VCloseVnodeFiles_r drops the glock internally */
3220 DFlushVolume(V_id(vp));
3221 VCloseVnodeFiles_r(vp);
3223 #ifdef AFS_DEMAND_ATTACH_FS
3227 /* Too time consuming and unnecessary for the volserver */
3228 if (programType != volumeUtility) {
3229 IH_CONDSYNC(vp->vnodeIndex[vLarge].handle);
3230 IH_CONDSYNC(vp->vnodeIndex[vSmall].handle);
3231 IH_CONDSYNC(vp->diskDataHandle);
3233 IH_CONDSYNC(vp->linkHandle);
3234 #endif /* AFS_NT40_ENV */
3237 IH_REALLYCLOSE(vp->vnodeIndex[vLarge].handle);
3238 IH_REALLYCLOSE(vp->vnodeIndex[vSmall].handle);
3239 IH_REALLYCLOSE(vp->diskDataHandle);
3240 IH_REALLYCLOSE(vp->linkHandle);
3242 #ifdef AFS_DEMAND_ATTACH_FS
3244 VChangeState_r(vp, state_save);
3248 /* For both VForceOffline and VOffline, we close all relevant handles.
3249 * For VOffline, if we re-attach the volume, the files may possibly be
3250 * different than before.
3252 /* for demand attach, caller MUST hold a ref count on vp */
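/* Contrast with VCloseVolumeHandles_r above: the "close" variant uses
 * IH_REALLYCLOSE to shed cached file descriptors while keeping the inode
 * handles for later reads; the "release" variant below uses IH_RELEASE to
 * drop the handles themselves, so a re-attach rebuilds them from scratch.
 */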
3254 VReleaseVolumeHandles_r(Volume * vp)
3256 #ifdef AFS_DEMAND_ATTACH_FS
3257 VolState state_save;
3259 state_save = VChangeState_r(vp, VOL_STATE_DETACHING);
3262 /* XXX need to investigate whether we can perform
3263 * DFlushVolume outside of vol_glock_mutex... */
3264 DFlushVolume(V_id(vp));
3266 VReleaseVnodeFiles_r(vp); /* releases the glock internally */
3268 #ifdef AFS_DEMAND_ATTACH_FS
3272 /* Too time consuming and unnecessary for the volserver */
3273 if (programType != volumeUtility) {
3274 IH_CONDSYNC(vp->vnodeIndex[vLarge].handle);
3275 IH_CONDSYNC(vp->vnodeIndex[vSmall].handle);
3276 IH_CONDSYNC(vp->diskDataHandle);
3278 IH_CONDSYNC(vp->linkHandle);
3279 #endif /* AFS_NT40_ENV */
3282 IH_RELEASE(vp->vnodeIndex[vLarge].handle);
3283 IH_RELEASE(vp->vnodeIndex[vSmall].handle);
3284 IH_RELEASE(vp->diskDataHandle);
3285 IH_RELEASE(vp->linkHandle);
3287 #ifdef AFS_DEMAND_ATTACH_FS
3289 VChangeState_r(vp, state_save);
3294 /***************************************************/
3295 /* Volume write and fsync routines */
3296 /***************************************************/
3299 VUpdateVolume_r(Error * ec, Volume * vp, int flags)
3301 #ifdef AFS_DEMAND_ATTACH_FS
3302 VolState state_save;
3304 if (flags & VOL_UPDATE_WAIT) {
3305 VCreateReservation_r(vp);
3306 VWaitExclusiveState_r(vp);
3311 if (programType == fileServer)
3313 (V_inUse(vp) ? V_nextVnodeUnique(vp) +
3314 200 : V_nextVnodeUnique(vp));
3316 #ifdef AFS_DEMAND_ATTACH_FS
3317 state_save = VChangeState_r(vp, VOL_STATE_UPDATING);
3321 WriteVolumeHeader_r(ec, vp);
3323 #ifdef AFS_DEMAND_ATTACH_FS
3325 VChangeState_r(vp, state_save);
3326 if (flags & VOL_UPDATE_WAIT) {
3327 VCancelReservation_r(vp);
3332 Log("VUpdateVolume: error updating volume header, volume %u (%s)\n",
3333 V_id(vp), V_name(vp));
3334 /* try to update on-disk header,
3335 * while preventing infinite recursion */
3336 if (!(flags & VOL_UPDATE_NOFORCEOFF)) {
3337 VForceOffline_r(vp, VOL_FORCEOFF_NOUPDATE);
3343 VUpdateVolume(Error * ec, Volume * vp)
3346 VUpdateVolume_r(ec, vp, VOL_UPDATE_WAIT);
3351 VSyncVolume_r(Error * ec, Volume * vp, int flags)
3355 #ifdef AFS_DEMAND_ATTACH_FS
3356 VolState state_save;
3359 if (flags & VOL_SYNC_WAIT) {
3360 VUpdateVolume_r(ec, vp, VOL_UPDATE_WAIT);
3362 VUpdateVolume_r(ec, vp, 0);
3365 #ifdef AFS_DEMAND_ATTACH_FS
3366 state_save = VChangeState_r(vp, VOL_STATE_UPDATING);
3369 fdP = IH_OPEN(V_diskDataHandle(vp));
3370 assert(fdP != NULL);
3371 code = FDH_SYNC(fdP);
3374 #ifdef AFS_DEMAND_ATTACH_FS
3376 VChangeState_r(vp, state_save);
3382 VSyncVolume(Error * ec, Volume * vp)
3385 VSyncVolume_r(ec, vp, VOL_SYNC_WAIT);
3390 /***************************************************/
3391 /* Volume deallocation routines */
3392 /***************************************************/
3394 #ifdef AFS_DEMAND_ATTACH_FS
3396 FreeVolume(Volume * vp)
3398 /* free the heap space, iff it's safe.
3399 * otherwise, pull it out of the hash table, so it
3400 * will get deallocated when all refs to it go away */
3401 if (!VCheckFree(vp)) {
3402 DeleteVolumeFromHashTable(vp);
3403 DeleteVolumeFromVByPList_r(vp);
3405 /* make sure we invalidate the header cache entry */
3406 FreeVolumeHeader(vp);
3409 #endif /* AFS_DEMAND_ATTACH_FS */
3412 ReallyFreeVolume(Volume * vp)
3417 #ifdef AFS_DEMAND_ATTACH_FS
3419 VChangeState_r(vp, VOL_STATE_FREED);
3420 if (vp->pending_vol_op)
3421 free(vp->pending_vol_op);
3422 #endif /* AFS_DEMAND_ATTACH_FS */
3423 for (i = 0; i < nVNODECLASSES; i++)
3424 if (vp->vnodeIndex[i].bitmap)
3425 free(vp->vnodeIndex[i].bitmap);
3426 FreeVolumeHeader(vp);
3427 #ifndef AFS_DEMAND_ATTACH_FS
3428 DeleteVolumeFromHashTable(vp);
3429 #endif /* AFS_DEMAND_ATTACH_FS */
3433 /* check to see if we should shut down this volume
3434 * returns 1 if volume was freed, 0 otherwise */
3435 #ifdef AFS_DEMAND_ATTACH_FS
3437 VCheckDetach(register Volume * vp)
3442 if (vp->nUsers || vp->nWaiters)
3445 if (vp->shuttingDown) {
3447 if ((programType != fileServer) &&
3448 (V_inUse(vp) == programType) &&
3449 ((V_checkoutMode(vp) == V_VOLUPD) ||
3450 ((V_checkoutMode(vp) == V_CLONE) &&
3451 (VolumeWriteable(vp))))) {
3453 VUpdateVolume_r(&ec, vp, VOL_UPDATE_NOFORCEOFF);
3455 Log("VCheckDetach: failed to clear inUse failed during detachment of volid %u\n",
3459 VReleaseVolumeHandles_r(vp);
3461 ReallyFreeVolume(vp);
3462 if (programType == fileServer) {
3463 assert(pthread_cond_broadcast(&vol_put_volume_cond) == 0);
3468 #else /* AFS_DEMAND_ATTACH_FS */
3470 VCheckDetach(register Volume * vp)
3478 if (vp->shuttingDown) {
3480 if ((programType != fileServer) &&
3481 (V_inUse(vp) == programType) &&
3482 ((V_checkoutMode(vp) == V_VOLUPD) ||
3483 ((V_checkoutMode(vp) == V_CLONE) &&
3484 (VolumeWriteable(vp))))) {
3486 VUpdateVolume_r(&ec, vp, VOL_UPDATE_NOFORCEOFF);
3488 Log("VCheckDetach: failed to clear inUse failed during detachment of volid %u\n",
3492 VReleaseVolumeHandles_r(vp);
3493 ReallyFreeVolume(vp);
3494 if (programType == fileServer) {
3495 #if defined(AFS_PTHREAD_ENV)
3496 assert(pthread_cond_broadcast(&vol_put_volume_cond) == 0);
3497 #else /* AFS_PTHREAD_ENV */
3498 LWP_NoYieldSignal(VPutVolume);
3499 #endif /* AFS_PTHREAD_ENV */
3504 #endif /* AFS_DEMAND_ATTACH_FS */
3506 /* check to see if we should offline this volume
3507 * return 1 if volume went offline, 0 otherwise */
3508 #ifdef AFS_DEMAND_ATTACH_FS
3510 VCheckOffline(register Volume * vp)
3512 Volume * rvp = NULL;
3515 if (vp->goingOffline && !vp->nUsers) {
3517 assert(programType == fileServer);
3518 assert((V_attachState(vp) != VOL_STATE_ATTACHED) &&
3519 (V_attachState(vp) != VOL_STATE_FREED) &&
3520 (V_attachState(vp) != VOL_STATE_PREATTACHED) &&
3521 (V_attachState(vp) != VOL_STATE_UNATTACHED));
3525 * VOL_STATE_GOING_OFFLINE
3526 * VOL_STATE_SHUTTING_DOWN
3527 * VIsErrorState(V_attachState(vp))
3528 * VIsExclusiveState(V_attachState(vp))
3531 VCreateReservation_r(vp);
3532 VChangeState_r(vp, VOL_STATE_OFFLINING);
3535 /* must clear the goingOffline flag before we drop the glock */
3536 vp->goingOffline = 0;
3541 /* perform async operations */
3542 VUpdateVolume_r(&error, vp, 0);
3543 VCloseVolumeHandles_r(vp);
3546 Log("VOffline: Volume %u (%s) is now offline", V_id(vp),
3548 if (V_offlineMessage(vp)[0])
3549 Log(" (%s)", V_offlineMessage(vp));
3553 /* invalidate the volume header cache entry */
3554 FreeVolumeHeader(vp);
3556 /* if nothing changed state to error or salvaging,
3557 * drop state to unattached */
3558 if (!VIsErrorState(V_attachState(vp))) {
3559 VChangeState_r(vp, VOL_STATE_UNATTACHED);
3561 VCancelReservation_r(vp);
3562 /* no usage of vp is safe beyond this point */
3566 #else /* AFS_DEMAND_ATTACH_FS */
3568 VCheckOffline(register Volume * vp)
3570 Volume * rvp = NULL;
3573 if (vp->goingOffline && !vp->nUsers) {
3575 assert(programType == fileServer);
3578 vp->goingOffline = 0;
3580 VUpdateVolume_r(&error, vp, 0);
3581 VCloseVolumeHandles_r(vp);
3583 Log("VOffline: Volume %u (%s) is now offline", V_id(vp),
3585 if (V_offlineMessage(vp)[0])
3586 Log(" (%s)", V_offlineMessage(vp));
3589 FreeVolumeHeader(vp);
3590 #ifdef AFS_PTHREAD_ENV
3591 assert(pthread_cond_broadcast(&vol_put_volume_cond) == 0);
3592 #else /* AFS_PTHREAD_ENV */
3593 LWP_NoYieldSignal(VPutVolume);
3594 #endif /* AFS_PTHREAD_ENV */
3598 #endif /* AFS_DEMAND_ATTACH_FS */
3600 /***************************************************/
3601 /* demand attach fs ref counting routines */
3602 /***************************************************/
3604 #ifdef AFS_DEMAND_ATTACH_FS
3605 /* the following two functions handle reference counting for
3606 * asynchronous operations on volume structs.
3608 * their purpose is to prevent a VDetachVolume or VShutdown
3609 * from free()ing the Volume struct during an async i/o op */
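/* Canonical pairing (sketch): code that must block or drop VOL_LOCK while
 * keeping vp pinned brackets the region with a reservation:
 *
 *     VCreateReservation_r(vp);        nWaiters++
 *     VWaitExclusiveState_r(vp);       may drop/reacquire VOL_LOCK
 *     ... async work on vp ...
 *     VCancelReservation_r(vp);        nWaiters--; may detach/free vp
 *
 * after the cancel, vp must not be touched again unless another reference
 * is still held (see the warning on VCancelReservation_r below).
 */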
3611 /* register with the async volume op ref counter */
3612 /* VCreateReservation_r moved into inline code header because it
3613 * is now needed in vnode.c -- tkeiser 11/20/2007
3617 * decrement volume-package internal refcount.
3619 * @param vp volume object pointer
3621 * @internal volume package internal use only
3624 * @arg VOL_LOCK is held
3625 * @arg lightweight refcount held
3627 * @post volume waiters refcount is decremented; volume may
3628 * have been deallocated/shutdown/offlined/salvaged/
3629 * whatever during the process
3631 * @warning once you have tossed your last reference (you can acquire
3632 * lightweight refs recursively) it is NOT SAFE to reference
3633 * a volume object pointer ever again
3635 * @see VCreateReservation_r
3637 * @note DEMAND_ATTACH_FS only
3640 VCancelReservation_r(Volume * vp)
3642 assert(--vp->nWaiters >= 0);
3643 if (vp->nWaiters == 0) {
3645 if (!VCheckDetach(vp)) {
3652 /* check to see if we should free this volume now
3653 * return 1 if volume was freed, 0 otherwise */
3655 VCheckFree(Volume * vp)
3658 if ((vp->nUsers == 0) &&
3659 (vp->nWaiters == 0) &&
3660 !(V_attachFlags(vp) & (VOL_IN_HASH |
3664 ReallyFreeVolume(vp);
3669 #endif /* AFS_DEMAND_ATTACH_FS */
3672 /***************************************************/
3673 /* online volume operations routines */
3674 /***************************************************/
3676 #ifdef AFS_DEMAND_ATTACH_FS
3678 * register a volume operation on a given volume.
3680 * @param[in] vp volume object
3681 * @param[in] vopinfo volume operation info object
3683 * @pre VOL_LOCK is held
3685 * @post volume operation info object attached to volume object.
3686 * volume operation statistics updated.
3688 * @note by "attached" we mean a copy of the passed in object is made
3690 * @internal volume package internal use only
3693 VRegisterVolOp_r(Volume * vp, FSSYNC_VolOp_info * vopinfo)
3695 FSSYNC_VolOp_info * info;
3697 /* attach a vol op info node to the volume struct */
3698 info = (FSSYNC_VolOp_info *) malloc(sizeof(FSSYNC_VolOp_info));
3699 assert(info != NULL);
3700 memcpy(info, vopinfo, sizeof(FSSYNC_VolOp_info));
3701 vp->pending_vol_op = info;
3704 vp->stats.last_vol_op = FT_ApproxTime();
3705 vp->stats.vol_ops++;
3706 IncUInt64(&VStats.vol_ops);
3712 * deregister the volume operation attached to this volume.
3714 * @param[in] vp volume object pointer
3716 * @pre VOL_LOCK is held
3718 * @post the volume operation info object is detached from the volume object
3720 * @internal volume package internal use only
3723 VDeregisterVolOp_r(Volume * vp)
3725 if (vp->pending_vol_op) {
3726 free(vp->pending_vol_op);
3727 vp->pending_vol_op = NULL;
3731 #endif /* AFS_DEMAND_ATTACH_FS */
3734 * determine whether it is safe to leave a volume online during
3735 * the volume operation described by the vopinfo object.
3737 * @param[in] vp volume object
3738 * @param[in] vopinfo volume operation info object
3740 * @return whether it is safe to leave volume online
3741 * @retval 0 it is NOT SAFE to leave the volume online
3742 * @retval 1 it is safe to leave the volume online during the operation
3745 * @arg VOL_LOCK is held
3746 * @arg disk header attached to vp (heavyweight ref on vp will guarantee
3747 * this condition is met)
3749 * @internal volume package internal use only
3752 VVolOpLeaveOnline_r(Volume * vp, FSSYNC_VolOp_info * vopinfo)
3754 return (vopinfo->com.command == FSYNC_VOL_NEEDVOLUME &&
3755 (vopinfo->com.reason == V_READONLY ||
3756 (!VolumeWriteable(vp) &&
3757 (vopinfo->com.reason == V_CLONE ||
3758 vopinfo->com.reason == V_DUMP))));
3762 * determine whether VBUSY should be set during this volume operation.
3764 * @param[in] vp volume object
3765 * @param[in] vopinfo volume operation info object
3767 * @return whether VBUSY should be set
3768 * @retval 0 VBUSY does NOT need to be set
3769 * @retval 1 VBUSY SHOULD be set
3771 * @pre VOL_LOCK is held
3773 * @internal volume package internal use only
3776 VVolOpSetVBusy_r(Volume * vp, FSSYNC_VolOp_info * vopinfo)
3778 return (vopinfo->com.command == FSYNC_VOL_NEEDVOLUME &&
3779 (vopinfo->com.reason == V_CLONE ||
3780 vopinfo->com.reason == V_DUMP));
3784 /***************************************************/
3785 /* online salvager routines */
3786 /***************************************************/
3787 #if defined(AFS_DEMAND_ATTACH_FS)
3788 #define SALVAGE_PRIO_UPDATE_INTERVAL 3 /**< number of seconds between prio updates */
3789 #define SALVAGE_COUNT_MAX 16 /**< number of online salvages we
3790 * allow before moving the volume
3791 * into a permanent error state
3793 * once this threshold is reached,
3794 * the operator will have to manually
3795 * issue a 'bos salvage' to bring
3796 * the volume back online
3800 * check whether a salvage needs to be performed on this volume.
3802 * @param[in] vp pointer to volume object
3804 * @return status code
3805 * @retval 0 no salvage scheduled
3806 * @retval 1 a salvage has been scheduled with the salvageserver
3808 * @pre VOL_LOCK is held
3810 * @post if salvage request flag is set and nUsers and nWaiters are zero,
3811 * then a salvage will be requested
3813 * @note this is one of the event handlers called by VCancelReservation_r
3815 * @see VCancelReservation_r
3817 * @internal volume package internal use only.
3820 VCheckSalvage(register Volume * vp)
3823 #ifdef SALVSYNC_BUILD_CLIENT
3824 if (vp->nUsers || vp->nWaiters)
3826 if (vp->salvage.requested) {
3827 VScheduleSalvage_r(vp);
3830 #endif /* SALVSYNC_BUILD_CLIENT */
3835 * request volume salvage.
3837 * @param[out] ec computed client error code
3838 * @param[in] vp volume object pointer
3839 * @param[in] reason reason code (passed to salvageserver via SALVSYNC)
3840 * @param[in] flags see flags note below
3843 * VOL_SALVAGE_INVALIDATE_HEADER causes volume header cache entry
3844 * to be invalidated.
3846 * @pre VOL_LOCK is held.
3848 * @post volume state is changed.
3849 * for fileserver, salvage will be requested once refcount reaches zero.
3851 * @return operation status code
3852 * @retval 0 volume salvage will occur
3853 * @retval 1 volume salvage could not be scheduled
3855 * @note DAFS fileserver only
3857 * @note this call does not synchronously schedule a volume salvage. rather,
3858 * it sets volume state so that when volume refcounts reach zero, a
3859 * volume salvage will occur. by "refcounts", we mean both nUsers and
3860 * nWaiters must be zero.
3862 * @internal volume package internal use only.
3865 VRequestSalvage_r(Error * ec, Volume * vp, int reason, int flags)
3869 * for DAFS volume utilities, transition to error state
3870 * (at some point in the future, we should consider
3871 * making volser talk to salsrv)
3873 if (programType != fileServer) {
3874 VChangeState_r(vp, VOL_STATE_ERROR);
3879 if (!vp->salvage.requested) {
3880 vp->salvage.requested = 1;
3881 vp->salvage.reason = reason;
3882 vp->stats.last_salvage = FT_ApproxTime();
3883 if (flags & VOL_SALVAGE_INVALIDATE_HEADER) {
3884 /* XXX this should likely be changed to FreeVolumeHeader() */
3885 ReleaseVolumeHeader(vp->header);
3887 if (vp->stats.salvages < SALVAGE_COUNT_MAX) {
3888 VChangeState_r(vp, VOL_STATE_SALVAGING);
3891 Log("VRequestSalvage: volume %u online salvaged too many times; forced offline.\n", vp->hashid);
3892 VChangeState_r(vp, VOL_STATE_ERROR);
3901 * update salvageserver scheduling priority for a volume.
3903 * @param[in] vp pointer to volume object
3905 * @return operation status
3907 * @retval 1 request denied, or SALVSYNC communications failure
3909 * @pre VOL_LOCK is held.
3911 * @post in-core salvage priority counter is incremented. if at least
3912 * SALVAGE_PRIO_UPDATE_INTERVAL seconds have elapsed since the
3913 * last SALVSYNC_RAISEPRIO request, we contact the salvageserver
3914 * to update its priority queue. if no salvage is scheduled,
3915 * this function is a no-op.
3917 * @note DAFS fileserver only
3919 * @note this should be called whenever a VGetVolume fails due to a
3920 * pending salvage request
3922 * @todo should set exclusive state and drop glock around salvsync call
3924 * @internal volume package internal use only.
3927 VUpdateSalvagePriority_r(Volume * vp)
3932 #ifdef SALVSYNC_BUILD_CLIENT
3934 now = FT_ApproxTime();
3936 /* update the salvageserver priority queue occasionally so that
3937 * frequently requested volumes get moved to the head of the queue
3939 if ((vp->salvage.scheduled) &&
3940 (vp->stats.last_salvage_req < (now-SALVAGE_PRIO_UPDATE_INTERVAL))) {
3941 code = SALVSYNC_SalvageVolume(vp->hashid,
3942 VPartitionPath(vp->partition),
3947 vp->stats.last_salvage_req = now;
3948 if (code != SYNC_OK) {
3952 #endif /* SALVSYNC_BUILD_CLIENT */
3958 * schedule a salvage with the salvage server.
3960 * @param[in] vp pointer to volume object
3962 * @return operation status
3963 * @retval 0 salvage scheduled successfully
3964 * @retval 1 salvage not scheduled, or SALVSYNC com error
3967 * @arg VOL_LOCK is held.
3968 * @arg nUsers and nWaiters should be zero.
3970 * @post salvageserver is sent a salvage request
3972 * @note DAFS fileserver only
3974 * @internal volume package internal use only.
3977 VScheduleSalvage_r(Volume * vp)
3980 #ifdef SALVSYNC_BUILD_CLIENT
3981 VolState state_save;
3984 if (vp->nWaiters || vp->nUsers) {
3988 /* prevent endless salvage, attach, salvage, attach, ... loops */
3989 if (vp->stats.salvages >= SALVAGE_COUNT_MAX)
3992 if (!vp->salvage.scheduled) {
3993 /* if we haven't previously scheduled a salvage, do so now
3995 * set the volume to an exclusive state and drop the lock
3996 * around the SALVSYNC call
3998 * note that we do NOT acquire a reservation here -- doing so
3999 * could result in unbounded recursion
4001 strlcpy(partName, VPartitionPath(vp->partition), sizeof(partName));
4002 state_save = VChangeState_r(vp, VOL_STATE_SALVSYNC_REQ);
4003 V_attachFlags(vp) |= VOL_IS_BUSY;
4006 /* can't use V_id() since there's no guarantee
4007 * we have the disk data header at this point */
4008 code = SALVSYNC_SalvageVolume(vp->hashid,
4015 VChangeState_r(vp, state_save);
4016 V_attachFlags(vp) &= ~(VOL_IS_BUSY);
4018 if (code == SYNC_OK) {
4019 vp->salvage.scheduled = 1;
4020 vp->stats.salvages++;
4021 vp->stats.last_salvage_req = FT_ApproxTime();
4022 IncUInt64(&VStats.salvages);
4026 case SYNC_BAD_COMMAND:
4027 case SYNC_COM_ERROR:
4030 Log("VScheduleSalvage_r: SALVSYNC request denied\n");
4033 Log("VScheduleSalvage_r: SALVSYNC unknown protocol error\n");
4038 #endif /* SALVSYNC_BUILD_CLIENT */
4043 * ask salvageserver to cancel a scheduled salvage operation.
4045 * @param[in] vp pointer to volume object
4046 * @param[in] reason SALVSYNC protocol reason code
4048 * @return operation status
4050 * @retval 1 request failed
4052 * @pre VOL_LOCK is held.
4054 * @post salvageserver is sent a request to cancel the volume salvage
4056 * @todo should set exclusive state and drop glock around salvsync call
4058 * @internal volume package internal use only.
4061 VCancelSalvage_r(Volume * vp, int reason)
4065 #ifdef SALVSYNC_BUILD_CLIENT
4066 if (vp->salvage.scheduled) {
4067 code = SALVSYNC_SalvageVolume(vp->hashid,
4068 VPartitionPath(vp->partition),
4073 if (code == SYNC_OK) {
4074 vp->salvage.scheduled = 0;
4079 #endif /* SALVSYNC_BUILD_CLIENT */
4084 #ifdef SALVSYNC_BUILD_CLIENT
4086 * connect to the salvageserver SYNC service.
4088 * @return operation status
4092 * @post connection to salvageserver SYNC service established
4094 * @see VConnectSALV_r
4095 * @see VDisconnectSALV
4096 * @see VReconnectSALV
4103 retVal = VConnectSALV_r();
4109 * connect to the salvageserver SYNC service.
4111 * @return operation status
4115 * @pre VOL_LOCK is held.
4117 * @post connection to salvageserver SYNC service established
4120 * @see VDisconnectSALV_r
4121 * @see VReconnectSALV_r
4122 * @see SALVSYNC_clientInit
4124 * @internal volume package internal use only.
4127 VConnectSALV_r(void)
4129 return SALVSYNC_clientInit();
4133 * disconnect from the salvageserver SYNC service.
4135 * @return operation status
4138 * @pre client should have a live connection to the salvageserver
4140 * @post connection to salvageserver SYNC service destroyed
4142 * @see VDisconnectSALV_r
4144 * @see VReconnectSALV
4147 VDisconnectSALV(void)
4151 VDisconnectSALV_r();
4157 * disconnect from the salvageserver SYNC service.
4159 * @return operation status
4163 * @arg VOL_LOCK is held.
4164 * @arg client should have a live connection to the salvageserver.
4166 * @post connection to salvageserver SYNC service destroyed
4168 * @see VDisconnectSALV
4169 * @see VConnectSALV_r
4170 * @see VReconnectSALV_r
4171 * @see SALVSYNC_clientFinis
4173 * @internal volume package internal use only.
4176 VDisconnectSALV_r(void)
4178 return SALVSYNC_clientFinis();
4182 * disconnect and then re-connect to the salvageserver SYNC service.
4184 * @return operation status
4188 * @pre client should have a live connection to the salvageserver
4190 * @post old connection is dropped, and a new one is established
4193 * @see VDisconnectSALV
4194 * @see VReconnectSALV_r
4197 VReconnectSALV(void)
4201 retVal = VReconnectSALV_r();
4207 * disconnect and then re-connect to the salvageserver SYNC service.
4209 * @return operation status
4214 * @arg VOL_LOCK is held.
4215 * @arg client should have a live connection to the salvageserver.
4217 * @post old connection is dropped, and a new one is established
4219 * @see VConnectSALV_r
4220 * @see VDisconnectSALV
4221 * @see VReconnectSALV
4222 * @see SALVSYNC_clientReconnect
4224 * @internal volume package internal use only.
4227 VReconnectSALV_r(void)
4229 return SALVSYNC_clientReconnect();
4231 #endif /* SALVSYNC_BUILD_CLIENT */
4232 #endif /* AFS_DEMAND_ATTACH_FS */
4235 /***************************************************/
4236 /* FSSYNC routines */
4237 /***************************************************/
4239 /* This must be called by any volume utility which needs to run while the
4240 file server is also running. This is separated from VInitVolumePackage so
4241 that a utility can fork--and each of the children can independently
4242 initialize communication with the file server */
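/* Sketch of the fork pattern described above (error handling omitted):
 *
 *     VConnectFS();                    parent establishes its SYNC channel
 *     if (fork() == 0) {
 *         VChildProcReconnectFS();     child discards the inherited handle
 *                                      and opens its own connection
 *         ... child runs its volume operations ...
 *     }
 */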
4243 #ifdef FSSYNC_BUILD_CLIENT
4245 * connect to the fileserver SYNC service.
4247 * @return operation status
4252 * @arg VInit must equal 2.
4253 * @arg Program Type must not be fileserver or salvager.
4255 * @post connection to fileserver SYNC service established
4258 * @see VDisconnectFS
4259 * @see VChildProcReconnectFS
4266 retVal = VConnectFS_r();
4272 * connect to the fileserver SYNC service.
4274 * @return operation status
4279 * @arg VInit must equal 2.
4280 * @arg Program Type must not be fileserver or salvager.
4281 * @arg VOL_LOCK is held.
4283 * @post connection to fileserver SYNC service established
4286 * @see VDisconnectFS_r
4287 * @see VChildProcReconnectFS_r
4289 * @internal volume package internal use only.
4295 assert((VInit == 2) &&
4296 (programType != fileServer) &&
4297 (programType != salvager));
4298 rc = FSYNC_clientInit();
4305 * disconnect from the fileserver SYNC service.
4308 * @arg client should have a live connection to the fileserver.
4309 * @arg VOL_LOCK is held.
4310 * @arg Program Type must not be fileserver or salvager.
4312 * @post connection to fileserver SYNC service destroyed
4314 * @see VDisconnectFS
4316 * @see VChildProcReconnectFS_r
4318 * @internal volume package internal use only.
4321 VDisconnectFS_r(void)
4323 assert((programType != fileServer) &&
4324 (programType != salvager));
4325 FSYNC_clientFinis();
4330 * disconnect from the fileserver SYNC service.
4333 * @arg client should have a live connection to the fileserver.
4334 * @arg Program Type must not be fileserver or salvager.
4336 * @post connection to fileserver SYNC service destroyed
4338 * @see VDisconnectFS_r
4340 * @see VChildProcReconnectFS
4351 * connect to the fileserver SYNC service from a child process following a fork.
4353 * @return operation status
4358 * @arg VOL_LOCK is held.
4359 * @arg current FSYNC handle is shared with a parent process
4361 * @post current FSYNC handle is discarded and a new connection to the
4362 * fileserver SYNC service is established
4364 * @see VChildProcReconnectFS
4366 * @see VDisconnectFS_r
4368 * @internal volume package internal use only.
4371 VChildProcReconnectFS_r(void)
4373 return FSYNC_clientChildProcReconnect();
4377 * connect to the fileserver SYNC service from a child process following a fork.
4379 * @return operation status
4383 * @pre current FSYNC handle is shared with a parent process
4385 * @post current FSYNC handle is discarded and a new connection to the
4386 * fileserver SYNC service is established
4388 * @see VChildProcReconnectFS_r
4390 * @see VDisconnectFS
4393 VChildProcReconnectFS(void)
4397 ret = VChildProcReconnectFS_r();
4401 #endif /* FSSYNC_BUILD_CLIENT */
4404 /***************************************************/
4405 /* volume bitmap routines */
4406 /***************************************************/
4409 * For demand attach fs, flags parameter controls
4410 * locking behavior. If (flags & VOL_ALLOC_BITMAP_WAIT)
4411 * is set, then this function will create a reservation
4412 * and block on any other exclusive operations. Otherwise,
4413 * this function assumes the caller already has exclusive
4414 * access to vp, and we just change the volume state.
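/* e.g. the VAllocBitmapEntry wrapper below passes VOL_ALLOC_BITMAP_WAIT,
 * which is the safe default for callers that do not already hold vp in an
 * exclusive state (sketch; ec must be checked before using the result):
 *
 *     bitNumber = VAllocBitmapEntry(&ec, vp, &vp->vnodeIndex[vSmall]);
 */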
4417 VAllocBitmapEntry_r(Error * ec, Volume * vp,
4418 struct vnodeIndex *index, int flags)
4421 register byte *bp, *ep;
4422 #ifdef AFS_DEMAND_ATTACH_FS
4423 VolState state_save;
4424 #endif /* AFS_DEMAND_ATTACH_FS */
4428 /* This test is probably redundant */
4429 if (!VolumeWriteable(vp)) {
4430 *ec = (bit32) VREADONLY;
4434 #ifdef AFS_DEMAND_ATTACH_FS
4435 if (flags & VOL_ALLOC_BITMAP_WAIT) {
4436 VCreateReservation_r(vp);
4437 VWaitExclusiveState_r(vp);
4439 state_save = VChangeState_r(vp, VOL_STATE_GET_BITMAP);
4440 #endif /* AFS_DEMAND_ATTACH_FS */
4443 if ((programType == fileServer) && !index->bitmap) {
4445 #ifndef AFS_DEMAND_ATTACH_FS
4446 /* demand attach fs uses the volume state to avoid races.
4447 * specialStatus field is not used at all */
4449 if (vp->specialStatus == VBUSY) {
4450 if (vp->goingOffline) { /* vos dump waiting for the volume to
4451 * go offline. We probably come here
4452 * from AddNewReadableResidency */
4455 while (vp->specialStatus == VBUSY) {
4456 #ifdef AFS_PTHREAD_ENV
4460 #else /* !AFS_PTHREAD_ENV */
4462 #endif /* !AFS_PTHREAD_ENV */
4466 #endif /* !AFS_DEMAND_ATTACH_FS */
4468 if (!index->bitmap) {
4469 #ifndef AFS_DEMAND_ATTACH_FS
4470 vp->specialStatus = VBUSY; /* Stop anyone else from using it. */
4471 #endif /* AFS_DEMAND_ATTACH_FS */
4472 for (i = 0; i < nVNODECLASSES; i++) {
4473 VGetBitmap_r(ec, vp, i);
4475 #ifdef AFS_DEMAND_ATTACH_FS
4476 VRequestSalvage_r(ec, vp, SALVSYNC_ERROR, VOL_SALVAGE_INVALIDATE_HEADER);
4477 #else /* AFS_DEMAND_ATTACH_FS */
4478 DeleteVolumeFromHashTable(vp);
4479 vp->shuttingDown = 1; /* Let who has it free it. */
4480 vp->specialStatus = 0;
4481 #endif /* AFS_DEMAND_ATTACH_FS */
4486 #ifndef AFS_DEMAND_ATTACH_FS
4488 vp->specialStatus = 0; /* Allow others to have access. */
4489 #endif /* AFS_DEMAND_ATTACH_FS */
4492 #endif /* BITMAP_LATER */
4494 #ifdef AFS_DEMAND_ATTACH_FS
4496 #endif /* AFS_DEMAND_ATTACH_FS */
4497 bp = index->bitmap + index->bitmapOffset;
4498 ep = index->bitmap + index->bitmapSize;
4500 if ((*(bit32 *) bp) != (bit32) 0xffffffff) {
4502 index->bitmapOffset = (afs_uint32) (bp - index->bitmap);
4505 o = ffs(~*bp) - 1; /* ffs is documented in BSTRING(3) */
4507 ret = (VnodeId) ((bp - index->bitmap) * 8 + o);
4508 #ifdef AFS_DEMAND_ATTACH_FS
4510 #endif /* AFS_DEMAND_ATTACH_FS */
4513 bp += sizeof(bit32) /* i.e. 4 */ ;
4515 /* No bitmap entry--must grow bitmap */
4517 realloc(index->bitmap, index->bitmapSize + VOLUME_BITMAP_GROWSIZE);
4520 bp += index->bitmapSize;
4521 memset(bp, 0, VOLUME_BITMAP_GROWSIZE);
4522 index->bitmapOffset = index->bitmapSize;
4523 index->bitmapSize += VOLUME_BITMAP_GROWSIZE;
4525 ret = index->bitmapOffset * 8;
4526 #ifdef AFS_DEMAND_ATTACH_FS
4528 #endif /* AFS_DEMAND_ATTACH_FS */
4531 #ifdef AFS_DEMAND_ATTACH_FS
4532 VChangeState_r(vp, state_save);
4533 if (flags & VOL_ALLOC_BITMAP_WAIT) {
4534 VCancelReservation_r(vp);
4536 #endif /* AFS_DEMAND_ATTACH_FS */
4541 VAllocBitmapEntry(Error * ec, Volume * vp, register struct vnodeIndex * index)
4545 retVal = VAllocBitmapEntry_r(ec, vp, index, VOL_ALLOC_BITMAP_WAIT);
4551 VFreeBitMapEntry_r(Error * ec, register struct vnodeIndex *index,
4554 unsigned int offset;
4560 #endif /* BITMAP_LATER */
4561 offset = bitNumber >> 3;
4562 if (offset >= index->bitmapSize) {
4566 if (offset < index->bitmapOffset)
4567 index->bitmapOffset = offset & ~3; /* Truncate to nearest bit32 */
4568 *(index->bitmap + offset) &= ~(1 << (bitNumber & 0x7));
4572 VFreeBitMapEntry(Error * ec, register struct vnodeIndex *index,
4576 VFreeBitMapEntry_r(ec, index, bitNumber);
4580 /* this function will drop the glock internally.
4581 * for old pthread fileservers, this is safe thanks to vbusy.
4583 * for demand attach fs, caller must have already called
4584 * VCreateReservation_r and VWaitExclusiveState_r */