2 * Copyright 2000, International Business Machines Corporation and others.
5 * This software has been released under the terms of the IBM Public
6 * License. For details, see the LICENSE file in the top-level source
7 * directory or online at http://www.openafs.org/dl/license10.html
9 * Portions Copyright (c) 2005-2008 Sine Nomine Associates
12 /* 1/1/89: NB: this stuff is all going to be replaced. Don't take it too seriously */
17 Institution: The Information Technology Center, Carnegie-Mellon University
21 #include <afsconfig.h>
22 #include <afs/param.h>
28 #include <afs/afsint.h>
31 #include <sys/param.h>
32 #if !defined(AFS_SGI_ENV)
35 #else /* AFS_OSF_ENV */
36 #ifdef AFS_VFSINCL_ENV
39 #include <sys/fs/ufs_fs.h>
41 #if defined(AFS_DARWIN_ENV) || defined(AFS_XBSD_ENV)
42 #include <ufs/ufs/dinode.h>
43 #include <ufs/ffs/fs.h>
48 #else /* AFS_VFSINCL_ENV */
49 #if !defined(AFS_AIX_ENV) && !defined(AFS_LINUX20_ENV) && !defined(AFS_XBSD_ENV)
52 #endif /* AFS_VFSINCL_ENV */
53 #endif /* AFS_OSF_ENV */
54 #endif /* AFS_SGI_ENV */
55 #endif /* AFS_NT40_ENV */
73 #if defined(AFS_SUN_ENV) || defined(AFS_SUN5_ENV)
75 #include <sys/mnttab.h>
76 #include <sys/mntent.h>
82 #if defined(AFS_SGI_ENV)
87 #ifndef AFS_LINUX20_ENV
88 #include <fstab.h> /* Need to find in libc 5, present in libc 6 */
91 #endif /* AFS_SGI_ENV */
93 #endif /* AFS_HPUX_ENV */
97 #include <netinet/in.h>
101 #include <sys/time.h>
102 #endif /* ITIMER_REAL */
103 #endif /* AFS_NT40_ENV */
104 #if defined(AFS_SUN5_ENV) || defined(AFS_NT40_ENV) || defined(AFS_LINUX20_ENV)
111 #include <afs/errors.h>
114 #include <afs/afssyscalls.h>
116 #include <afs/afsutil.h>
120 #include "daemon_com.h"
122 #include "salvsync.h"
125 #include "partition.h"
126 #include "volume_inline.h"
127 #ifdef AFS_PTHREAD_ENV
129 #else /* AFS_PTHREAD_ENV */
130 #include "afs/assert.h"
131 #endif /* AFS_PTHREAD_ENV */
138 #if !defined(offsetof)
143 #define afs_stat stat64
144 #define afs_fstat fstat64
145 #define afs_open open64
146 #else /* !O_LARGEFILE */
147 #define afs_stat stat
148 #define afs_fstat fstat
149 #define afs_open open
150 #endif /* !O_LARGEFILE */
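/* (illustrative note, not in the original source) on large-file builds these
 * macros route header I/O in this file to the 64-bit interfaces, e.g.
 * afs_open(path, O_RDONLY) expands to open64(path, O_RDONLY); otherwise the
 * plain stat/fstat/open are used. */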
152 #ifdef AFS_PTHREAD_ENV
153 pthread_mutex_t vol_glock_mutex;
154 pthread_mutex_t vol_trans_mutex;
155 pthread_cond_t vol_put_volume_cond;
156 pthread_cond_t vol_sleep_cond;
157 int vol_attach_threads = 1;
158 #endif /* AFS_PTHREAD_ENV */
160 #ifdef AFS_DEMAND_ATTACH_FS
161 pthread_mutex_t vol_salvsync_mutex;
162 #endif /* AFS_DEMAND_ATTACH_FS */
165 extern void *calloc(), *realloc();
168 /*@printflike@*/ extern void Log(const char *format, ...);
170 /* Forward declarations */
171 static Volume *attach2(Error * ec, VolId vid, char *path,
172 register struct VolumeHeader *header,
173 struct DiskPartition64 *partp, Volume * vp,
174 int isbusy, int mode);
175 static void ReallyFreeVolume(Volume * vp);
176 #ifdef AFS_DEMAND_ATTACH_FS
177 static void FreeVolume(Volume * vp);
178 #else /* !AFS_DEMAND_ATTACH_FS */
179 #define FreeVolume(vp) ReallyFreeVolume(vp)
180 static void VScanUpdateList(void);
181 #endif /* !AFS_DEMAND_ATTACH_FS */
182 static void VInitVolumeHeaderCache(afs_uint32 howMany);
183 static int GetVolumeHeader(register Volume * vp);
184 static void ReleaseVolumeHeader(register struct volHeader *hd);
185 static void FreeVolumeHeader(register Volume * vp);
186 static void AddVolumeToHashTable(register Volume * vp, int hashid);
187 static void DeleteVolumeFromHashTable(register Volume * vp);
188 static int VHold(Volume * vp);
189 static int VHold_r(Volume * vp);
190 static void VGetBitmap_r(Error * ec, Volume * vp, VnodeClass class);
191 static void VReleaseVolumeHandles_r(Volume * vp);
192 static void VCloseVolumeHandles_r(Volume * vp);
193 static void LoadVolumeHeader(Error * ec, Volume * vp);
194 static int VCheckOffline(register Volume * vp);
195 static int VCheckDetach(register Volume * vp);
196 static Volume * GetVolume(Error * ec, Error * client_ec, VolId volumeId, Volume * hint, int flags);
197 static int VolumeExternalName_r(VolumeId volumeId, char * name, size_t len);
199 int LogLevel; /* Vice loglevel--not declared extern so that it is still
200 * defined when not linked with vice, XXXX */
201 ProgramType programType; /* The type of program using the package */
203 /* extended volume package statistics */
206 #ifdef VOL_LOCK_DEBUG
207 pthread_t vol_glock_holder = 0;
211 #define VOLUME_BITMAP_GROWSIZE 16 /* bytes, => 128 vnodes */
212 /* Must be a multiple of 4 (1 word) !! */
214 /* this parameter needs to be tunable at runtime.
215 * 128 was really inadequate for largish servers -- at 16384 volumes this
216 * puts average chain length at 128, thus an average 65 deref's to find a volptr.
217 * talk about bad spatial locality...
219 * an AVL or splay tree might work a lot better, but we'll just increase
220 * the default hash table size for now
222 #define DEFAULT_VOLUME_HASH_SIZE 256 /* Must be a power of 2!! */
223 #define DEFAULT_VOLUME_HASH_MASK (DEFAULT_VOLUME_HASH_SIZE-1)
224 #define VOLUME_HASH(volumeId) (volumeId&(VolumeHashTable.Mask))
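/* Illustrative example (not in the original source): with the default
 * 256-bucket table the mask is 0xff, so volume id 536870918 (0x20000006)
 * hashes to bucket 0x20000006 & 0xff = 6; consecutive RW/RO/BK ids
 * therefore land in adjacent buckets. */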
227 * turn volume hash chains into partially ordered lists.
228 * when the threshold is exceeded between two adjacent elements,
229 * perform a chain rebalancing operation.
231 * keep the threshold high in order to keep cache line invalidates
232 * low "enough" on SMPs
234 #define VOLUME_HASH_REORDER_THRESHOLD 200
237 * when possible, don't just reorder single elements, but reorder
238 * entire chains of elements at once. a chain of elements whose
239 * access counts exceed the element just before the pivot by at least
240 * CHAIN_THRESH is moved in front of the chain whose elements have at
241 * least CHAIN_THRESH fewer accesses than the pivot element
243 #define VOLUME_HASH_REORDER_CHAIN_THRESH (VOLUME_HASH_REORDER_THRESHOLD / 2)
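/* With the defaults above (illustrative restatement): a gap of more than
 * 200 accesses between two adjacent chain elements triggers a rebalance,
 * and a run of elements at least 100 accesses hotter than the pivot's
 * predecessor is moved ahead of the run that is at least 100 accesses
 * colder than the pivot. */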
245 #include "rx/rx_queue.h"
248 VolumeHashTable_t VolumeHashTable = {
249 DEFAULT_VOLUME_HASH_SIZE,
250 DEFAULT_VOLUME_HASH_MASK,
255 static void VInitVolumeHash(void);
259 /* This macro is used where an ffs() call does not exist. Was in util/ffs.c */
263 afs_int32 ffs_tmp = x;
267 for (ffs_i = 1;; ffs_i++) {
274 #endif /* !AFS_HAVE_FFS */
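/* Example (illustrative): ffs() returns the 1-based index of the lowest
 * set bit, e.g. ffs(0x18) == 4, ffs(1) == 1, and ffs(0) == 0. */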
276 #ifdef AFS_PTHREAD_ENV
277 typedef struct diskpartition_queue_t {
278 struct rx_queue queue;
279 struct DiskPartition64 * diskP;
280 } diskpartition_queue_t;
281 typedef struct vinitvolumepackage_thread_t {
282 struct rx_queue queue;
283 pthread_cond_t thread_done_cv;
284 int n_threads_complete;
285 } vinitvolumepackage_thread_t;
286 static void * VInitVolumePackageThread(void * args);
287 #endif /* AFS_PTHREAD_ENV */
289 static int VAttachVolumesByPartition(struct DiskPartition64 *diskP,
290 int * nAttached, int * nUnattached);
293 #ifdef AFS_DEMAND_ATTACH_FS
294 /* demand attach fileserver extensions */
297 * in the future we will support serialization of VLRU state into the fs_state
300 * these structures are the beginning of that effort
302 struct VLRU_DiskHeader {
303 struct versionStamp stamp; /* magic and structure version number */
304 afs_uint32 mtime; /* time of dump to disk */
305 afs_uint32 num_records; /* number of VLRU_DiskEntry records */
308 struct VLRU_DiskEntry {
309 afs_uint32 vid; /* volume ID */
310 afs_uint32 idx; /* generation */
311 afs_uint32 last_get; /* timestamp of last get */
314 struct VLRU_StartupQueue {
315 struct VLRU_DiskEntry * entry;
320 typedef struct vshutdown_thread_t {
322 pthread_mutex_t lock;
324 pthread_cond_t master_cv;
326 int n_threads_complete;
328 int schedule_version;
331 byte n_parts_done_pass;
332 byte part_thread_target[VOLMAXPARTS+1];
333 byte part_done_pass[VOLMAXPARTS+1];
334 struct rx_queue * part_pass_head[VOLMAXPARTS+1];
335 int stats[4][VOLMAXPARTS+1];
336 } vshutdown_thread_t;
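/* Sketch of how this structure is used (inferred from the code below):
 * the controller thread periodically recomputes part_thread_target[] and
 * bumps schedule_version; worker threads pick a partition whose target is
 * non-zero and not yet done for this pass, walk its VByP list until the
 * schedule changes or the partition is drained, then rendezvous on cv
 * before the next pass. */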
337 static void * VShutdownThread(void * args);
340 static Volume * VAttachVolumeByVp_r(Error * ec, Volume * vp, int mode);
341 static int VCheckFree(Volume * vp);
344 static void AddVolumeToVByPList_r(Volume * vp);
345 static void DeleteVolumeFromVByPList_r(Volume * vp);
346 static void VVByPListBeginExclusive_r(struct DiskPartition64 * dp);
347 static void VVByPListEndExclusive_r(struct DiskPartition64 * dp);
348 static void VVByPListWait_r(struct DiskPartition64 * dp);
350 /* online salvager */
351 static int VCheckSalvage(register Volume * vp);
352 static int VUpdateSalvagePriority_r(Volume * vp);
353 static int VScheduleSalvage_r(Volume * vp);
354 static int VCancelSalvage_r(Volume * vp, int reason);
356 /* Volume hash table */
357 static void VReorderHash_r(VolumeHashChainHead * head, Volume * pp, Volume * vp);
358 static void VHashBeginExclusive_r(VolumeHashChainHead * head);
359 static void VHashEndExclusive_r(VolumeHashChainHead * head);
360 static void VHashWait_r(VolumeHashChainHead * head);
363 static int ShutdownVByPForPass_r(struct DiskPartition64 * dp, int pass);
364 static int ShutdownVolumeWalk_r(struct DiskPartition64 * dp, int pass,
365 struct rx_queue ** idx);
366 static void ShutdownController(vshutdown_thread_t * params);
367 static void ShutdownCreateSchedule(vshutdown_thread_t * params);
370 static void VLRU_ComputeConstants(void);
371 static void VInitVLRU(void);
372 static void VLRU_Init_Node_r(volatile Volume * vp);
373 static void VLRU_Add_r(volatile Volume * vp);
374 static void VLRU_Delete_r(volatile Volume * vp);
375 static void VLRU_UpdateAccess_r(volatile Volume * vp);
376 static void * VLRU_ScannerThread(void * args);
377 static void VLRU_Scan_r(int idx);
378 static void VLRU_Promote_r(int idx);
379 static void VLRU_Demote_r(int idx);
380 static void VLRU_SwitchQueues(volatile Volume * vp, int new_idx, int append);
383 static int VCheckSoftDetach(volatile Volume * vp, afs_uint32 thresh);
384 static int VCheckSoftDetachCandidate(volatile Volume * vp, afs_uint32 thresh);
385 static int VSoftDetachVolume_r(volatile Volume * vp, afs_uint32 thresh);
388 pthread_key_t VThread_key;
389 VThreadOptions_t VThread_defaults = {
390 0 /**< allow salvsync */
392 #endif /* AFS_DEMAND_ATTACH_FS */
395 struct Lock vol_listLock; /* Lock obtained when listing volumes:
396 * prevents a volume from being missed
397 * if the volume is attached during a
401 static int TimeZoneCorrection; /* Number of seconds west of GMT */
403 /* Common message used when the volume goes off line */
404 char *VSalvageMessage =
405 "Files in this volume are currently unavailable; call operations";
407 int VInit; /* 0 - uninitialized,
408 * 1 - initialized but not all volumes have been attached,
409 * 2 - initialized and all volumes have been attached,
410 * 3 - initialized, all volumes have been attached, and
411 * VConnectFS() has completed. */
414 bit32 VolumeCacheCheck; /* Incremented every time a volume goes on line--
415 * used to stamp volume headers and in-core
416 * vnodes. When the volume goes on-line the
417 * vnode will be invalidated
418 * access only with VOL_LOCK held */
423 /***************************************************/
424 /* Startup routines */
425 /***************************************************/
428 VInitVolumePackage(ProgramType pt, afs_uint32 nLargeVnodes, afs_uint32 nSmallVnodes,
429 int connect, afs_uint32 volcache)
431 int errors = 0; /* Number of errors while finding vice partitions. */
437 memset(&VStats, 0, sizeof(VStats));
438 VStats.hdr_cache_size = 200;
440 VInitPartitionPackage();
442 #ifdef AFS_DEMAND_ATTACH_FS
443 if (programType == fileServer) {
446 VLRU_SetOptions(VLRU_SET_ENABLED, 0);
448 assert(pthread_key_create(&VThread_key, NULL) == 0);
451 #ifdef AFS_PTHREAD_ENV
452 assert(pthread_mutex_init(&vol_glock_mutex, NULL) == 0);
453 assert(pthread_mutex_init(&vol_trans_mutex, NULL) == 0);
454 assert(pthread_cond_init(&vol_put_volume_cond, NULL) == 0);
455 assert(pthread_cond_init(&vol_sleep_cond, NULL) == 0);
456 #else /* AFS_PTHREAD_ENV */
458 #endif /* AFS_PTHREAD_ENV */
459 Lock_Init(&vol_listLock);
461 srandom(time(0)); /* For VGetVolumeInfo */
462 gettimeofday(&tv, &tz);
463 TimeZoneCorrection = tz.tz_minuteswest * 60;
465 #ifdef AFS_DEMAND_ATTACH_FS
466 assert(pthread_mutex_init(&vol_salvsync_mutex, NULL) == 0);
467 #endif /* AFS_DEMAND_ATTACH_FS */
469 /* Ok, we have done enough initialization that the fileserver can
470 * start accepting calls, even though the volumes may not be
471 * available just yet.
475 #if defined(AFS_DEMAND_ATTACH_FS) && defined(SALVSYNC_BUILD_SERVER)
476 if (programType == salvageServer) {
479 #endif /* AFS_DEMAND_ATTACH_FS */
480 #ifdef FSSYNC_BUILD_SERVER
481 if (programType == fileServer) {
485 #if defined(AFS_DEMAND_ATTACH_FS) && defined(SALVSYNC_BUILD_CLIENT)
486 if (programType == fileServer) {
487 /* establish a connection to the salvager at this point */
488 assert(VConnectSALV() != 0);
490 #endif /* AFS_DEMAND_ATTACH_FS */
492 if (volcache > VStats.hdr_cache_size)
493 VStats.hdr_cache_size = volcache;
494 VInitVolumeHeaderCache(VStats.hdr_cache_size);
496 VInitVnodes(vLarge, nLargeVnodes);
497 VInitVnodes(vSmall, nSmallVnodes);
500 errors = VAttachPartitions();
504 if (programType == fileServer) {
505 struct DiskPartition64 *diskP;
506 #ifdef AFS_PTHREAD_ENV
507 struct vinitvolumepackage_thread_t params;
508 struct diskpartition_queue_t * dpq;
509 int i, threads, parts;
511 pthread_attr_t attrs;
513 assert(pthread_cond_init(¶ms.thread_done_cv,NULL) == 0);
515 params.n_threads_complete = 0;
517 /* create partition work queue */
518 for (parts=0, diskP = DiskPartitionList; diskP; diskP = diskP->next, parts++) {
519 dpq = (diskpartition_queue_t *) malloc(sizeof(struct diskpartition_queue_t));
522 queue_Append(¶ms,dpq);
525 threads = MIN(parts, vol_attach_threads);
528 /* spawn off a bunch of initialization threads */
529 assert(pthread_attr_init(&attrs) == 0);
530 assert(pthread_attr_setdetachstate(&attrs, PTHREAD_CREATE_DETACHED) == 0);
532 Log("VInitVolumePackage: beginning parallel fileserver startup\n");
533 #ifdef AFS_DEMAND_ATTACH_FS
534 Log("VInitVolumePackage: using %d threads to pre-attach volumes on %d partitions\n",
536 #else /* AFS_DEMAND_ATTACH_FS */
537 Log("VInitVolumePackage: using %d threads to attach volumes on %d partitions\n",
539 #endif /* AFS_DEMAND_ATTACH_FS */
542 for (i=0; i < threads; i++) {
543 assert(pthread_create
544 (&tid, &attrs, &VInitVolumePackageThread,
548 while(params.n_threads_complete < threads) {
549 VOL_CV_WAIT(¶ms.thread_done_cv);
553 assert(pthread_attr_destroy(&attrs) == 0);
555 /* if we're only going to run one init thread, don't bother creating
557 Log("VInitVolumePackage: beginning single-threaded fileserver startup\n");
558 #ifdef AFS_DEMAND_ATTACH_FS
559 Log("VInitVolumePackage: using 1 thread to pre-attach volumes on %d partition(s)\n",
561 #else /* AFS_DEMAND_ATTACH_FS */
562 Log("VInitVolumePackage: using 1 thread to attach volumes on %d partition(s)\n",
564 #endif /* AFS_DEMAND_ATTACH_FS */
566 VInitVolumePackageThread(¶ms);
569 assert(pthread_cond_destroy(¶ms.thread_done_cv) == 0);
571 #else /* AFS_PTHREAD_ENV */
575 /* Attach all the volumes in this partition */
576 for (diskP = DiskPartitionList; diskP; diskP = diskP->next) {
577 int nAttached = 0, nUnattached = 0;
578 assert(VAttachVolumesByPartition(diskP, &nAttached, &nUnattached) == 0);
580 #endif /* AFS_PTHREAD_ENV */
583 VInit = 2; /* Initialized, and all volumes have been attached */
584 #ifdef FSSYNC_BUILD_CLIENT
585 if (programType == volumeUtility && connect) {
587 Log("Unable to connect to file server; will retry at need\n");
591 #ifdef AFS_DEMAND_ATTACH_FS
592 else if (programType == salvageServer) {
594 Log("Unable to connect to file server; aborted\n");
598 #endif /* AFS_DEMAND_ATTACH_FS */
599 #endif /* FSSYNC_BUILD_CLIENT */
603 #ifdef AFS_PTHREAD_ENV
605 VInitVolumePackageThread(void * args) {
606 int errors = 0; /* Number of errors while finding vice partitions. */
610 struct DiskPartition64 *diskP;
611 struct vinitvolumepackage_thread_t * params;
612 struct diskpartition_queue_t * dpq;
614 params = (vinitvolumepackage_thread_t *) args;
618 /* Attach all the volumes in this partition */
619 while (queue_IsNotEmpty(params)) {
620 int nAttached = 0, nUnattached = 0;
622 dpq = queue_First(params,diskpartition_queue_t);
628 assert(VAttachVolumesByPartition(diskP, &nAttached, &nUnattached) == 0);
633 params->n_threads_complete++;
634 pthread_cond_signal(¶ms->thread_done_cv);
638 #endif /* AFS_PTHREAD_ENV */
641 * attach all volumes on a given disk partition
644 VAttachVolumesByPartition(struct DiskPartition64 *diskP, int * nAttached, int * nUnattached)
650 Log("Partition %s: attaching volumes\n", diskP->name);
651 dirp = opendir(VPartitionPath(diskP));
653 Log("opendir on Partition %s failed!\n", diskP->name);
657 while ((dp = readdir(dirp))) {
659 p = strrchr(dp->d_name, '.');
660 if (p != NULL && strcmp(p, VHDREXT) == 0) {
663 #ifdef AFS_DEMAND_ATTACH_FS
664 vp = VPreAttachVolumeByName(&error, diskP->name, dp->d_name);
665 #else /* AFS_DEMAND_ATTACH_FS */
666 vp = VAttachVolumeByName(&error, diskP->name, dp->d_name,
668 #endif /* AFS_DEMAND_ATTACH_FS */
669 (*(vp ? nAttached : nUnattached))++;
670 if (error == VOFFLINE)
671 Log("Volume %d stays offline (/vice/offline/%s exists)\n", VolumeNumber(dp->d_name), dp->d_name);
672 else if (LogLevel >= 5) {
673 Log("Partition %s: attached volume %d (%s)\n",
674 diskP->name, VolumeNumber(dp->d_name),
677 #if !defined(AFS_DEMAND_ATTACH_FS)
681 #endif /* AFS_DEMAND_ATTACH_FS */
685 Log("Partition %s: attached %d volumes; %d volumes not attached\n", diskP->name, *nAttached, *nUnattached);
691 /***************************************************/
692 /* Shutdown routines */
693 /***************************************************/
697 * highly multithreaded volume package shutdown
699 * with the demand attach fileserver extensions,
700 * VShutdown has been modified to be multithreaded.
701 * In order to achieve optimal use of many threads,
702 * the shutdown code involves one control thread and
703 * n shutdown worker threads. The control thread
704 * periodically examines the number of volumes available
705 * for shutdown on each partition, and produces a worker
706 * thread allocation schedule. The idea is to eliminate
707 * redundant scheduling computation on the workers by
708 * having a single master scheduler.
710 * The scheduler's objectives are:
712 * each partition with volumes remaining gets allocated
713 * at least 1 thread (assuming sufficient threads)
715 * threads are allocated in proportion to the number of
716 * volumes remaining to be offlined. This ensures that
717 * the OS I/O scheduler has many requests to elevator
718 * seek on partitions that will (presumably) take the
719 * longest amount of time (from now) to finish shutdown
720 * (3) keep threads busy
721 * when there are extra threads, they are assigned to
722 * partitions using a simple round-robin algorithm
724 * In the future, we may wish to add the ability to adapt
725 * to the relative performance patterns of each disk
730 * multi-step shutdown process
732 * demand attach shutdown is a four-step process. Each
733 * shutdown "pass" shuts down increasingly more difficult
734 * volumes. The main purpose is to achieve better cache
735 * utilization during shutdown.
738 *   pass 0: shutdown volumes in the unattached, pre-attached and error states
741 *   pass 1: shutdown attached volumes with cached volume headers
743 *   pass 2: shutdown all volumes in non-exclusive states
745 *   pass 3: shutdown all remaining volumes
752 register Volume *vp, *np;
753 register afs_int32 code;
754 #ifdef AFS_DEMAND_ATTACH_FS
755 struct DiskPartition64 * diskP;
756 struct diskpartition_queue_t * dpq;
757 vshutdown_thread_t params;
759 pthread_attr_t attrs;
761 memset(¶ms, 0, sizeof(vshutdown_thread_t));
763 for (params.n_parts=0, diskP = DiskPartitionList;
764 diskP; diskP = diskP->next, params.n_parts++);
766 Log("VShutdown: shutting down on-line volumes on %d partition%s...\n",
767 params.n_parts, params.n_parts > 1 ? "s" : "");
769 if (vol_attach_threads > 1) {
770 /* prepare for parallel shutdown */
771 params.n_threads = vol_attach_threads;
772 assert(pthread_mutex_init(¶ms.lock, NULL) == 0);
773 assert(pthread_cond_init(¶ms.cv, NULL) == 0);
774 assert(pthread_cond_init(¶ms.master_cv, NULL) == 0);
775 assert(pthread_attr_init(&attrs) == 0);
776 assert(pthread_attr_setdetachstate(&attrs, PTHREAD_CREATE_DETACHED) == 0);
779 /* setup the basic partition information structures for
780 * parallel shutdown */
781 for (diskP = DiskPartitionList; diskP; diskP = diskP->next) {
783 struct rx_queue * qp, * nqp;
787 VVByPListWait_r(diskP);
788 VVByPListBeginExclusive_r(diskP);
791 for (queue_Scan(&diskP->vol_list, qp, nqp, rx_queue)) {
792 vp = (Volume *)((char *)qp - offsetof(Volume, vol_list));
796 Log("VShutdown: partition %s has %d volumes with attached headers\n",
797 VPartitionPath(diskP), count);
800 /* build up the pass 0 shutdown work queue */
801 dpq = (struct diskpartition_queue_t *) malloc(sizeof(struct diskpartition_queue_t));
804 queue_Prepend(¶ms, dpq);
806 params.part_pass_head[diskP->index] = queue_First(&diskP->vol_list, rx_queue);
809 Log("VShutdown: beginning parallel fileserver shutdown\n");
810 Log("VShutdown: using %d threads to offline volumes on %d partition%s\n",
811 vol_attach_threads, params.n_parts, params.n_parts > 1 ? "s" : "" );
813 /* do pass 0 shutdown */
814 assert(pthread_mutex_lock(¶ms.lock) == 0);
815 for (i=0; i < params.n_threads; i++) {
816 assert(pthread_create
817 (&tid, &attrs, &VShutdownThread,
821 /* wait for all the pass 0 shutdowns to complete */
822 while (params.n_threads_complete < params.n_threads) {
823 assert(pthread_cond_wait(¶ms.master_cv, ¶ms.lock) == 0);
825 params.n_threads_complete = 0;
827 assert(pthread_cond_broadcast(¶ms.cv) == 0);
828 assert(pthread_mutex_unlock(¶ms.lock) == 0);
830 Log("VShutdown: pass 0 completed using the 1 thread per partition algorithm\n");
831 Log("VShutdown: starting passes 1 through 3 using finely-granular mp-fast algorithm\n");
833 /* run the parallel shutdown scheduler. it will drop the glock internally */
834 ShutdownController(¶ms);
836 /* wait for all the workers to finish pass 3 and terminate */
837 while (params.pass < 4) {
838 VOL_CV_WAIT(¶ms.cv);
841 assert(pthread_attr_destroy(&attrs) == 0);
842 assert(pthread_cond_destroy(¶ms.cv) == 0);
843 assert(pthread_cond_destroy(¶ms.master_cv) == 0);
844 assert(pthread_mutex_destroy(¶ms.lock) == 0);
846 /* drop the VByPList exclusive reservations */
847 for (diskP = DiskPartitionList; diskP; diskP = diskP->next) {
848 VVByPListEndExclusive_r(diskP);
849 Log("VShutdown: %s stats : (pass[0]=%d, pass[1]=%d, pass[2]=%d, pass[3]=%d)\n",
850 VPartitionPath(diskP),
851 params.stats[0][diskP->index],
852 params.stats[1][diskP->index],
853 params.stats[2][diskP->index],
854 params.stats[3][diskP->index]);
857 Log("VShutdown: shutdown finished using %d threads\n", params.n_threads);
859 /* if we're only going to run one shutdown thread, don't bother creating
861 Log("VShutdown: beginning single-threaded fileserver shutdown\n");
863 for (diskP = DiskPartitionList; diskP; diskP = diskP->next) {
864 VShutdownByPartition_r(diskP);
868 Log("VShutdown: complete.\n");
869 #else /* AFS_DEMAND_ATTACH_FS */
870 Log("VShutdown: shutting down on-line volumes...\n");
871 for (i = 0; i < VolumeHashTable.Size; i++) {
872 /* try to hold first volume in the hash table */
873 for (queue_Scan(&VolumeHashTable.Table[i],vp,np,Volume)) {
877 Log("VShutdown: Attempting to take volume %u offline.\n",
880 /* next, take the volume offline (drops reference count) */
881 VOffline_r(vp, "File server was shut down");
885 Log("VShutdown: complete.\n");
886 #endif /* AFS_DEMAND_ATTACH_FS */
897 #ifdef AFS_DEMAND_ATTACH_FS
900 * shutdown control thread
903 ShutdownController(vshutdown_thread_t * params)
906 struct DiskPartition64 * diskP;
908 vshutdown_thread_t shadow;
910 ShutdownCreateSchedule(params);
912 while ((params->pass < 4) &&
913 (params->n_threads_complete < params->n_threads)) {
914 /* recompute schedule once per second */
916 memcpy(&shadow, params, sizeof(vshutdown_thread_t));
920 Log("ShutdownController: schedule version=%d, vol_remaining=%d, pass=%d\n",
921 shadow.schedule_version, shadow.vol_remaining, shadow.pass);
922 Log("ShutdownController: n_threads_complete=%d, n_parts_done_pass=%d\n",
923 shadow.n_threads_complete, shadow.n_parts_done_pass);
924 for (diskP = DiskPartitionList; diskP; diskP=diskP->next) {
926 Log("ShutdownController: part[%d] : (len=%d, thread_target=%d, done_pass=%d, pass_head=%p)\n",
929 shadow.part_thread_target[id],
930 shadow.part_done_pass[id],
931 shadow.part_pass_head[id]);
937 ShutdownCreateSchedule(params);
941 /* create the shutdown thread work schedule.
942 * this scheduler tries to implement fairness
943 * by allocating at least 1 thread to each
944 * partition with volumes to be shutdown,
945 * and then it attempts to allocate remaining
946 * threads based upon the amount of work left
949 ShutdownCreateSchedule(vshutdown_thread_t * params)
951 struct DiskPartition64 * diskP;
952 int sum, thr_workload, thr_left;
953 int part_residue[VOLMAXPARTS+1];
956 /* compute the total number of outstanding volumes */
958 for (diskP = DiskPartitionList; diskP; diskP = diskP->next) {
959 sum += diskP->vol_list.len;
962 params->schedule_version++;
963 params->vol_remaining = sum;
968 /* compute average per-thread workload */
969 thr_workload = sum / params->n_threads;
970 if (sum % params->n_threads)
973 thr_left = params->n_threads;
974 memset(&part_residue, 0, sizeof(part_residue));
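/* Worked example (illustrative; partition names hypothetical): with
 * n_threads = 8 and /vicepa, /vicepb, /vicepc holding 100, 30 and 2
 * volumes, sum = 132 and thr_workload = ceil(132 / 8) = 17. The fairness
 * pass below gives each partition one thread (thr_left = 5); the
 * length-weighted pass adds 100/17 - 1 = 4 threads to /vicepa
 * (thr_left = 1); the residue pass then hands the last thread to the
 * partition with the largest residue, /vicepa (100 - 5*17 = 15). */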
976 /* for fairness, give every partition with volumes remaining
977 * at least one thread */
978 for (diskP = DiskPartitionList; diskP && thr_left; diskP = diskP->next) {
980 if (diskP->vol_list.len) {
981 params->part_thread_target[id] = 1;
984 params->part_thread_target[id] = 0;
988 if (thr_left && thr_workload) {
989 /* compute length-weighted workloads */
992 for (diskP = DiskPartitionList; diskP && thr_left; diskP = diskP->next) {
994 delta = (diskP->vol_list.len / thr_workload) -
995 params->part_thread_target[id];
999 if (delta < thr_left) {
1000 params->part_thread_target[id] += delta;
1003 params->part_thread_target[id] += thr_left;
1011 /* try to assign any leftover threads to partitions that
1012 * had volume lengths closer to needing thread_target+1 */
1013 int max_residue, max_id;
1015 /* compute the residues */
1016 for (diskP = DiskPartitionList; diskP; diskP = diskP->next) {
1018 part_residue[id] = diskP->vol_list.len -
1019 (params->part_thread_target[id] * thr_workload);
1022 /* now try to allocate remaining threads to partitions with the
1023 * highest residues */
1026 for (diskP = DiskPartitionList; diskP; diskP = diskP->next) {
1028 if (part_residue[id] > max_residue) {
1029 max_residue = part_residue[id];
1038 params->part_thread_target[max_id]++;
1040 part_residue[max_id] = 0;
1045 /* punt and give any remaining threads equally to each partition */
1047 if (thr_left >= params->n_parts) {
1048 alloc = thr_left / params->n_parts;
1049 for (diskP = DiskPartitionList; diskP; diskP = diskP->next) {
1051 params->part_thread_target[id] += alloc;
1056 /* finish off the last of the threads */
1057 for (diskP = DiskPartitionList; thr_left && diskP; diskP = diskP->next) {
1059 params->part_thread_target[id]++;
1065 /* worker thread for parallel shutdown */
1067 VShutdownThread(void * args)
1069 struct rx_queue *qp;
1071 vshutdown_thread_t * params;
1072 int part, code, found, pass, schedule_version_save, count;
1073 struct DiskPartition64 *diskP;
1074 struct diskpartition_queue_t * dpq;
1077 params = (vshutdown_thread_t *) args;
1079 /* acquire the shutdown pass 0 lock */
1080 assert(pthread_mutex_lock(¶ms->lock) == 0);
1082 /* if there's still pass 0 work to be done,
1083 * get a work entry, and do a pass 0 shutdown */
1084 if (queue_IsNotEmpty(params)) {
1085 dpq = queue_First(params, diskpartition_queue_t);
1087 assert(pthread_mutex_unlock(¶ms->lock) == 0);
1093 while (ShutdownVolumeWalk_r(diskP, 0, ¶ms->part_pass_head[id]))
1095 params->stats[0][diskP->index] = count;
1096 assert(pthread_mutex_lock(¶ms->lock) == 0);
1099 params->n_threads_complete++;
1100 if (params->n_threads_complete == params->n_threads) {
1101 /* notify control thread that all workers have completed pass 0 */
1102 assert(pthread_cond_signal(¶ms->master_cv) == 0);
1104 while (params->pass == 0) {
1105 assert(pthread_cond_wait(¶ms->cv, ¶ms->lock) == 0);
1109 assert(pthread_mutex_unlock(¶ms->lock) == 0);
1112 pass = params->pass;
1115 /* now escalate through the more complicated shutdowns */
1117 schedule_version_save = params->schedule_version;
1119 /* find a disk partition to work on */
1120 for (diskP = DiskPartitionList; diskP; diskP = diskP->next) {
1122 if (params->part_thread_target[id] && !params->part_done_pass[id]) {
1123 params->part_thread_target[id]--;
1130 /* hmm. for some reason the controller thread couldn't find anything for
1131 * us to do. let's see if there's anything we can do */
1132 for (diskP = DiskPartitionList; diskP; diskP = diskP->next) {
1134 if (diskP->vol_list.len && !params->part_done_pass[id]) {
1137 } else if (!params->part_done_pass[id]) {
1138 params->part_done_pass[id] = 1;
1139 params->n_parts_done_pass++;
1141 Log("VShutdown: done shutting down volumes on partition %s.\n",
1142 VPartitionPath(diskP));
1148 /* do work on this partition until either the controller
1149 * creates a new schedule, or we run out of things to do
1150 * on this partition */
1153 while (!params->part_done_pass[id] &&
1154 (schedule_version_save == params->schedule_version)) {
1155 /* ShutdownVolumeWalk_r will drop the glock internally */
1156 if (!ShutdownVolumeWalk_r(diskP, pass, ¶ms->part_pass_head[id])) {
1157 if (!params->part_done_pass[id]) {
1158 params->part_done_pass[id] = 1;
1159 params->n_parts_done_pass++;
1161 Log("VShutdown: done shutting down volumes on partition %s.\n",
1162 VPartitionPath(diskP));
1170 params->stats[pass][id] += count;
1172 /* ok, everyone is done with this pass, proceed */
1175 params->n_threads_complete++;
1176 while (params->pass == pass) {
1177 if (params->n_threads_complete == params->n_threads) {
1178 /* we are the last thread to complete, so we will
1179 * reinitialize worker pool state for the next pass */
1180 params->n_threads_complete = 0;
1181 params->n_parts_done_pass = 0;
1183 for (diskP = DiskPartitionList; diskP; diskP = diskP->next) {
1185 params->part_done_pass[id] = 0;
1186 params->part_pass_head[id] = queue_First(&diskP->vol_list, rx_queue);
1189 /* compute a new thread schedule before releasing all the workers */
1190 ShutdownCreateSchedule(params);
1192 /* wake up all the workers */
1193 assert(pthread_cond_broadcast(¶ms->cv) == 0);
1196 Log("VShutdown: pass %d completed using %d threads on %d partitions\n",
1197 pass, params->n_threads, params->n_parts);
1200 VOL_CV_WAIT(¶ms->cv);
1203 pass = params->pass;
1217 /* shut down all volumes on a given disk partition
1219 * note that this function will not allow mp-fast
1220 * shutdown of a partition */
1222 VShutdownByPartition_r(struct DiskPartition64 * dp)
1228 /* wait for other exclusive ops to finish */
1229 VVByPListWait_r(dp);
1231 /* begin exclusive access */
1232 VVByPListBeginExclusive_r(dp);
1234 /* pick the low-hanging fruit first,
1235 * then do the complicated ones last
1236 * (has the advantage of keeping
1237 * in-use volumes up until the bitter end) */
1238 for (pass = 0, total=0; pass < 4; pass++) {
1239 pass_stats[pass] = ShutdownVByPForPass_r(dp, pass);
1240 total += pass_stats[pass];
1243 /* end exclusive access */
1244 VVByPListEndExclusive_r(dp);
1246 Log("VShutdownByPartition: shut down %d volumes on %s (pass[0]=%d, pass[1]=%d, pass[2]=%d, pass[3]=%d)\n",
1247 total, VPartitionPath(dp), pass_stats[0], pass_stats[1], pass_stats[2], pass_stats[3]);
1252 /* internal shutdown functionality
1254 * for multi-pass shutdown:
1255 * 0 to only "shutdown" {pre,un}attached and error state volumes
1256 * 1 to also shutdown attached volumes w/ volume header loaded
1257 * 2 to also shutdown attached volumes w/o volume header loaded
1258 * 3 to also shutdown exclusive state volumes
1260 * caller MUST hold exclusive access on the hash chain
1261 * because we drop vol_glock_mutex internally
1263 * this function is reentrant for passes 1--3
1264 * (e.g. multiple threads can cooperate to
1265 * shutdown a partition mp-fast)
1267 * pass 0 is not scaleable because the volume state data is
1268 * synchronized by vol_glock mutex, and the locking overhead
1269 * is too high to drop the lock long enough to do linked list
1273 ShutdownVByPForPass_r(struct DiskPartition64 * dp, int pass)
1275 struct rx_queue * q = queue_First(&dp->vol_list, rx_queue);
1278 while (ShutdownVolumeWalk_r(dp, pass, &q))
1284 /* conditionally shutdown one volume on partition dp
1285 * returns 1 if a volume was shutdown in this pass,
1288 ShutdownVolumeWalk_r(struct DiskPartition64 * dp, int pass,
1289 struct rx_queue ** idx)
1291 struct rx_queue *qp, *nqp;
1296 for (queue_ScanFrom(&dp->vol_list, qp, qp, nqp, rx_queue)) {
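/* recover the enclosing Volume from its embedded vol_list queue node
 * (container-of arithmetic via offsetof) */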
1297 vp = (Volume *) (((char *)qp) - offsetof(Volume, vol_list));
1301 if ((V_attachState(vp) != VOL_STATE_UNATTACHED) &&
1302 (V_attachState(vp) != VOL_STATE_ERROR) &&
1303 (V_attachState(vp) != VOL_STATE_PREATTACHED)) {
1307 if ((V_attachState(vp) == VOL_STATE_ATTACHED) &&
1308 (vp->header == NULL)) {
1312 if (VIsExclusiveState(V_attachState(vp))) {
1317 DeleteVolumeFromVByPList_r(vp);
1318 VShutdownVolume_r(vp);
1328 * shutdown a specific volume
1330 /* caller MUST NOT hold a heavyweight ref on vp */
1332 VShutdownVolume_r(Volume * vp)
1336 VCreateReservation_r(vp);
1338 if (LogLevel >= 5) {
1339 Log("VShutdownVolume_r: vid=%u, device=%d, state=%hu\n",
1340 vp->hashid, vp->partition->device, V_attachState(vp));
1343 /* wait for other blocking ops to finish */
1344 VWaitExclusiveState_r(vp);
1346 assert(VIsValidState(V_attachState(vp)));
1348 switch(V_attachState(vp)) {
1349 case VOL_STATE_SALVAGING:
1350 /* make sure salvager knows we don't want
1351 * the volume back */
1352 VCancelSalvage_r(vp, SALVSYNC_SHUTDOWN);
1353 case VOL_STATE_PREATTACHED:
1354 case VOL_STATE_ERROR:
1355 VChangeState_r(vp, VOL_STATE_UNATTACHED);
1356 case VOL_STATE_UNATTACHED:
1358 case VOL_STATE_GOING_OFFLINE:
1359 case VOL_STATE_SHUTTING_DOWN:
1360 case VOL_STATE_ATTACHED:
1364 Log("VShutdown: Attempting to take volume %u offline.\n",
1367 /* take the volume offline (drops reference count) */
1368 VOffline_r(vp, "File server was shut down");
1373 VCancelReservation_r(vp);
1377 #endif /* AFS_DEMAND_ATTACH_FS */
1380 /***************************************************/
1381 /* Header I/O routines */
1382 /***************************************************/
1384 /* open a descriptor for the inode (h),
1385 * read in an on-disk structure into buffer (to) of size (size),
1386 * verify versionstamp in structure has magic (magic) and
1387 * optionally verify version (version) if (version) is nonzero
1390 ReadHeader(Error * ec, IHandle_t * h, char *to, int size, bit32 magic,
1393 struct versionStamp *vsn;
1408 if (FDH_SEEK(fdP, 0, SEEK_SET) < 0) {
1410 FDH_REALLYCLOSE(fdP);
1413 vsn = (struct versionStamp *)to;
1414 if (FDH_READ(fdP, to, size) != size || vsn->magic != magic) {
1416 FDH_REALLYCLOSE(fdP);
1421 /* Check is conditional, in case caller wants to inspect version himself */
1422 if (version && vsn->version != version) {
1428 WriteVolumeHeader_r(Error * ec, Volume * vp)
1430 IHandle_t *h = V_diskDataHandle(vp);
1440 if (FDH_SEEK(fdP, 0, SEEK_SET) < 0) {
1442 FDH_REALLYCLOSE(fdP);
1445 if (FDH_WRITE(fdP, (char *)&V_disk(vp), sizeof(V_disk(vp)))
1446 != sizeof(V_disk(vp))) {
1448 FDH_REALLYCLOSE(fdP);
1454 /* VolumeHeaderToDisk
1455 * Allows for storing 64 bit inode numbers in on-disk volume header
1458 /* convert in-memory representation of a volume header to the
1459 * on-disk representation of a volume header */
1461 VolumeHeaderToDisk(VolumeDiskHeader_t * dh, VolumeHeader_t * h)
1464 memset((char *)dh, 0, sizeof(VolumeDiskHeader_t));
1465 dh->stamp = h->stamp;
1467 dh->parent = h->parent;
1469 #ifdef AFS_64BIT_IOPS_ENV
1470 dh->volumeInfo_lo = (afs_int32) h->volumeInfo & 0xffffffff;
1471 dh->volumeInfo_hi = (afs_int32) (h->volumeInfo >> 32) & 0xffffffff;
1472 dh->smallVnodeIndex_lo = (afs_int32) h->smallVnodeIndex & 0xffffffff;
1473 dh->smallVnodeIndex_hi =
1474 (afs_int32) (h->smallVnodeIndex >> 32) & 0xffffffff;
1475 dh->largeVnodeIndex_lo = (afs_int32) h->largeVnodeIndex & 0xffffffff;
1476 dh->largeVnodeIndex_hi =
1477 (afs_int32) (h->largeVnodeIndex >> 32) & 0xffffffff;
1478 dh->linkTable_lo = (afs_int32) h->linkTable & 0xffffffff;
1479 dh->linkTable_hi = (afs_int32) (h->linkTable >> 32) & 0xffffffff;
1481 dh->volumeInfo_lo = h->volumeInfo;
1482 dh->smallVnodeIndex_lo = h->smallVnodeIndex;
1483 dh->largeVnodeIndex_lo = h->largeVnodeIndex;
1484 dh->linkTable_lo = h->linkTable;
1488 /* DiskToVolumeHeader
1489 * Converts an on-disk representation of a volume header to
1490 * the in-memory representation of a volume header.
1492 * Makes the assumption that AFS has *always*
1493 * zero'd the volume header file so that high parts of inode
1494 * numbers are 0 in older (SGI EFS) volume header files.
1497 DiskToVolumeHeader(VolumeHeader_t * h, VolumeDiskHeader_t * dh)
1499 memset((char *)h, 0, sizeof(VolumeHeader_t));
1500 h->stamp = dh->stamp;
1502 h->parent = dh->parent;
1504 #ifdef AFS_64BIT_IOPS_ENV
1506 (Inode) dh->volumeInfo_lo | ((Inode) dh->volumeInfo_hi << 32);
1508 h->smallVnodeIndex =
1509 (Inode) dh->smallVnodeIndex_lo | ((Inode) dh->
1510 smallVnodeIndex_hi << 32);
1512 h->largeVnodeIndex =
1513 (Inode) dh->largeVnodeIndex_lo | ((Inode) dh->
1514 largeVnodeIndex_hi << 32);
1516 (Inode) dh->linkTable_lo | ((Inode) dh->linkTable_hi << 32);
1518 h->volumeInfo = dh->volumeInfo_lo;
1519 h->smallVnodeIndex = dh->smallVnodeIndex_lo;
1520 h->largeVnodeIndex = dh->largeVnodeIndex_lo;
1521 h->linkTable = dh->linkTable_lo;
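/* Worked example (illustrative): on AFS_64BIT_IOPS_ENV builds an inode such
 * as 0x00000123456789AB is stored as volumeInfo_hi = 0x00000123 and
 * volumeInfo_lo = 0x456789AB, and reassembled here as
 * ((Inode)hi << 32) | lo, so the round trip through the disk header is
 * lossless. */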
1526 /***************************************************/
1527 /* Volume Attachment routines */
1528 /***************************************************/
1530 #ifdef AFS_DEMAND_ATTACH_FS
1532 * pre-attach a volume given its path.
1534 * @param[out] ec outbound error code
1535 * @param[in] partition partition path string
1536 * @param[in] name volume id string
1538 * @return volume object pointer
1540 * @note A pre-attached volume will only have its partition
1541 * and hashid fields initialized. At first call to
1542 * VGetVolume, the volume will be fully attached.
1546 VPreAttachVolumeByName(Error * ec, char *partition, char *name)
1550 vp = VPreAttachVolumeByName_r(ec, partition, name);
1556 * pre-attach a volume given its path.
1558 * @param[out] ec outbound error code
1559 * @param[in] partition path to vice partition
1560 * @param[in] name volume id string
1562 * @return volume object pointer
1564 * @pre VOL_LOCK held
1566 * @internal volume package internal use only.
1569 VPreAttachVolumeByName_r(Error * ec, char *partition, char *name)
1571 return VPreAttachVolumeById_r(ec,
1573 VolumeNumber(name));
1577 * pre-attach a volume given its path and numeric volume id.
1579 * @param[out] ec error code return
1580 * @param[in] partition path to vice partition
1581 * @param[in] volumeId numeric volume id
1583 * @return volume object pointer
1585 * @pre VOL_LOCK held
1587 * @internal volume package internal use only.
1590 VPreAttachVolumeById_r(Error * ec,
1595 struct DiskPartition64 *partp;
1599 assert(programType == fileServer);
1601 if (!(partp = VGetPartition_r(partition, 0))) {
1603 Log("VPreAttachVolumeById_r: Error getting partition (%s)\n", partition);
1607 vp = VLookupVolume_r(ec, volumeId, NULL);
1612 return VPreAttachVolumeByVp_r(ec, partp, vp, volumeId);
1616 * preattach a volume.
1618 * @param[out] ec outbound error code
1619 * @param[in] partp pointer to partition object
1620 * @param[in] vp pointer to volume object
1621 * @param[in] vid volume id
1623 * @return volume object pointer
1625 * @pre VOL_LOCK is held.
1627 * @warning Returned volume object pointer does not have to
1628 * equal the pointer passed in as argument vp. There
1629 * are potential race conditions which can result in
1630 * the pointers having different values. It is up to
1631 * the caller to make sure that references are handled
1632 * properly in this case.
1634 * @note If there is already a volume object registered with
1635 * the same volume id, its pointer MUST be passed as
1636 * argument vp. Failure to do so will result in a silent
1637 * failure to preattach.
1639 * @internal volume package internal use only.
1642 VPreAttachVolumeByVp_r(Error * ec,
1643 struct DiskPartition64 * partp,
1651 /* check to see if pre-attach already happened */
1653 (V_attachState(vp) != VOL_STATE_UNATTACHED) &&
1654 (V_attachState(vp) != VOL_STATE_PREATTACHED) &&
1655 !VIsErrorState(V_attachState(vp))) {
1657 * pre-attach is a no-op in all but the following cases:
1659 * - volume is unattached
1660 * - volume is in an error state
1661 * - volume is pre-attached
1663 Log("VPreattachVolumeByVp_r: volume %u not in quiescent state\n", vid);
1666 /* we're re-attaching a volume; clear out some old state */
1667 memset(&vp->salvage, 0, sizeof(struct VolumeOnlineSalvage));
1669 if (V_partition(vp) != partp) {
1670 /* XXX potential race */
1671 DeleteVolumeFromVByPList_r(vp);
1674 /* if we need to allocate a new Volume struct,
1675 * go ahead and drop the vol glock, otherwise
1676 * do the basic setup synchronised, as it's
1677 * probably not worth dropping the lock */
1680 /* allocate the volume structure */
1681 vp = nvp = (Volume *) malloc(sizeof(Volume));
1683 memset(vp, 0, sizeof(Volume));
1684 queue_Init(&vp->vnode_list);
1685 assert(pthread_cond_init(&V_attachCV(vp), NULL) == 0);
1688 /* link the volume with its associated vice partition */
1689 vp->device = partp->device;
1690 vp->partition = partp;
1693 vp->specialStatus = 0;
1695 /* if we dropped the lock, reacquire the lock,
1696 * check for pre-attach races, and then add
1697 * the volume to the hash table */
1700 nvp = VLookupVolume_r(ec, vid, NULL);
1705 } else if (nvp) { /* race detected */
1710 /* hack to make up for VChangeState_r() decrementing
1711 * the old state counter */
1712 VStats.state_levels[0]++;
1716 /* put pre-attached volume onto the hash table
1717 * and bring it up to the pre-attached state */
1718 AddVolumeToHashTable(vp, vp->hashid);
1719 AddVolumeToVByPList_r(vp);
1720 VLRU_Init_Node_r(vp);
1721 VChangeState_r(vp, VOL_STATE_PREATTACHED);
1724 Log("VPreAttachVolumeByVp_r: volume %u pre-attached\n", vp->hashid);
1732 #endif /* AFS_DEMAND_ATTACH_FS */
1734 /* Attach an existing volume, given its pathname, and return a
1735 pointer to the volume header information. The volume also
1736 normally goes online at this time. An offline volume
1737 must be reattached to make it go online */
1739 VAttachVolumeByName(Error * ec, char *partition, char *name, int mode)
1743 retVal = VAttachVolumeByName_r(ec, partition, name, mode);
1749 VAttachVolumeByName_r(Error * ec, char *partition, char *name, int mode)
1751 register Volume *vp = NULL, *svp = NULL;
1753 struct afs_stat status;
1754 struct VolumeDiskHeader diskHeader;
1755 struct VolumeHeader iheader;
1756 struct DiskPartition64 *partp;
1760 #ifdef AFS_DEMAND_ATTACH_FS
1761 VolumeStats stats_save;
1762 #endif /* AFS_DEMAND_ATTACH_FS */
1766 volumeId = VolumeNumber(name);
1768 if (!(partp = VGetPartition_r(partition, 0))) {
1770 Log("VAttachVolume: Error getting partition (%s)\n", partition);
1774 if (programType == volumeUtility) {
1776 VLockPartition_r(partition);
1777 } else if (programType == fileServer) {
1778 #ifdef AFS_DEMAND_ATTACH_FS
1779 /* lookup the volume in the hash table */
1780 vp = VLookupVolume_r(ec, volumeId, NULL);
1786 /* save any counters that are supposed to
1787 * be monotonically increasing over the
1788 * lifetime of the fileserver */
1789 memcpy(&stats_save, &vp->stats, sizeof(VolumeStats));
1791 memset(&stats_save, 0, sizeof(VolumeStats));
1794 /* if there's something in the hash table, and it's not
1795 * in the pre-attach state, then we may need to detach
1796 * it before proceeding */
1797 if (vp && (V_attachState(vp) != VOL_STATE_PREATTACHED)) {
1798 VCreateReservation_r(vp);
1799 VWaitExclusiveState_r(vp);
1801 /* at this point state must be one of:
1810 if (vp->specialStatus == VBUSY)
1813 /* if it's already attached, see if we can return it */
1814 if (V_attachState(vp) == VOL_STATE_ATTACHED) {
1815 VGetVolumeByVp_r(ec, vp);
1816 if (V_inUse(vp) == fileServer) {
1817 VCancelReservation_r(vp);
1821 /* otherwise, we need to detach, and attempt to re-attach */
1822 VDetachVolume_r(ec, vp);
1824 Log("VAttachVolume: Error detaching old volume instance (%s)\n", name);
1827 /* if it isn't fully attached, delete from the hash tables,
1828 and let the refcounter handle the rest */
1829 DeleteVolumeFromHashTable(vp);
1830 DeleteVolumeFromVByPList_r(vp);
1833 VCancelReservation_r(vp);
1837 /* pre-attach volume if it hasn't been done yet */
1839 (V_attachState(vp) == VOL_STATE_UNATTACHED) ||
1840 (V_attachState(vp) == VOL_STATE_ERROR)) {
1842 vp = VPreAttachVolumeByVp_r(ec, partp, vp, volumeId);
1850 /* handle pre-attach races
1852 * multiple threads can race to pre-attach a volume,
1853 * but we can't let them race beyond that
1855 * our solution is to let the first thread to bring
1856 * the volume into an exclusive state win; the other
1857 * threads just wait until it finishes bringing the
1858 * volume online, and then they do a vgetvolumebyvp
1860 if (svp && (svp != vp)) {
1861 /* wait for other exclusive ops to finish */
1862 VCreateReservation_r(vp);
1863 VWaitExclusiveState_r(vp);
1865 /* get a heavyweight ref, kill the lightweight ref, and return */
1866 VGetVolumeByVp_r(ec, vp);
1867 VCancelReservation_r(vp);
1871 /* at this point, we are chosen as the thread to do
1872 * demand attachment for this volume. all other threads
1873 * doing a getvolume on vp->hashid will block until we finish */
1875 /* make sure any old header cache entries are invalidated
1876 * before proceeding */
1877 FreeVolumeHeader(vp);
1879 VChangeState_r(vp, VOL_STATE_ATTACHING);
1881 /* restore any saved counters */
1882 memcpy(&vp->stats, &stats_save, sizeof(VolumeStats));
1883 #else /* AFS_DEMAND_ATTACH_FS */
1884 vp = VGetVolume_r(ec, volumeId);
1886 if (V_inUse(vp) == fileServer)
1888 if (vp->specialStatus == VBUSY)
1890 VDetachVolume_r(ec, vp);
1892 Log("VAttachVolume: Error detaching volume (%s)\n", name);
1896 #endif /* AFS_DEMAND_ATTACH_FS */
1900 strcpy(path, VPartitionPath(partp));
1906 if ((fd = afs_open(path, O_RDONLY)) == -1 || afs_fstat(fd, &status) == -1) {
1907 Log("VAttachVolume: Failed to open %s (errno %d)\n", path, errno);
1914 n = read(fd, &diskHeader, sizeof(diskHeader));
1916 if (n != sizeof(diskHeader)
1917 || diskHeader.stamp.magic != VOLUMEHEADERMAGIC) {
1918 Log("VAttachVolume: Error reading volume header %s\n", path);
1923 if (diskHeader.stamp.version != VOLUMEHEADERVERSION) {
1924 Log("VAttachVolume: Volume %s, version number is incorrect; volume needs salvaged\n", path);
1930 DiskToVolumeHeader(&iheader, &diskHeader);
1931 #ifdef FSSYNC_BUILD_CLIENT
1932 if (programType == volumeUtility && mode != V_SECRETLY && mode != V_PEEK) {
1934 if (FSYNC_VolOp(iheader.id, partition, FSYNC_VOL_NEEDVOLUME, mode, NULL)
1936 Log("VAttachVolume: attach of volume %u apparently denied by file server\n", iheader.id);
1937 *ec = VNOVOL; /* XXXX */
1945 vp = (Volume *) calloc(1, sizeof(Volume));
1947 vp->device = partp->device;
1948 vp->partition = partp;
1949 queue_Init(&vp->vnode_list);
1950 #ifdef AFS_DEMAND_ATTACH_FS
1951 assert(pthread_cond_init(&V_attachCV(vp), NULL) == 0);
1952 #endif /* AFS_DEMAND_ATTACH_FS */
1955 /* attach2 is entered without any locks, and returns
1956 * with vol_glock_mutex held */
1957 vp = attach2(ec, volumeId, path, &iheader, partp, vp, isbusy, mode);
1959 if (programType == volumeUtility && vp) {
1960 if ((mode == V_VOLUPD) || (VolumeWriteable(vp) && (mode == V_CLONE))) {
1961 /* mark volume header as in use so that volser crashes lead to a
1962 * salvage attempt */
1963 VUpdateVolume_r(ec, vp, 0);
1965 #ifdef AFS_DEMAND_ATTACH_FS
1966 /* for dafs, we should tell the fileserver, except for V_PEEK
1967 * where we know it is not necessary */
1968 if (mode == V_PEEK) {
1969 vp->needsPutBack = 0;
1971 vp->needsPutBack = 1;
1973 #else /* !AFS_DEMAND_ATTACH_FS */
1974 /* duplicate computation in fssync.c about whether the server
1975 * takes the volume offline or not. If the volume isn't
1976 * offline, we must not return it when we detach the volume,
1977 * or the server will abort */
1978 if (mode == V_READONLY || mode == V_PEEK
1979 || (!VolumeWriteable(vp) && (mode == V_CLONE || mode == V_DUMP)))
1980 vp->needsPutBack = 0;
1982 vp->needsPutBack = 1;
1983 #endif /* !AFS_DEMAND_ATTACH_FS */
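/* e.g. (illustrative): a V_DUMP attach of a read-only clone is one of the
 * cases above where the fileserver keeps the volume online, so needsPutBack
 * stays 0 and detaching will not ask the fileserver to put it back online. */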
1985 /* OK, there's a problem here, but one that I don't know how to
1986 * fix right now, and that I don't think should arise often.
1987 * Basically, we should only put back this volume to the server if
1988 * it was given to us by the server, but since we don't have a vp,
1989 * we can't run the VolumeWriteable function to find out as we do
1990 * above when computing vp->needsPutBack. So we send it back, but
1991 * there's a path in VAttachVolume on the server which may abort
1992 * if this volume doesn't have a header. Should be pretty rare
1993 * for all of that to happen, but if it does, probably the right
1994 * fix is for the server to allow the return of readonly volumes
1995 * that it doesn't think are really checked out. */
1996 #ifdef FSSYNC_BUILD_CLIENT
1997 if (programType == volumeUtility && vp == NULL &&
1998 mode != V_SECRETLY && mode != V_PEEK) {
1999 FSYNC_VolOp(iheader.id, partition, FSYNC_VOL_ON, 0, NULL);
2002 if (programType == fileServer && vp) {
2003 #ifdef AFS_DEMAND_ATTACH_FS
2005 * we can get here in cases where we don't "own"
2006 * the volume (e.g. volume owned by a utility).
2007 * short circuit around potential disk header races.
2009 if (V_attachState(vp) != VOL_STATE_ATTACHED) {
2013 V_needsCallback(vp) = 0;
2015 if (VInit >= 2 && V_BreakVolumeCallbacks) {
2016 Log("VAttachVolume: Volume %u was changed externally; breaking callbacks\n", V_id(vp));
2017 (*V_BreakVolumeCallbacks) (V_id(vp));
2020 VUpdateVolume_r(ec, vp, 0);
2022 Log("VAttachVolume: Error updating volume\n");
2027 if (VolumeWriteable(vp) && V_dontSalvage(vp) == 0) {
2028 #ifndef AFS_DEMAND_ATTACH_FS
2029 /* This is a hack: by temporarily setting the incore
2030 * dontSalvage flag ON, the volume will be put back on the
2031 * Update list (with dontSalvage OFF again). It will then
2032 * come back in N minutes with DONT_SALVAGE eventually
2033 * set. This is the way that volumes that have never had
2034 * it set get it set; or that volumes that have been
2035 * offline without DONT SALVAGE having been set also
2036 * eventually get it set */
2037 V_dontSalvage(vp) = DONT_SALVAGE;
2038 #endif /* !AFS_DEMAND_ATTACH_FS */
2039 VAddToVolumeUpdateList_r(ec, vp);
2041 Log("VAttachVolume: Error adding volume to update list\n");
2048 Log("VOnline: volume %u (%s) attached and online\n", V_id(vp),
2053 if (programType == volumeUtility) {
2054 VUnlockPartition_r(partition);
2057 #ifdef AFS_DEMAND_ATTACH_FS
2058 /* attach failed; make sure we're in error state */
2059 if (vp && !VIsErrorState(V_attachState(vp))) {
2060 VChangeState_r(vp, VOL_STATE_ERROR);
2062 #endif /* AFS_DEMAND_ATTACH_FS */
2069 #ifdef AFS_DEMAND_ATTACH_FS
2070 /* VAttachVolumeByVp_r
2072 * finish attaching a volume that is
2073 * in a less than fully attached state
2075 /* caller MUST hold a ref count on vp */
2077 VAttachVolumeByVp_r(Error * ec, Volume * vp, int mode)
2079 char name[VMAXPATHLEN];
2080 int fd, n, reserve = 0;
2081 struct afs_stat status;
2082 struct VolumeDiskHeader diskHeader;
2083 struct VolumeHeader iheader;
2084 struct DiskPartition64 *partp;
2089 VolumeStats stats_save;
2092 /* volume utility should never call AttachByVp */
2093 assert(programType == fileServer);
2095 volumeId = vp->hashid;
2096 partp = vp->partition;
2097 VolumeExternalName_r(volumeId, name, sizeof(name));
2100 /* if another thread is performing a blocking op, wait */
2101 VWaitExclusiveState_r(vp);
2103 memcpy(&stats_save, &vp->stats, sizeof(VolumeStats));
2105 /* if it's already attached, see if we can return it */
2106 if (V_attachState(vp) == VOL_STATE_ATTACHED) {
2107 VGetVolumeByVp_r(ec, vp);
2108 if (V_inUse(vp) == fileServer) {
2111 if (vp->specialStatus == VBUSY)
2113 VDetachVolume_r(ec, vp);
2115 Log("VAttachVolume: Error detaching volume (%s)\n", name);
2121 /* pre-attach volume if it hasn't been done yet */
2123 (V_attachState(vp) == VOL_STATE_UNATTACHED) ||
2124 (V_attachState(vp) == VOL_STATE_ERROR)) {
2125 nvp = VPreAttachVolumeByVp_r(ec, partp, vp, volumeId);
2131 VCreateReservation_r(nvp);
2137 VChangeState_r(vp, VOL_STATE_ATTACHING);
2139 /* restore monotonically increasing stats */
2140 memcpy(&vp->stats, &stats_save, sizeof(VolumeStats));
2145 /* compute path to disk header,
2147 * and verify magic and version stamps */
2148 strcpy(path, VPartitionPath(partp));
2154 if ((fd = afs_open(path, O_RDONLY)) == -1 || afs_fstat(fd, &status) == -1) {
2155 Log("VAttachVolume: Failed to open %s (errno %d)\n", path, errno);
2162 n = read(fd, &diskHeader, sizeof(diskHeader));
2164 if (n != sizeof(diskHeader)
2165 || diskHeader.stamp.magic != VOLUMEHEADERMAGIC) {
2166 Log("VAttachVolume: Error reading volume header %s\n", path);
2171 if (diskHeader.stamp.version != VOLUMEHEADERVERSION) {
2172 Log("VAttachVolume: Volume %s, version number is incorrect; volume needs salvaged\n", path);
2178 /* convert on-disk header format to in-memory header format */
2179 DiskToVolumeHeader(&iheader, &diskHeader);
2183 * NOTE: attach2 is entered without any locks, and returns
2184 * with vol_glock_mutex held */
2185 vp = attach2(ec, volumeId, path, &iheader, partp, vp, isbusy, mode);
2188 * the event that an error was encountered, or
2189 * the volume was not brought to an attached state
2190 * for any reason, skip to the end. We cannot
2191 * safely call VUpdateVolume unless we "own" it.
2195 (V_attachState(vp) != VOL_STATE_ATTACHED)) {
2199 V_needsCallback(vp) = 0;
2200 VUpdateVolume_r(ec, vp, 0);
2202 Log("VAttachVolume: Error updating volume %u\n", vp->hashid);
2206 if (VolumeWriteable(vp) && V_dontSalvage(vp) == 0) {
2207 #ifndef AFS_DEMAND_ATTACH_FS
2208 /* This is a hack: by temporarily setting the incore
2209 * dontSalvage flag ON, the volume will be put back on the
2210 * Update list (with dontSalvage OFF again). It will then
2211 * come back in N minutes with DONT_SALVAGE eventually
2212 * set. This is the way that volumes that have never had
2213 * it set get it set; or that volumes that have been
2214 * offline without DONT SALVAGE having been set also
2215 * eventually get it set */
2216 V_dontSalvage(vp) = DONT_SALVAGE;
2217 #endif /* !AFS_DEMAND_ATTACH_FS */
2218 VAddToVolumeUpdateList_r(ec, vp);
2220 Log("VAttachVolume: Error adding volume %u to update list\n", vp->hashid);
2227 Log("VOnline: volume %u (%s) attached and online\n", V_id(vp),
2231 VCancelReservation_r(nvp);
2234 if (*ec && (*ec != VOFFLINE) && (*ec != VSALVAGE)) {
2235 if (vp && !VIsErrorState(V_attachState(vp))) {
2236 VChangeState_r(vp, VOL_STATE_ERROR);
2243 #endif /* AFS_DEMAND_ATTACH_FS */
2246 * called without any locks held
2247 * returns with vol_glock_mutex held
2250 attach2(Error * ec, VolId volumeId, char *path, register struct VolumeHeader * header,
2251 struct DiskPartition64 * partp, register Volume * vp, int isbusy, int mode)
2253 vp->specialStatus = (byte) (isbusy ? VBUSY : 0);
2254 IH_INIT(vp->vnodeIndex[vLarge].handle, partp->device, header->parent,
2255 header->largeVnodeIndex);
2256 IH_INIT(vp->vnodeIndex[vSmall].handle, partp->device, header->parent,
2257 header->smallVnodeIndex);
2258 IH_INIT(vp->diskDataHandle, partp->device, header->parent,
2259 header->volumeInfo);
2260 IH_INIT(vp->linkHandle, partp->device, header->parent, header->linkTable);
2261 vp->shuttingDown = 0;
2262 vp->goingOffline = 0;
2264 #ifdef AFS_DEMAND_ATTACH_FS
2265 vp->stats.last_attach = FT_ApproxTime();
2266 vp->stats.attaches++;
2270 IncUInt64(&VStats.attaches);
2271 vp->cacheCheck = ++VolumeCacheCheck;
2272 /* just in case this ever rolls over */
2273 if (!vp->cacheCheck)
2274 vp->cacheCheck = ++VolumeCacheCheck;
2275 GetVolumeHeader(vp);
2278 #if defined(AFS_DEMAND_ATTACH_FS) && defined(FSSYNC_BUILD_CLIENT)
2279 /* demand attach changes the V_PEEK mechanism
2281 * we can now suck the current disk data structure over
2282 * the fssync interface without going to disk
2284 * (technically, we don't need to restrict this feature
2285 * to demand attach fileservers. However, I'm trying
2286 * to limit the number of common code changes)
2288 if (programType != fileServer && mode == V_PEEK) {
2290 res.payload.len = sizeof(VolumeDiskData);
2291 res.payload.buf = &vp->header->diskstuff;
2293 if (FSYNC_VolOp(volumeId,
2295 FSYNC_VOL_QUERY_HDR,
2298 goto disk_header_loaded;
2301 #endif /* AFS_DEMAND_ATTACH_FS && FSSYNC_BUILD_CLIENT */
2302 (void)ReadHeader(ec, V_diskDataHandle(vp), (char *)&V_disk(vp),
2303 sizeof(V_disk(vp)), VOLUMEINFOMAGIC, VOLUMEINFOVERSION);
2305 #ifdef AFS_DEMAND_ATTACH_FS
2308 IncUInt64(&VStats.hdr_loads);
2309 IncUInt64(&vp->stats.hdr_loads);
2311 #endif /* AFS_DEMAND_ATTACH_FS */
2314 Log("VAttachVolume: Error reading diskDataHandle vol header %s; error=%u\n", path, *ec);
2319 #ifdef AFS_DEMAND_ATTACH_FS
2322 /* check for pending volume operations */
2323 if (vp->pending_vol_op) {
2324 /* see if the pending volume op requires exclusive access */
2325 switch (vp->pending_vol_op->vol_op_state) {
2326 case FSSYNC_VolOpPending:
2327 /* this should never happen */
2328 assert(vp->pending_vol_op->vol_op_state != FSSYNC_VolOpPending);
2331 case FSSYNC_VolOpRunningUnknown:
2332 vp->pending_vol_op->vol_op_state =
2333 (VVolOpLeaveOnline_r(vp, vp->pending_vol_op) ?
2334 FSSYNC_VolOpRunningOnline : FSSYNC_VolOpRunningOffline);
2337 case FSSYNC_VolOpRunningOffline:
2338 /* mark the volume down */
2340 VChangeState_r(vp, VOL_STATE_UNATTACHED);
2341 if (V_offlineMessage(vp)[0] == '\0')
2342 strlcpy(V_offlineMessage(vp),
2343 "A volume utility is running.",
2344 sizeof(V_offlineMessage(vp)));
2345 V_offlineMessage(vp)[sizeof(V_offlineMessage(vp)) - 1] = '\0';
2347 /* check to see if we should set the specialStatus flag */
2348 if (VVolOpSetVBusy_r(vp, vp->pending_vol_op)) {
2349 vp->specialStatus = VBUSY;
2354 V_attachFlags(vp) |= VOL_HDR_LOADED;
2355 vp->stats.last_hdr_load = vp->stats.last_attach;
2357 #endif /* AFS_DEMAND_ATTACH_FS */
2360 struct IndexFileHeader iHead;
2362 #if OPENAFS_VOL_STATS
2364 * We just read in the diskstuff part of the header. If the detailed
2365 * volume stats area has not yet been initialized, we should bzero the
2366 * area and mark it as initialized.
2368 if (!(V_stat_initialized(vp))) {
2369 memset((char *)(V_stat_area(vp)), 0, VOL_STATS_BYTES);
2370 V_stat_initialized(vp) = 1;
2372 #endif /* OPENAFS_VOL_STATS */
2374 (void)ReadHeader(ec, vp->vnodeIndex[vSmall].handle,
2375 (char *)&iHead, sizeof(iHead),
2376 SMALLINDEXMAGIC, SMALLINDEXVERSION);
2379 Log("VAttachVolume: Error reading smallVnode vol header %s; error=%u\n", path, *ec);
2384 struct IndexFileHeader iHead;
2386 (void)ReadHeader(ec, vp->vnodeIndex[vLarge].handle,
2387 (char *)&iHead, sizeof(iHead),
2388 LARGEINDEXMAGIC, LARGEINDEXVERSION);
2391 Log("VAttachVolume: Error reading largeVnode vol header %s; error=%u\n", path, *ec);
2395 #ifdef AFS_NAMEI_ENV
2397 struct versionStamp stamp;
2399 (void)ReadHeader(ec, V_linkHandle(vp), (char *)&stamp,
2400 sizeof(stamp), LINKTABLEMAGIC, LINKTABLEVERSION);
2403 Log("VAttachVolume: Error reading namei vol header %s; error=%u\n", path, *ec);
2406 #endif /* AFS_NAMEI_ENV */
2408 #if defined(AFS_DEMAND_ATTACH_FS)
2409 if (*ec && ((*ec != VOFFLINE) || (V_attachState(vp) != VOL_STATE_UNATTACHED))) {
2411 if (programType == fileServer) {
2412 VRequestSalvage_r(ec, vp, SALVSYNC_ERROR, VOL_SALVAGE_INVALIDATE_HEADER);
2415 Log("VAttachVolume: Error attaching volume %s; volume needs salvage; error=%u\n", path, *ec);
2421 /* volume operation in progress */
2425 #else /* AFS_DEMAND_ATTACH_FS */
2427 Log("VAttachVolume: Error attaching volume %s; volume needs salvage; error=%u\n", path, *ec);
2432 #endif /* AFS_DEMAND_ATTACH_FS */
2434 if (V_needsSalvaged(vp)) {
2435 if (vp->specialStatus)
2436 vp->specialStatus = 0;
2438 #if defined(AFS_DEMAND_ATTACH_FS)
2439 if (programType == fileServer) {
2440 VRequestSalvage_r(ec, vp, SALVSYNC_NEEDED, VOL_SALVAGE_INVALIDATE_HEADER);
2443 Log("VAttachVolume: volume salvage flag is ON for %s; volume needs salvage\n", path);
2447 #else /* AFS_DEMAND_ATTACH_FS */
2450 #endif /* AFS_DEMAND_ATTACH_FS */
2455 if (programType == fileServer) {
2456 #ifndef FAST_RESTART
2457 if (V_inUse(vp) && VolumeWriteable(vp)) {
2458 if (!V_needsSalvaged(vp)) {
2459 V_needsSalvaged(vp) = 1;
2460 VUpdateVolume_r(ec, vp, 0);
2462 #if defined(AFS_DEMAND_ATTACH_FS)
2463 VRequestSalvage_r(ec, vp, SALVSYNC_NEEDED, VOL_SALVAGE_INVALIDATE_HEADER);
2465 #else /* AFS_DEMAND_ATTACH_FS */
2466 Log("VAttachVolume: volume %s needs to be salvaged; not attached.\n", path);
2469 #endif /* AFS_DEMAND_ATTACH_FS */
2472 #endif /* FAST_RESTART */
2474 if (V_destroyMe(vp) == DESTROY_ME) {
2475 #if defined(AFS_DEMAND_ATTACH_FS)
2476 /* schedule a salvage so the volume goes away on disk */
2477 VRequestSalvage_r(ec, vp, SALVSYNC_ERROR, VOL_SALVAGE_INVALIDATE_HEADER);
2478 VChangeState_r(vp, VOL_STATE_ERROR);
2480 #endif /* AFS_DEMAND_ATTACH_FS */
2482 Log("VAttachVolume: volume %s is junk; it should be destroyed at next salvage\n", path);
2488 vp->nextVnodeUnique = V_uniquifier(vp);
2489 vp->vnodeIndex[vSmall].bitmap = vp->vnodeIndex[vLarge].bitmap = NULL;
2490 #ifndef BITMAP_LATER
2491 if (programType == fileServer && VolumeWriteable(vp)) {
2493 for (i = 0; i < nVNODECLASSES; i++) {
2494 VGetBitmap_r(ec, vp, i);
2496 #ifdef AFS_DEMAND_ATTACH_FS
2497 VRequestSalvage_r(ec, vp, SALVSYNC_ERROR, VOL_SALVAGE_INVALIDATE_HEADER);
2499 #else /* AFS_DEMAND_ATTACH_FS */
2501 #endif /* AFS_DEMAND_ATTACH_FS */
2502 Log("VAttachVolume: error getting bitmap for volume (%s)\n",
2508 #endif /* BITMAP_LATER */
2510 if (programType == fileServer) {
2511 if (vp->specialStatus)
2512 vp->specialStatus = 0;
2513 if (V_blessed(vp) && V_inService(vp) && !V_needsSalvaged(vp)) {
2514 V_inUse(vp) = fileServer;
2515 V_offlineMessage(vp)[0] = '\0';
2518 if ((mode != V_PEEK) && (mode != V_SECRETLY))
2519 V_inUse(vp) = programType;
2520 V_checkoutMode(vp) = mode;
2523 AddVolumeToHashTable(vp, V_id(vp));
2524 #ifdef AFS_DEMAND_ATTACH_FS
2525 if ((programType != fileServer) ||
2526 (V_inUse(vp) == fileServer)) {
2527 AddVolumeToVByPList_r(vp);
2529 VChangeState_r(vp, VOL_STATE_ATTACHED);
2531 VChangeState_r(vp, VOL_STATE_UNATTACHED);
2537 /* Attach an existing volume.
2538 The volume also normally goes online at this time.
2539 An offline volume must be reattached to make it go online.
2543 VAttachVolume(Error * ec, VolumeId volumeId, int mode)
2547 retVal = VAttachVolume_r(ec, volumeId, mode);
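/* Editor's sketch (not part of the original source): a minimal caller of the
 * unlocked VAttachVolume wrapper above. V_VOLUPD is assumed as the checkout
 * mode, as referenced elsewhere in this file. Illustrative only. */
#if 0
static void
ExampleAttachById(VolumeId volumeId)
{
    Error ec = 0;
    Volume *vp;

    vp = VAttachVolume(&ec, volumeId, V_VOLUPD);
    if (!vp || ec) {
        Log("example: attach of volume %u failed (error %d)\n", volumeId, ec);
        return;
    }
    /* the volume normally goes online at this point; an offline volume
     * must be reattached to bring it back online (see the comment above) */
}
#endif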
2553 VAttachVolume_r(Error * ec, VolumeId volumeId, int mode)
2556 VGetVolumePath(ec, volumeId, &part, &name);
2558 register Volume *vp;
2560 vp = VGetVolume_r(&error, volumeId);
2562 assert(V_inUse(vp) == 0);
2563 VDetachVolume_r(ec, vp);
2567 return VAttachVolumeByName_r(ec, part, name, mode);
2570 /* Increment a reference count to a volume, sans context swaps. This may
2571 * require reading the volume header in from disk, since there's
2572 * an invariant in the volume package that nUsers>0 ==> vp->header is valid.
2574 * N.B. This call can fail if we can't read in the header! In that case
2575 * we still guarantee we won't context swap, but the ref count won't be
2576 * incremented (otherwise we'd violate the invariant).
2578 /* NOTE: with the demand attach fileserver extensions, the global lock
2579 * is dropped within VHold */
2580 #ifdef AFS_DEMAND_ATTACH_FS
2582 VHold_r(register Volume * vp)
2586 VCreateReservation_r(vp);
2587 VWaitExclusiveState_r(vp);
2589 LoadVolumeHeader(&error, vp);
2591 VCancelReservation_r(vp);
2595 VCancelReservation_r(vp);
2598 #else /* AFS_DEMAND_ATTACH_FS */
2600 VHold_r(register Volume * vp)
2604 LoadVolumeHeader(&error, vp);
2610 #endif /* AFS_DEMAND_ATTACH_FS */
2613 VHold(register Volume * vp)
2617 retVal = VHold_r(vp);
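/* Editor's sketch (not part of the original source): checking VHold's return
 * code, since the hold is not taken when the header cannot be read. VHold is
 * file-local, so a caller like this would live in this file. Illustrative only. */
#if 0
static int
ExampleHold(Volume * vp)
{
    int code = VHold(vp);	/* takes and releases VOL_LOCK internally */
    if (code) {
        Log("example: could not hold volume %u (header unreadable?)\n",
            vp->hashid);
    }
    return code;
}
#endif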
2623 /***************************************************/
2624 /* get and put volume routines */
2625 /***************************************************/
2628 * put back a heavyweight reference to a volume object.
2630 * @param[in] vp volume object pointer
2632 * @pre VOL_LOCK held
2634 * @post heavyweight volume reference put back.
2635 * depending on state, volume may have been taken offline,
2636 * detached, salvaged, freed, etc.
2638 * @internal volume package internal use only
2641 VPutVolume_r(register Volume * vp)
2643 assert(--vp->nUsers >= 0);
2644 if (vp->nUsers == 0) {
2646 ReleaseVolumeHeader(vp->header);
2647 #ifdef AFS_DEMAND_ATTACH_FS
2648 if (!VCheckDetach(vp)) {
2652 #else /* AFS_DEMAND_ATTACH_FS */
2654 #endif /* AFS_DEMAND_ATTACH_FS */
2659 VPutVolume(register Volume * vp)
2667 /* Get a pointer to an attached volume. The pointer is returned regardless
2668 of whether the volume is in service, or online or offline. An error
2669 code, however, is returned to indicate the volume's status */
2671 VGetVolume(Error * ec, Error * client_ec, VolId volumeId)
2675 retVal = GetVolume(ec, client_ec, volumeId, NULL, 0);
2681 VGetVolume_r(Error * ec, VolId volumeId)
2683 return GetVolume(ec, NULL, volumeId, NULL, 0);
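/* Editor's sketch (not part of the original source): the usual get/put pairing
 * for the unlocked wrappers above. client_ec may be passed as NULL when the
 * caller does not need the client-facing error code, as VGetVolume_r itself
 * does. Illustrative only. */
#if 0
static void
ExampleGetPut(VolId volumeId)
{
    Error ec = 0;
    Volume *vp;

    vp = VGetVolume(&ec, NULL, volumeId);
    if (!vp) {
        Log("example: volume %u not available, error %d\n", volumeId, ec);
        return;
    }
    /* ... operate on the volume while holding the reference ... */
    VPutVolume(vp);
}
#endif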
2686 /* try to get a volume we've previously looked up */
2687 /* for demand attach fs, caller MUST NOT hold a ref count on vp */
2689 VGetVolumeByVp_r(Error * ec, Volume * vp)
2691 return GetVolume(ec, NULL, vp->hashid, vp, 0);
2694 /* private interface for getting a volume handle
2695 * volumeId must be provided.
2696 * hint is an optional parameter to speed up hash lookups
2697 * flags is not used at this time
2699 /* for demand attach fs, caller MUST NOT hold a ref count on hint */
2701 GetVolume(Error * ec, Error * client_ec, VolId volumeId, Volume * hint, int flags)
2704 /* pull this profiling/debugging code out of regular builds */
2706 #define VGET_CTR_INC(x) x++
2707 unsigned short V0 = 0, V1 = 0, V2 = 0, V3 = 0, V5 = 0, V6 =
2708 0, V7 = 0, V8 = 0, V9 = 0;
2709 unsigned short V10 = 0, V11 = 0, V12 = 0, V13 = 0, V14 = 0, V15 = 0;
2711 #define VGET_CTR_INC(x)
2713 #ifdef AFS_DEMAND_ATTACH_FS
2714 Volume *avp, * rvp = hint;
2718 * if VInit is zero, the volume package dynamic
2719 * data structures have not been initialized yet,
2720 * and we must immediately return an error
2726 *client_ec = VOFFLINE;
2731 #ifdef AFS_DEMAND_ATTACH_FS
2733 VCreateReservation_r(rvp);
2735 #endif /* AFS_DEMAND_ATTACH_FS */
2743 vp = VLookupVolume_r(ec, volumeId, vp);
2749 #ifdef AFS_DEMAND_ATTACH_FS
2750 if (rvp && (rvp != vp)) {
2751 /* break reservation on old vp */
2752 VCancelReservation_r(rvp);
2755 #endif /* AFS_DEMAND_ATTACH_FS */
2761 /* Until we have reached an initialization level of 2
2762 * we don't know whether this volume exists or not.
2763 * We can't sleep and retry later because before a volume
2764 * is attached, the caller tries to get it first. Just
2765 * return VOFFLINE and the caller can choose whether to
2766 * retry the command or not. */
2776 IncUInt64(&VStats.hdr_gets);
2778 #ifdef AFS_DEMAND_ATTACH_FS
2779 /* block if someone else is performing an exclusive op on this volume */
2782 VCreateReservation_r(rvp);
2784 VWaitExclusiveState_r(vp);
2786 /* short circuit with VNOVOL in the following circumstances:
2789 * - VOL_STATE_SHUTTING_DOWN
2791 if ((V_attachState(vp) == VOL_STATE_ERROR) ||
2792 (V_attachState(vp) == VOL_STATE_SHUTTING_DOWN) ||
2793 (V_attachState(vp) == VOL_STATE_GOING_OFFLINE)) {
2800 * short circuit with VOFFLINE in the following circumstances:
2802 * - VOL_STATE_UNATTACHED
2804 if (V_attachState(vp) == VOL_STATE_UNATTACHED) {
2805 if (vp->specialStatus) {
2806 *ec = vp->specialStatus;
2814 /* allowable states:
2820 if (vp->salvage.requested) {
2821 VUpdateSalvagePriority_r(vp);
2824 if (V_attachState(vp) == VOL_STATE_PREATTACHED) {
2825 avp = VAttachVolumeByVp_r(ec, vp, 0);
2828 /* VAttachVolumeByVp_r can return a pointer
2829 * != the vp passed to it under certain
2830 * conditions; make sure we don't leak
2831 * reservations if that happens */
2833 VCancelReservation_r(rvp);
2835 VCreateReservation_r(rvp);
2845 if (!vp->pending_vol_op) {
2860 if ((V_attachState(vp) == VOL_STATE_SALVAGING) ||
2861 (*ec == VSALVAGING)) {
2863 /* see CheckVnode() in afsfileprocs.c for an explanation
2864 * of this error code logic */
2865 afs_uint32 now = FT_ApproxTime();
2866 if ((vp->stats.last_salvage + (10 * 60)) >= now) {
2869 *client_ec = VRESTARTING;
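/* editor's note: the intent here (per CheckVnode() in afsfileprocs.c) appears
 * to be that, within roughly ten minutes of the salvage request, clients see
 * VRESTARTING and simply retry later rather than treating the volume as a
 * hard failure */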
2878 LoadVolumeHeader(ec, vp);
2881 /* Only log the error if it was a totally unexpected error; a
2882 * missing inode by itself is likely caused by the volume being deleted */
2883 if (errno != ENXIO || LogLevel)
2884 Log("Volume %u: couldn't reread volume header\n",
2886 #ifdef AFS_DEMAND_ATTACH_FS
2887 if (programType == fileServer) {
2888 VRequestSalvage_r(ec, vp, SALVSYNC_ERROR, VOL_SALVAGE_INVALIDATE_HEADER);
2893 #else /* AFS_DEMAND_ATTACH_FS */
2896 #endif /* AFS_DEMAND_ATTACH_FS */
2900 #ifdef AFS_DEMAND_ATTACH_FS
2902 * this test MUST happen after the volume header is loaded
2905 /* only valid before/during demand attachment */
2906 assert(!vp->pending_vol_op || vp->pending_vol_op != FSSYNC_VolOpRunningUnknown);
2908 /* deny getvolume due to running mutually exclusive vol op */
2909 if (vp->pending_vol_op && vp->pending_vol_op->vol_op_state==FSSYNC_VolOpRunningOffline) {
2911 * volume cannot remain online during this volume operation.
2914 if (vp->specialStatus) {
2916 * special status codes outrank normal VOFFLINE code
2918 *ec = vp->specialStatus;
2920 *client_ec = vp->specialStatus;
2924 /* see CheckVnode() in afsfileprocs.c for an explanation
2925 * of this error code logic */
2926 afs_uint32 now = FT_ApproxTime();
2927 if ((vp->stats.last_vol_op + (10 * 60)) >= now) {
2930 *client_ec = VRESTARTING;
2935 VChangeState_r(vp, VOL_STATE_UNATTACHED);
2936 FreeVolumeHeader(vp);
2940 #endif /* AFS_DEMAND_ATTACH_FS */
2943 if (vp->shuttingDown) {
2950 if (programType == fileServer) {
2952 if (vp->goingOffline) {
2954 #ifdef AFS_DEMAND_ATTACH_FS
2955 /* wait for the volume to go offline */
2956 if (V_attachState(vp) == VOL_STATE_GOING_OFFLINE) {
2957 VWaitStateChange_r(vp);
2959 #elif defined(AFS_PTHREAD_ENV)
2960 VOL_CV_WAIT(&vol_put_volume_cond);
2961 #else /* AFS_PTHREAD_ENV */
2962 LWP_WaitProcess(VPutVolume);
2963 #endif /* AFS_PTHREAD_ENV */
2966 if (vp->specialStatus) {
2968 *ec = vp->specialStatus;
2969 } else if (V_inService(vp) == 0 || V_blessed(vp) == 0) {
2972 } else if (V_inUse(vp) == 0) {
2983 #ifdef AFS_DEMAND_ATTACH_FS
2984 /* if no error, bump nUsers */
2987 VLRU_UpdateAccess_r(vp);
2990 VCancelReservation_r(rvp);
2993 if (client_ec && !*client_ec) {
2996 #else /* AFS_DEMAND_ATTACH_FS */
2997 /* if no error, bump nUsers */
3004 #endif /* AFS_DEMAND_ATTACH_FS */
3012 /***************************************************/
3013 /* Volume offline/detach routines */
3014 /***************************************************/
3016 /* caller MUST hold a heavyweight ref on vp */
3017 #ifdef AFS_DEMAND_ATTACH_FS
3019 VTakeOffline_r(register Volume * vp)
3023 assert(vp->nUsers > 0);
3024 assert(programType == fileServer);
3026 VCreateReservation_r(vp);
3027 VWaitExclusiveState_r(vp);
3029 vp->goingOffline = 1;
3030 V_needsSalvaged(vp) = 1;
3032 VRequestSalvage_r(&error, vp, SALVSYNC_ERROR, 0);
3033 VCancelReservation_r(vp);
3035 #else /* AFS_DEMAND_ATTACH_FS */
3037 VTakeOffline_r(register Volume * vp)
3039 assert(vp->nUsers > 0);
3040 assert(programType == fileServer);
3042 vp->goingOffline = 1;
3043 V_needsSalvaged(vp) = 1;
3045 #endif /* AFS_DEMAND_ATTACH_FS */
3048 VTakeOffline(register Volume * vp)
3056 * force a volume offline.
3058 * @param[in] vp volume object pointer
3059 * @param[in] flags flags (see note below)
3061 * @note the flag VOL_FORCEOFF_NOUPDATE is a recursion control flag
3062 * used when VUpdateVolume_r needs to call VForceOffline_r
3063 * (which in turn would normally call VUpdateVolume_r)
3065 * @see VUpdateVolume_r
3067 * @pre VOL_LOCK must be held.
3068 * for DAFS, caller must hold ref.
3070 * @note for DAFS, it _is safe_ to call this function from an
3073 * @post needsSalvaged flag is set.
3074 * for DAFS, salvage is requested.
3075 * no further references to the volume through the volume
3076 * package will be honored.
3077 * all file descriptor and vnode caches are invalidated.
3079 * @warning this is a heavy-handed interface. It results in
3080 * a volume going offline regardless of the current
3081 * reference count state.
3083 * @internal volume package internal use only
3086 VForceOffline_r(Volume * vp, int flags)
3090 #ifdef AFS_DEMAND_ATTACH_FS
3091 VChangeState_r(vp, VOL_STATE_ERROR);
3096 strcpy(V_offlineMessage(vp),
3097 "Forced offline due to internal error: volume needs to be salvaged");
3098 Log("Volume %u forced offline: it needs salvaging!\n", V_id(vp));
3101 vp->goingOffline = 0;
3102 V_needsSalvaged(vp) = 1;
3103 if (!(flags & VOL_FORCEOFF_NOUPDATE)) {
3104 VUpdateVolume_r(&error, vp, VOL_UPDATE_NOFORCEOFF);
3107 #ifdef AFS_DEMAND_ATTACH_FS
3108 VRequestSalvage_r(&error, vp, SALVSYNC_ERROR, VOL_SALVAGE_INVALIDATE_HEADER);
3109 #endif /* AFS_DEMAND_ATTACH_FS */
3111 #ifdef AFS_PTHREAD_ENV
3112 assert(pthread_cond_broadcast(&vol_put_volume_cond) == 0);
3113 #else /* AFS_PTHREAD_ENV */
3114 LWP_NoYieldSignal(VPutVolume);
3115 #endif /* AFS_PTHREAD_ENV */
3117 VReleaseVolumeHandles_r(vp);
3121 * force a volume offline.
3123 * @param[in] vp volume object pointer
3125 * @see VForceOffline_r
3128 VForceOffline(Volume * vp)
3131 VForceOffline_r(vp, 0);
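/* Editor's sketch (not part of the original source): forcing a volume offline
 * after an unrecoverable internal error. VOL_FORCEOFF_NOUPDATE is reserved for
 * the recursion-control path from VUpdateVolume_r; ordinary callers use the
 * wrapper above with flags == 0. Illustrative only. */
#if 0
static void
ExampleForceOffline(Volume * vp)
{
    Log("example: internal error on volume %u; forcing it offline\n", V_id(vp));
    VForceOffline(vp);	/* marks needsSalvaged and invalidates cached handles */
}
#endif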
3135 /* The opposite of VAttachVolume. The volume header is written to disk, with
3136 the inUse bit turned off. A copy of the header is maintained in memory,
3137 however (which is why this is VOffline, not VDetach).
3140 VOffline_r(Volume * vp, char *message)
3143 VolumeId vid = V_id(vp);
3145 assert(programType != volumeUtility);
3150 if (V_offlineMessage(vp)[0] == '\0')
3151 strncpy(V_offlineMessage(vp), message, sizeof(V_offlineMessage(vp)));
3152 V_offlineMessage(vp)[sizeof(V_offlineMessage(vp)) - 1] = '\0';
3154 vp->goingOffline = 1;
3155 #ifdef AFS_DEMAND_ATTACH_FS
3156 VChangeState_r(vp, VOL_STATE_GOING_OFFLINE);
3157 VCreateReservation_r(vp);
3160 /* wait for the volume to go offline */
3161 if (V_attachState(vp) == VOL_STATE_GOING_OFFLINE) {
3162 VWaitStateChange_r(vp);
3164 VCancelReservation_r(vp);
3165 #else /* AFS_DEMAND_ATTACH_FS */
3167 vp = VGetVolume_r(&error, vid); /* Wait for it to go offline */
3168 if (vp) /* In case it was reattached... */
3170 #endif /* AFS_DEMAND_ATTACH_FS */
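/* Editor's sketch (not part of the original source): taking a volume offline
 * with an operator-visible message via the unlocked VOffline wrapper defined
 * below. The caller is assumed to hold a reference obtained via VGetVolume or
 * VAttachVolume. Illustrative only. */
#if 0
static void
ExampleOffline(Volume * vp)
{
    VOffline(vp, "This volume is temporarily unavailable for maintenance");
    /* on return the header has been written with inUse off; the in-memory
     * copy is retained, which is why this is an offline rather than a detach */
}
#endif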
3173 #ifdef AFS_DEMAND_ATTACH_FS
3175 * Take a volume offline in order to perform a volume operation.
3177 * @param[inout] ec address in which to store error code
3178 * @param[in] vp volume object pointer
3179 * @param[in] message volume offline status message
3182 * - VOL_LOCK is held
3183 * - caller MUST hold a heavyweight ref on vp
3186 * - volume is taken offline
3187 * - if possible, volume operation is promoted to running state
3188 * - on failure, *ec is set to nonzero
3190 * @note Although this function does not return any value, it may
3191 * still fail to promote our pending volume operation to
3192 * a running state. Any caller MUST check the value of *ec,
3193 * and MUST NOT blindly assume success.
3195 * @warning if the caller does not hold a lightweight ref on vp,
3196 * then it MUST NOT reference vp after this function
3197 * returns to the caller.
3199 * @internal volume package internal use only
3202 VOfflineForVolOp_r(Error *ec, Volume *vp, char *message)
3204 assert(vp->pending_vol_op);
3210 if (V_offlineMessage(vp)[0] == '\0')
3211 strncpy(V_offlineMessage(vp), message, sizeof(V_offlineMessage(vp)));
3212 V_offlineMessage(vp)[sizeof(V_offlineMessage(vp)) - 1] = '\0';
3214 vp->goingOffline = 1;
3215 VChangeState_r(vp, VOL_STATE_GOING_OFFLINE);
3216 VCreateReservation_r(vp);
3219 /* Wait for the volume to go offline */
3220 while (!VIsOfflineState(V_attachState(vp))) {
3221 /* do not give corrupted volumes to the volserver */
3222 if (vp->salvage.requested && vp->pending_vol_op->com.programType != salvageServer) {
3226 VWaitStateChange_r(vp);
3230 VCancelReservation_r(vp);
3232 #endif /* AFS_DEMAND_ATTACH_FS */
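/* Editor's sketch (not part of the original source, DAFS only): per the note
 * above, VOfflineForVolOp_r can fail to promote the pending operation, so the
 * caller must always check *ec. It is assumed here that VOL_LOCK is already
 * held and the caller holds a heavyweight reference, per the stated
 * preconditions. Illustrative only. */
#if 0
static void
ExampleOfflineForVolOp(Volume * vp)
{
    Error ec = 0;

    VOfflineForVolOp_r(&ec, vp, "A volume utility is running.");
    if (ec) {
        Log("example: could not take volume %u offline for the pending "
            "operation (error %d)\n", V_id(vp), ec);
        return;
    }
    /* only now is it safe to hand the volume to the volume operation */
}
#endif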
3235 VOffline(Volume * vp, char *message)
3238 VOffline_r(vp, message);
3242 /* This is used mostly by utility routines that don't want
3243 * to keep all the volume headers around. Generally, the file server won't
3244 * call this routine, because then the offline message in the volume header
3245 * (or other information) won't be available to clients. For NAMEI, also
3246 * close the file handles. However, the fileserver does call this during
3247 * an attach following a volume operation.
3250 VDetachVolume_r(Error * ec, Volume * vp)
3253 struct DiskPartition64 *tpartp;
3254 int notifyServer = 0;
3255 int useDone = FSYNC_VOL_ON;
3257 *ec = 0; /* always "succeeds" */
3258 if (programType == volumeUtility) {
3259 notifyServer = vp->needsPutBack;
3260 if (V_destroyMe(vp) == DESTROY_ME)
3261 useDone = FSYNC_VOL_DONE;
3262 #ifdef AFS_DEMAND_ATTACH_FS
3263 else if (!V_blessed(vp) || !V_inService(vp))
3264 useDone = FSYNC_VOL_LEAVE_OFF;
3267 tpartp = vp->partition;
3269 DeleteVolumeFromHashTable(vp);
3270 vp->shuttingDown = 1;
3271 #ifdef AFS_DEMAND_ATTACH_FS
3272 DeleteVolumeFromVByPList_r(vp);
3274 VChangeState_r(vp, VOL_STATE_SHUTTING_DOWN);
3276 if (programType != fileServer)
3278 #endif /* AFS_DEMAND_ATTACH_FS */
3280 /* Will be detached sometime in the future--this is OK since volume is offline */
3282 /* XXX the following code should really be moved to VCheckDetach() since the volume
3283 * is not technically detached until the refcounts reach zero
3285 #ifdef FSSYNC_BUILD_CLIENT
3286 if (programType == volumeUtility && notifyServer) {
3288 * Note: The server is not notified in the case of a bogus volume
3289 * explicitly to make it possible to create a volume, do a partial
3290 * restore, then abort the operation without ever putting the volume
3291 * online. This is essential in the case of a volume move operation
3292 * between two partitions on the same server. In that case, there
3293 * would be two instances of the same volume, one of them bogus,
3294 * which the file server would attempt to put online.
3296 FSYNC_VolOp(volume, tpartp->name, useDone, 0, NULL);
3297 /* XXX this code path is only hit by volume utilities, thus
3298 * V_BreakVolumeCallbacks will always be NULL. if we really
3299 * want to break callbacks in this path we need to use FSYNC_VolOp() */
3301 /* Detaching it, so break all callbacks on it */
3302 if (V_BreakVolumeCallbacks) {
3303 Log("volume %u detached; breaking all call backs\n", volume);
3304 (*V_BreakVolumeCallbacks) (volume);
3308 #endif /* FSSYNC_BUILD_CLIENT */
3312 VDetachVolume(Error * ec, Volume * vp)
3315 VDetachVolume_r(ec, vp);
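/* Editor's sketch (not part of the original source): the attach/detach cycle a
 * volume utility typically performs. VAttachVolumeByName is assumed to be the
 * conventional unlocked wrapper declared in volume.h, taking a partition name,
 * a volume header file name, and a checkout mode. Illustrative only. */
#if 0
static void
ExampleUtilityCycle(char *partition, char *name)
{
    Error ec = 0;
    Volume *vp;

    vp = VAttachVolumeByName(&ec, partition, name, V_VOLUPD);
    if (!vp || ec) {
        Log("example: could not attach %s on %s (error %d)\n",
            name, partition, ec);
        return;
    }
    /* ... perform the utility operation ... */
    VDetachVolume(&ec, vp);	/* notifies the fileserver if needsPutBack is set */
}
#endif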
3320 /***************************************************/
3321 /* Volume fd/inode handle closing routines */
3322 /***************************************************/
3324 /* For VDetachVolume, we close all cached file descriptors, but keep
3325 * the Inode handles in case we need to read from a busy volume.
3327 /* for demand attach, caller MUST hold ref count on vp */
3329 VCloseVolumeHandles_r(Volume * vp)
3331 #ifdef AFS_DEMAND_ATTACH_FS
3332 VolState state_save;
3334 state_save = VChangeState_r(vp, VOL_STATE_OFFLINING);
3339 * XXX need to investigate whether we can perform
3340 * DFlushVolume outside of vol_glock_mutex...
3342 * VCloseVnodeFiles_r drops the glock internally */
3343 DFlushVolume(V_id(vp));
3344 VCloseVnodeFiles_r(vp);
3346 #ifdef AFS_DEMAND_ATTACH_FS
3350 /* Too time consuming and unnecessary for the volserver */
3351 if (programType != volumeUtility) {
3352 IH_CONDSYNC(vp->vnodeIndex[vLarge].handle);
3353 IH_CONDSYNC(vp->vnodeIndex[vSmall].handle);
3354 IH_CONDSYNC(vp->diskDataHandle);
3356 IH_CONDSYNC(vp->linkHandle);
3357 #endif /* AFS_NT40_ENV */
3360 IH_REALLYCLOSE(vp->vnodeIndex[vLarge].handle);
3361 IH_REALLYCLOSE(vp->vnodeIndex[vSmall].handle);
3362 IH_REALLYCLOSE(vp->diskDataHandle);
3363 IH_REALLYCLOSE(vp->linkHandle);
3365 #ifdef AFS_DEMAND_ATTACH_FS
3367 VChangeState_r(vp, state_save);
3371 /* For both VForceOffline and VOffline, we close all relevant handles.
3372 * For VOffline, if we re-attach the volume, the files may possibly be
3373 * different from before.
3375 /* for demand attach, caller MUST hold a ref count on vp */
3377 VReleaseVolumeHandles_r(Volume * vp)
3379 #ifdef AFS_DEMAND_ATTACH_FS
3380 VolState state_save;
3382 state_save = VChangeState_r(vp, VOL_STATE_DETACHING);
3385 /* XXX need to investigate whether we can perform
3386 * DFlushVolume outside of vol_glock_mutex... */
3387 DFlushVolume(V_id(vp));
3389 VReleaseVnodeFiles_r(vp); /* releases the glock internally */
3391 #ifdef AFS_DEMAND_ATTACH_FS
3395 /* Too time consuming and unnecessary for the volserver */
3396 if (programType != volumeUtility) {
3397 IH_CONDSYNC(vp->vnodeIndex[vLarge].handle);
3398 IH_CONDSYNC(vp->vnodeIndex[vSmall].handle);
3399 IH_CONDSYNC(vp->diskDataHandle);
3401 IH_CONDSYNC(vp->linkHandle);
3402 #endif /* AFS_NT40_ENV */
3405 IH_RELEASE(vp->vnodeIndex[vLarge].handle);
3406 IH_RELEASE(vp->vnodeIndex[vSmall].handle);
3407 IH_RELEASE(vp->diskDataHandle);
3408 IH_RELEASE(vp->linkHandle);
3410 #ifdef AFS_DEMAND_ATTACH_FS
3412 VChangeState_r(vp, state_save);
3417 /***************************************************/
3418 /* Volume write and fsync routines */
3419 /***************************************************/
3422 VUpdateVolume_r(Error * ec, Volume * vp, int flags)
3424 #ifdef AFS_DEMAND_ATTACH_FS
3425 VolState state_save;
3427 if (flags & VOL_UPDATE_WAIT) {
3428 VCreateReservation_r(vp);
3429 VWaitExclusiveState_r(vp);
3434 if (programType == fileServer)
3436 (V_inUse(vp) ? V_nextVnodeUnique(vp) +
3437 200 : V_nextVnodeUnique(vp));
3439 #ifdef AFS_DEMAND_ATTACH_FS
3440 state_save = VChangeState_r(vp, VOL_STATE_UPDATING);
3444 WriteVolumeHeader_r(ec, vp);
3446 #ifdef AFS_DEMAND_ATTACH_FS
3448 VChangeState_r(vp, state_save);
3449 if (flags & VOL_UPDATE_WAIT) {
3450 VCancelReservation_r(vp);
3455 Log("VUpdateVolume: error updating volume header, volume %u (%s)\n",
3456 V_id(vp), V_name(vp));
3457 /* try to update on-disk header,
3458 * while preventing infinite recursion */
3459 if (!(flags & VOL_UPDATE_NOFORCEOFF)) {
3460 VForceOffline_r(vp, VOL_FORCEOFF_NOUPDATE);
3466 VUpdateVolume(Error * ec, Volume * vp)
3469 VUpdateVolume_r(ec, vp, VOL_UPDATE_WAIT);
3474 VSyncVolume_r(Error * ec, Volume * vp, int flags)
3478 #ifdef AFS_DEMAND_ATTACH_FS
3479 VolState state_save;
3482 if (flags & VOL_SYNC_WAIT) {
3483 VUpdateVolume_r(ec, vp, VOL_UPDATE_WAIT);
3485 VUpdateVolume_r(ec, vp, 0);
3488 #ifdef AFS_DEMAND_ATTACH_FS
3489 state_save = VChangeState_r(vp, VOL_STATE_UPDATING);
3492 fdP = IH_OPEN(V_diskDataHandle(vp));
3493 assert(fdP != NULL);
3494 code = FDH_SYNC(fdP);
3497 #ifdef AFS_DEMAND_ATTACH_FS
3499 VChangeState_r(vp, state_save);
3505 VSyncVolume(Error * ec, Volume * vp)
3508 VSyncVolume_r(ec, vp, VOL_SYNC_WAIT);
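/* Editor's sketch (not part of the original source): persisting an in-core
 * header change. VUpdateVolume writes the volume disk data out (waiting out
 * exclusive states via VOL_UPDATE_WAIT); VSyncVolume, used in its place, would
 * additionally fsync the header file. Illustrative only. */
#if 0
static void
ExampleUpdateHeader(Volume * vp)
{
    Error ec = 0;

    V_dontSalvage(vp) = DONT_SALVAGE;	/* example in-core change */
    VUpdateVolume(&ec, vp);
    if (ec)
        Log("example: header update failed for volume %u (error %d)\n",
            V_id(vp), ec);
}
#endif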
3513 /***************************************************/
3514 /* Volume deallocation routines */
3515 /***************************************************/
3517 #ifdef AFS_DEMAND_ATTACH_FS
3519 FreeVolume(Volume * vp)
3521 /* free the heap space, iff it's safe.
3522 * otherwise, pull it out of the hash table, so it
3523 * will get deallocated when all refs to it go away */
3524 if (!VCheckFree(vp)) {
3525 DeleteVolumeFromHashTable(vp);
3526 DeleteVolumeFromVByPList_r(vp);
3528 /* make sure we invalidate the header cache entry */
3529 FreeVolumeHeader(vp);
3532 #endif /* AFS_DEMAND_ATTACH_FS */
3535 ReallyFreeVolume(Volume * vp)
3540 #ifdef AFS_DEMAND_ATTACH_FS
3542 VChangeState_r(vp, VOL_STATE_FREED);
3543 if (vp->pending_vol_op)
3544 free(vp->pending_vol_op);
3545 #endif /* AFS_DEMAND_ATTACH_FS */
3546 for (i = 0; i < nVNODECLASSES; i++)
3547 if (vp->vnodeIndex[i].bitmap)
3548 free(vp->vnodeIndex[i].bitmap);
3549 FreeVolumeHeader(vp);
3550 #ifndef AFS_DEMAND_ATTACH_FS
3551 DeleteVolumeFromHashTable(vp);
3552 #endif /* AFS_DEMAND_ATTACH_FS */
3556 /* check to see if we should shut down this volume
3557 * returns 1 if volume was freed, 0 otherwise */
3558 #ifdef AFS_DEMAND_ATTACH_FS
3560 VCheckDetach(register Volume * vp)
3565 if (vp->nUsers || vp->nWaiters)
3568 if (vp->shuttingDown) {
3570 if ((programType != fileServer) &&
3571 (V_inUse(vp) == programType) &&
3572 ((V_checkoutMode(vp) == V_VOLUPD) ||
3573 ((V_checkoutMode(vp) == V_CLONE) &&
3574 (VolumeWriteable(vp))))) {
3576 VUpdateVolume_r(&ec, vp, VOL_UPDATE_NOFORCEOFF);
3578 Log("VCheckDetach: volume header update for volume %u "
3579 "failed with errno %d\n", vp->hashid, errno);
3582 VReleaseVolumeHandles_r(vp);
3584 ReallyFreeVolume(vp);
3585 if (programType == fileServer) {
3586 assert(pthread_cond_broadcast(&vol_put_volume_cond) == 0);
3591 #else /* AFS_DEMAND_ATTACH_FS */
3593 VCheckDetach(register Volume * vp)
3601 if (vp->shuttingDown) {
3603 if ((programType != fileServer) &&
3604 (V_inUse(vp) == programType) &&
3605 ((V_checkoutMode(vp) == V_VOLUPD) ||
3606 ((V_checkoutMode(vp) == V_CLONE) &&
3607 (VolumeWriteable(vp))))) {
3609 VUpdateVolume_r(&ec, vp, VOL_UPDATE_NOFORCEOFF);
3611 Log("VCheckDetach: volume header update for volume %u failed with errno %d\n",
3615 VReleaseVolumeHandles_r(vp);
3616 ReallyFreeVolume(vp);
3617 if (programType == fileServer) {
3618 #if defined(AFS_PTHREAD_ENV)
3619 assert(pthread_cond_broadcast(&vol_put_volume_cond) == 0);
3620 #else /* AFS_PTHREAD_ENV */
3621 LWP_NoYieldSignal(VPutVolume);
3622 #endif /* AFS_PTHREAD_ENV */
3627 #endif /* AFS_DEMAND_ATTACH_FS */
3629 /* check to see if we should offline this volume
3630 * return 1 if volume went offline, 0 otherwise */
3631 #ifdef AFS_DEMAND_ATTACH_FS
3633 VCheckOffline(register Volume * vp)
3635 Volume * rvp = NULL;
3638 if (vp->goingOffline && !vp->nUsers) {
3640 assert(programType == fileServer);
3641 assert((V_attachState(vp) != VOL_STATE_ATTACHED) &&
3642 (V_attachState(vp) != VOL_STATE_FREED) &&
3643 (V_attachState(vp) != VOL_STATE_PREATTACHED) &&
3644 (V_attachState(vp) != VOL_STATE_UNATTACHED));
3648 * VOL_STATE_GOING_OFFLINE
3649 * VOL_STATE_SHUTTING_DOWN
3650 * VIsErrorState(V_attachState(vp))
3651 * VIsExclusiveState(V_attachState(vp))
3654 VCreateReservation_r(vp);
3655 VChangeState_r(vp, VOL_STATE_OFFLINING);
3658 /* must clear the goingOffline flag before we drop the glock */
3659 vp->goingOffline = 0;
3664 /* perform async operations */
3665 VUpdateVolume_r(&error, vp, 0);
3666 VCloseVolumeHandles_r(vp);
3669 Log("VOffline: Volume %u (%s) is now offline", V_id(vp),
3671 if (V_offlineMessage(vp)[0])
3672 Log(" (%s)", V_offlineMessage(vp));
3676 /* invalidate the volume header cache entry */
3677 FreeVolumeHeader(vp);
3679 /* if nothing changed state to error or salvaging,
3680 * drop state to unattached */
3681 if (!VIsErrorState(V_attachState(vp))) {
3682 VChangeState_r(vp, VOL_STATE_UNATTACHED);
3684 VCancelReservation_r(vp);
3685 /* no usage of vp is safe beyond this point */
3689 #else /* AFS_DEMAND_ATTACH_FS */
3691 VCheckOffline(register Volume * vp)
3693 Volume * rvp = NULL;
3696 if (vp->goingOffline && !vp->nUsers) {
3698 assert(programType == fileServer);
3701 vp->goingOffline = 0;
3703 VUpdateVolume_r(&error, vp, 0);
3704 VCloseVolumeHandles_r(vp);
3706 Log("VOffline: Volume %u (%s) is now offline", V_id(vp),
3708 if (V_offlineMessage(vp)[0])
3709 Log(" (%s)", V_offlineMessage(vp));
3712 FreeVolumeHeader(vp);
3713 #ifdef AFS_PTHREAD_ENV
3714 assert(pthread_cond_broadcast(&vol_put_volume_cond) == 0);
3715 #else /* AFS_PTHREAD_ENV */
3716 LWP_NoYieldSignal(VPutVolume);
3717 #endif /* AFS_PTHREAD_ENV */
3721 #endif /* AFS_DEMAND_ATTACH_FS */
3723 /***************************************************/
3724 /* demand attach fs ref counting routines */
3725 /***************************************************/
3727 #ifdef AFS_DEMAND_ATTACH_FS
3728 /* the following two functions handle reference counting for
3729 * asynchronous operations on volume structs.
3731 * their purpose is to prevent a VDetachVolume or VShutdown
3732 * from free()ing the Volume struct during an async i/o op */
3734 /* register with the async volume op ref counter */
3735 /* VCreateReservation_r moved into inline code header because it
3736 * is now needed in vnode.c -- tkeiser 11/20/2007
3740 * decrement volume-package internal refcount.
3742 * @param vp volume object pointer
3744 * @internal volume package internal use only
3747 * @arg VOL_LOCK is held
3748 * @arg lightweight refcount held
3750 * @post volume waiters refcount is decremented; volume may
3751 * have been deallocated/shutdown/offlined/salvaged/
3752 * whatever during the process
3754 * @warning once you have tossed your last reference (you can acquire
3755 * lightweight refs recursively) it is NOT SAFE to reference
3756 * a volume object pointer ever again
3758 * @see VCreateReservation_r
3760 * @note DEMAND_ATTACH_FS only
3763 VCancelReservation_r(Volume * vp)
3765 assert(--vp->nWaiters >= 0);
3766 if (vp->nWaiters == 0) {
3768 if (!VCheckDetach(vp)) {
3775 /* check to see if we should free this volume now
3776 * return 1 if volume was freed, 0 otherwise */
3778 VCheckFree(Volume * vp)
3781 if ((vp->nUsers == 0) &&
3782 (vp->nWaiters == 0) &&
3783 !(V_attachFlags(vp) & (VOL_IN_HASH |
3787 ReallyFreeVolume(vp);
3792 #endif /* AFS_DEMAND_ATTACH_FS */
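/* Editor's sketch (not part of the original source, DAFS only): the lightweight
 * reservation pattern described above, as used elsewhere in this file whenever
 * the glock must be dropped around an asynchronous operation. Illustrative
 * only. */
#if 0
static void
ExampleReservation(Volume * vp)
{
    /* VOL_LOCK is assumed to be held on entry */
    VCreateReservation_r(vp);	/* keep vp from being freed while unlocked */
    VOL_UNLOCK;

    /* ... perform async I/O or other work that must not hold the glock ... */

    VOL_LOCK;
    VCancelReservation_r(vp);	/* vp must not be referenced after this */
}
#endif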
3795 /***************************************************/
3796 /* online volume operations routines */
3797 /***************************************************/
3799 #ifdef AFS_DEMAND_ATTACH_FS
3801 * register a volume operation on a given volume.
3803 * @param[in] vp volume object
3804 * @param[in] vopinfo volume operation info object
3806 * @pre VOL_LOCK is held
3808 * @post volume operation info object attached to volume object.
3809 * volume operation statistics updated.