2 * Copyright 2000, International Business Machines Corporation and others.
5 * This software has been released under the terms of the IBM Public
6 * License. For details, see the LICENSE file in the top-level source
7 * directory or online at http://www.openafs.org/dl/license10.html
9 * Portions Copyright (c) 2005-2008 Sine Nomine Associates
12 /* 1/1/89: NB: this stuff is all going to be replaced. Don't take it too seriously */
17 Institution: The Information Technology Center, Carnegie-Mellon University
21 #include <afsconfig.h>
22 #include <afs/param.h>
28 #include <afs/afsint.h>
31 #include <sys/param.h>
32 #if !defined(AFS_SGI_ENV)
35 #else /* AFS_OSF_ENV */
36 #ifdef AFS_VFSINCL_ENV
39 #include <sys/fs/ufs_fs.h>
41 #if defined(AFS_DARWIN_ENV) || defined(AFS_XBSD_ENV)
42 #include <ufs/ufs/dinode.h>
43 #include <ufs/ffs/fs.h>
48 #else /* AFS_VFSINCL_ENV */
49 #if !defined(AFS_AIX_ENV) && !defined(AFS_LINUX20_ENV) && !defined(AFS_XBSD_ENV)
52 #endif /* AFS_VFSINCL_ENV */
53 #endif /* AFS_OSF_ENV */
54 #endif /* AFS_SGI_ENV */
55 #endif /* AFS_NT40_ENV */
73 #if defined(AFS_SUN_ENV) || defined(AFS_SUN5_ENV)
75 #include <sys/mnttab.h>
76 #include <sys/mntent.h>
82 #if defined(AFS_SGI_ENV)
87 #ifndef AFS_LINUX20_ENV
88 #include <fstab.h> /* Need to find in libc 5, present in libc 6 */
91 #endif /* AFS_SGI_ENV */
93 #endif /* AFS_HPUX_ENV */
97 #include <netinet/in.h>
101 #include <sys/time.h>
102 #endif /* ITIMER_REAL */
103 #endif /* AFS_NT40_ENV */
104 #if defined(AFS_SUN5_ENV) || defined(AFS_NT40_ENV) || defined(AFS_LINUX20_ENV)
111 #include <afs/errors.h>
114 #include <afs/afssyscalls.h>
116 #include <afs/afsutil.h>
120 #include "daemon_com.h"
122 #include "salvsync.h"
125 #include "partition.h"
126 #include "volume_inline.h"
127 #ifdef AFS_PTHREAD_ENV
129 #else /* AFS_PTHREAD_ENV */
130 #include "afs/assert.h"
131 #endif /* AFS_PTHREAD_ENV */
138 #if !defined(offsetof)
143 #define afs_stat stat64
144 #define afs_fstat fstat64
145 #define afs_open open64
146 #else /* !O_LARGEFILE */
147 #define afs_stat stat
148 #define afs_fstat fstat
149 #define afs_open open
150 #endif /* !O_LARGEFILE */
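/*
 * Illustrative sketch, not part of the upstream source: with the mappings
 * above, callers can use the afs_* wrappers and transparently get the
 * 64-bit-capable calls (open64/fstat64) when O_LARGEFILE is available.
 * The helper name is hypothetical; the usual <fcntl.h>/<unistd.h> includes
 * are assumed to be in effect.
 */
static int
ExampleOpenForHeaderRead(const char *path, struct afs_stat *status)
{
    int fd = afs_open(path, O_RDONLY);
    if (fd != -1 && afs_fstat(fd, status) == -1) {
        close(fd);
        fd = -1;
    }
    return fd;    /* caller must close() a non-negative return value */
}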
152 #ifdef AFS_PTHREAD_ENV
153 pthread_mutex_t vol_glock_mutex;
154 pthread_mutex_t vol_trans_mutex;
155 pthread_cond_t vol_put_volume_cond;
156 pthread_cond_t vol_sleep_cond;
157 int vol_attach_threads = 1;
158 #endif /* AFS_PTHREAD_ENV */
160 #ifdef AFS_DEMAND_ATTACH_FS
161 pthread_mutex_t vol_salvsync_mutex;
162 #endif /* AFS_DEMAND_ATTACH_FS */
165 extern void *calloc(), *realloc();
168 /*@printflike@*/ extern void Log(const char *format, ...);
170 /* Forward declarations */
171 static Volume *attach2(Error * ec, VolId vid, char *path,
172 register struct VolumeHeader *header,
173 struct DiskPartition *partp, Volume * vp,
174 int isbusy, int mode);
175 static void ReallyFreeVolume(Volume * vp);
176 #ifdef AFS_DEMAND_ATTACH_FS
177 static void FreeVolume(Volume * vp);
178 #else /* !AFS_DEMAND_ATTACH_FS */
179 #define FreeVolume(vp) ReallyFreeVolume(vp)
180 static void VScanUpdateList(void);
181 #endif /* !AFS_DEMAND_ATTACH_FS */
182 static void VInitVolumeHeaderCache(afs_uint32 howMany);
183 static int GetVolumeHeader(register Volume * vp);
184 static void ReleaseVolumeHeader(register struct volHeader *hd);
185 static void FreeVolumeHeader(register Volume * vp);
186 static void AddVolumeToHashTable(register Volume * vp, int hashid);
187 static void DeleteVolumeFromHashTable(register Volume * vp);
188 static int VHold(Volume * vp);
189 static int VHold_r(Volume * vp);
190 static void VGetBitmap_r(Error * ec, Volume * vp, VnodeClass class);
191 static void GetVolumePath(Error * ec, VolId volumeId, char **partitionp,
193 static void VReleaseVolumeHandles_r(Volume * vp);
194 static void VCloseVolumeHandles_r(Volume * vp);
195 static void LoadVolumeHeader(Error * ec, Volume * vp);
196 static int VCheckOffline(register Volume * vp);
197 static int VCheckDetach(register Volume * vp);
198 static Volume * GetVolume(Error * ec, Error * client_ec, VolId volumeId, Volume * hint, int flags);
199 static int VolumeExternalName_r(VolumeId volumeId, char * name, size_t len);
201 int LogLevel; /* Vice loglevel--not defined as extern so that it will be
202 * defined when not linked with vice, XXXX */
203 ProgramType programType; /* The type of program using the package */
205 /* extended volume package statistics */
208 #ifdef VOL_LOCK_DEBUG
209 pthread_t vol_glock_holder = 0;
213 #define VOLUME_BITMAP_GROWSIZE 16 /* bytes, => 128 vnodes */
214 /* Must be a multiple of 4 (1 word) !! */
216 /* this parameter needs to be tunable at runtime.
217 * 128 was really inadequate for largish servers -- at 16384 volumes this
218 * puts average chain length at 128, thus an average of 65 derefs to find a volptr.
219 * talk about bad spatial locality...
221 * an AVL or splay tree might work a lot better, but we'll just increase
222 * the default hash table size for now
224 #define DEFAULT_VOLUME_HASH_SIZE 256 /* Must be a power of 2!! */
225 #define DEFAULT_VOLUME_HASH_MASK (DEFAULT_VOLUME_HASH_SIZE-1)
226 #define VOLUME_HASH(volumeId) (volumeId&(VolumeHashTable.Mask))
229 * turn volume hash chains into partially ordered lists.
230 * when the threshold is exceeded between two adjacent elements,
231 * perform a chain rebalancing operation.
233 * keep the threshold high in order to keep cache line invalidates
234 * low "enough" on SMPs
236 #define VOLUME_HASH_REORDER_THRESHOLD 200
239 * when possible, don't just reorder single elements, but reorder
240 * entire chains of elements at once. a chain of elements whose
241 * access counts exceed the element preceding the pivot by at least
242 * CHAIN_THRESH is moved in front of the chain whose elements have at
243 * least CHAIN_THRESH fewer accesses than the pivot element
245 #define VOLUME_HASH_REORDER_CHAIN_THRESH (VOLUME_HASH_REORDER_THRESHOLD / 2)
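/*
 * Illustrative sketch, not part of the upstream source: with the default
 * table size of 256, hashing a volume id is just masking off its low-order
 * bits, e.g. volume 536870915 lands in bucket 536870915 & 255 == 3.  The
 * helper name is hypothetical.
 */
static afs_uint32
ExampleVolumeHashBucket(afs_uint32 volumeId)
{
    return (volumeId & DEFAULT_VOLUME_HASH_MASK);
}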
247 #include "rx/rx_queue.h"
250 VolumeHashTable_t VolumeHashTable = {
251 DEFAULT_VOLUME_HASH_SIZE,
252 DEFAULT_VOLUME_HASH_MASK,
257 static void VInitVolumeHash(void);
261 /* This macro is used where an ffs() call does not exist. Was in util/ffs.c */
265 afs_int32 ffs_tmp = x;
269 for (ffs_i = 1;; ffs_i++) {
276 #endif /* !AFS_HAVE_FFS */
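/*
 * Illustrative sketch, not part of the upstream source: the fallback above
 * computes the 1-based index of the lowest set bit (0 for a zero argument),
 * e.g. ffs(0x8) == 4.  A function-style equivalent, with a hypothetical name:
 */
static int
ExampleFfs(afs_int32 x)
{
    int i;
    if (x == 0)
        return 0;
    for (i = 1; !(x & 1); i++)
        x >>= 1;
    return i;
}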
278 #ifdef AFS_PTHREAD_ENV
279 typedef struct diskpartition_queue_t {
280 struct rx_queue queue;
281 struct DiskPartition * diskP;
282 } diskpartition_queue_t;
283 typedef struct vinitvolumepackage_thread_t {
284 struct rx_queue queue;
285 pthread_cond_t thread_done_cv;
286 int n_threads_complete;
287 } vinitvolumepackage_thread_t;
288 static void * VInitVolumePackageThread(void * args);
289 #endif /* AFS_PTHREAD_ENV */
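#ifdef AFS_PTHREAD_ENV
/*
 * Illustrative sketch, not part of the upstream source: the work-queue
 * convention used during startup.  Each diskpartition_queue_t embeds a
 * struct rx_queue as its first member, so the rx queue_* macros can link
 * work items onto a queue head (the vinitvolumepackage_thread_t above).
 * This mirrors what VInitVolumePackageThread does below; the helper name
 * is hypothetical and VOL_LOCK handling is omitted.
 */
static struct DiskPartition *
ExampleDequeuePartition(vinitvolumepackage_thread_t * params)
{
    struct diskpartition_queue_t * dpq;
    struct DiskPartition * diskP = NULL;

    if (queue_IsNotEmpty(params)) {
        dpq = queue_First(params, diskpartition_queue_t);
        queue_Remove(dpq);
        diskP = dpq->diskP;
        free(dpq);
    }
    return diskP;
}
#endif /* AFS_PTHREAD_ENV */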
291 static int VAttachVolumesByPartition(struct DiskPartition *diskP,
292 int * nAttached, int * nUnattached);
295 #ifdef AFS_DEMAND_ATTACH_FS
296 /* demand attach fileserver extensions */
299 * in the future we will support serialization of VLRU state into the fs_state
302 * these structures are the beginning of that effort
304 struct VLRU_DiskHeader {
305 struct versionStamp stamp; /* magic and structure version number */
306 afs_uint32 mtime; /* time of dump to disk */
307 afs_uint32 num_records; /* number of VLRU_DiskEntry records */
310 struct VLRU_DiskEntry {
311 afs_uint32 vid; /* volume ID */
312 afs_uint32 idx; /* generation */
313 afs_uint32 last_get; /* timestamp of last get */
316 struct VLRU_StartupQueue {
317 struct VLRU_DiskEntry * entry;
322 typedef struct vshutdown_thread_t {
324 pthread_mutex_t lock;
326 pthread_cond_t master_cv;
328 int n_threads_complete;
330 int schedule_version;
333 byte n_parts_done_pass;
334 byte part_thread_target[VOLMAXPARTS+1];
335 byte part_done_pass[VOLMAXPARTS+1];
336 struct rx_queue * part_pass_head[VOLMAXPARTS+1];
337 int stats[4][VOLMAXPARTS+1];
338 } vshutdown_thread_t;
339 static void * VShutdownThread(void * args);
342 static Volume * VAttachVolumeByVp_r(Error * ec, Volume * vp, int mode);
343 static int VCheckFree(Volume * vp);
346 static void AddVolumeToVByPList_r(Volume * vp);
347 static void DeleteVolumeFromVByPList_r(Volume * vp);
348 static void VVByPListBeginExclusive_r(struct DiskPartition * dp);
349 static void VVByPListEndExclusive_r(struct DiskPartition * dp);
350 static void VVByPListWait_r(struct DiskPartition * dp);
352 /* online salvager */
353 static int VCheckSalvage(register Volume * vp);
354 static int VUpdateSalvagePriority_r(Volume * vp);
355 static int VScheduleSalvage_r(Volume * vp);
356 static int VCancelSalvage_r(Volume * vp, int reason);
358 /* Volume hash table */
359 static void VReorderHash_r(VolumeHashChainHead * head, Volume * pp, Volume * vp);
360 static void VHashBeginExclusive_r(VolumeHashChainHead * head);
361 static void VHashEndExclusive_r(VolumeHashChainHead * head);
362 static void VHashWait_r(VolumeHashChainHead * head);
365 static int ShutdownVByPForPass_r(struct DiskPartition * dp, int pass);
366 static int ShutdownVolumeWalk_r(struct DiskPartition * dp, int pass,
367 struct rx_queue ** idx);
368 static void ShutdownController(vshutdown_thread_t * params);
369 static void ShutdownCreateSchedule(vshutdown_thread_t * params);
372 static void VLRU_ComputeConstants(void);
373 static void VInitVLRU(void);
374 static void VLRU_Init_Node_r(volatile Volume * vp);
375 static void VLRU_Add_r(volatile Volume * vp);
376 static void VLRU_Delete_r(volatile Volume * vp);
377 static void VLRU_UpdateAccess_r(volatile Volume * vp);
378 static void * VLRU_ScannerThread(void * args);
379 static void VLRU_Scan_r(int idx);
380 static void VLRU_Promote_r(int idx);
381 static void VLRU_Demote_r(int idx);
382 static void VLRU_SwitchQueues(volatile Volume * vp, int new_idx, int append);
385 static int VCheckSoftDetach(volatile Volume * vp, afs_uint32 thresh);
386 static int VCheckSoftDetachCandidate(volatile Volume * vp, afs_uint32 thresh);
387 static int VSoftDetachVolume_r(volatile Volume * vp, afs_uint32 thresh);
388 #endif /* AFS_DEMAND_ATTACH_FS */
391 struct Lock vol_listLock; /* Lock obtained when listing volumes:
392 * prevents a volume from being missed
393 * if the volume is attached during a
397 static int TimeZoneCorrection; /* Number of seconds west of GMT */
399 /* Common message used when the volume goes off line */
400 char *VSalvageMessage =
401 "Files in this volume are currently unavailable; call operations";
403 int VInit; /* 0 - uninitialized,
404 * 1 - initialized but not all volumes have been attached,
405 * 2 - initialized and all volumes have been attached,
406 * 3 - initialized, all volumes have been attached, and
407 * VConnectFS() has completed. */
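/*
 * Illustrative sketch, not part of the upstream source: how callers
 * typically interpret VInit, per the comment above (the attach path below
 * checks "VInit >= 2" before breaking volume callbacks).  The helper name
 * is hypothetical.
 */
static int
ExampleAllVolumesAttached(void)
{
    return (VInit >= 2);
}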
410 bit32 VolumeCacheCheck; /* Incremented every time a volume goes on line--
411 * used to stamp volume headers and in-core
412 * vnodes. When the volume goes on-line the
413 * vnode will be invalidated
414 * access only with VOL_LOCK held */
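/*
 * Illustrative sketch, not part of the upstream source: how the cacheCheck
 * stamp described above is used.  A cached object records the volume's
 * cacheCheck value when it is filled; if the volume has since been
 * re-attached (which bumps VolumeCacheCheck), the stamps no longer match
 * and the cached copy must be discarded.  The helper name is hypothetical;
 * VOL_LOCK is assumed to be held, per the comment above.
 */
static int
ExampleCacheStampIsCurrent(Volume * vp, bit32 stamp)
{
    return (stamp == vp->cacheCheck);
}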
419 /***************************************************/
420 /* Startup routines */
421 /***************************************************/
424 VInitVolumePackage(ProgramType pt, afs_uint32 nLargeVnodes, afs_uint32 nSmallVnodes,
425 int connect, afs_uint32 volcache)
427 int errors = 0; /* Number of errors while finding vice partitions. */
433 memset(&VStats, 0, sizeof(VStats));
434 VStats.hdr_cache_size = 200;
436 VInitPartitionPackage();
438 #ifdef AFS_DEMAND_ATTACH_FS
439 if (programType == fileServer) {
442 VLRU_SetOptions(VLRU_SET_ENABLED, 0);
446 #ifdef AFS_PTHREAD_ENV
447 assert(pthread_mutex_init(&vol_glock_mutex, NULL) == 0);
448 assert(pthread_mutex_init(&vol_trans_mutex, NULL) == 0);
449 assert(pthread_cond_init(&vol_put_volume_cond, NULL) == 0);
450 assert(pthread_cond_init(&vol_sleep_cond, NULL) == 0);
451 #else /* AFS_PTHREAD_ENV */
453 #endif /* AFS_PTHREAD_ENV */
454 Lock_Init(&vol_listLock);
456 srandom(time(0)); /* For VGetVolumeInfo */
457 gettimeofday(&tv, &tz);
458 TimeZoneCorrection = tz.tz_minuteswest * 60;
460 #ifdef AFS_DEMAND_ATTACH_FS
461 assert(pthread_mutex_init(&vol_salvsync_mutex, NULL) == 0);
462 #endif /* AFS_DEMAND_ATTACH_FS */
464 /* Ok, we have done enough initialization that the fileserver can
465 * start accepting calls, even though the volumes may not be
466 * available just yet.
470 #if defined(AFS_DEMAND_ATTACH_FS) && defined(SALVSYNC_BUILD_SERVER)
471 if (programType == salvageServer) {
474 #endif /* AFS_DEMAND_ATTACH_FS */
475 #ifdef FSSYNC_BUILD_SERVER
476 if (programType == fileServer) {
480 #if defined(AFS_DEMAND_ATTACH_FS) && defined(SALVSYNC_BUILD_CLIENT)
481 if (programType == fileServer) {
482 /* establish a connection to the salvager at this point */
483 assert(VConnectSALV() != 0);
485 #endif /* AFS_DEMAND_ATTACH_FS */
487 if (volcache > VStats.hdr_cache_size)
488 VStats.hdr_cache_size = volcache;
489 VInitVolumeHeaderCache(VStats.hdr_cache_size);
491 VInitVnodes(vLarge, nLargeVnodes);
492 VInitVnodes(vSmall, nSmallVnodes);
495 errors = VAttachPartitions();
499 if (programType == fileServer) {
500 struct DiskPartition *diskP;
501 #ifdef AFS_PTHREAD_ENV
502 struct vinitvolumepackage_thread_t params;
503 struct diskpartition_queue_t * dpq;
504 int i, threads, parts;
506 pthread_attr_t attrs;
508 assert(pthread_cond_init(¶ms.thread_done_cv,NULL) == 0);
510 params.n_threads_complete = 0;
512 /* create partition work queue */
513 for (parts=0, diskP = DiskPartitionList; diskP; diskP = diskP->next, parts++) {
514 dpq = (diskpartition_queue_t *) malloc(sizeof(struct diskpartition_queue_t));
517 queue_Append(¶ms,dpq);
520 threads = MIN(parts, vol_attach_threads);
523 /* spawn off a bunch of initialization threads */
524 assert(pthread_attr_init(&attrs) == 0);
525 assert(pthread_attr_setdetachstate(&attrs, PTHREAD_CREATE_DETACHED) == 0);
527 Log("VInitVolumePackage: beginning parallel fileserver startup\n");
528 #ifdef AFS_DEMAND_ATTACH_FS
529 Log("VInitVolumePackage: using %d threads to pre-attach volumes on %d partitions\n",
531 #else /* AFS_DEMAND_ATTACH_FS */
532 Log("VInitVolumePackage: using %d threads to attach volumes on %d partitions\n",
534 #endif /* AFS_DEMAND_ATTACH_FS */
537 for (i=0; i < threads; i++) {
538 assert(pthread_create
539 (&tid, &attrs, &VInitVolumePackageThread,
543 while(params.n_threads_complete < threads) {
544 VOL_CV_WAIT(¶ms.thread_done_cv);
548 assert(pthread_attr_destroy(&attrs) == 0);
550 /* if we're only going to run one init thread, don't bother creating
552 Log("VInitVolumePackage: beginning single-threaded fileserver startup\n");
553 #ifdef AFS_DEMAND_ATTACH_FS
554 Log("VInitVolumePackage: using 1 thread to pre-attach volumes on %d partition(s)\n",
556 #else /* AFS_DEMAND_ATTACH_FS */
557 Log("VInitVolumePackage: using 1 thread to attach volumes on %d partition(s)\n",
559 #endif /* AFS_DEMAND_ATTACH_FS */
561 VInitVolumePackageThread(¶ms);
564 assert(pthread_cond_destroy(¶ms.thread_done_cv) == 0);
566 #else /* AFS_PTHREAD_ENV */
570 /* Attach all the volumes in this partition */
571 for (diskP = DiskPartitionList; diskP; diskP = diskP->next) {
572 int nAttached = 0, nUnattached = 0;
573 assert(VAttachVolumesByPartition(diskP, &nAttached, &nUnattached) == 0);
575 #endif /* AFS_PTHREAD_ENV */
578 VInit = 2; /* Initialized, and all volumes have been attached */
579 #ifdef FSSYNC_BUILD_CLIENT
580 if (programType == volumeUtility && connect) {
582 Log("Unable to connect to file server; aborted\n");
586 #ifdef AFS_DEMAND_ATTACH_FS
587 else if (programType == salvageServer) {
589 Log("Unable to connect to file server; aborted\n");
593 #endif /* AFS_DEMAND_ATTACH_FS */
594 #endif /* FSSYNC_BUILD_CLIENT */
598 #ifdef AFS_PTHREAD_ENV
600 VInitVolumePackageThread(void * args) {
601 int errors = 0; /* Number of errors while finding vice partitions. */
605 struct DiskPartition *diskP;
606 struct vinitvolumepackage_thread_t * params;
607 struct diskpartition_queue_t * dpq;
609 params = (vinitvolumepackage_thread_t *) args;
613 /* Attach all the volumes in this partition */
614 while (queue_IsNotEmpty(params)) {
615 int nAttached = 0, nUnattached = 0;
617 dpq = queue_First(params,diskpartition_queue_t);
623 assert(VAttachVolumesByPartition(diskP, &nAttached, &nUnattached) == 0);
628 params->n_threads_complete++;
629 pthread_cond_signal(¶ms->thread_done_cv);
633 #endif /* AFS_PTHREAD_ENV */
636 * attach all volumes on a given disk partition
639 VAttachVolumesByPartition(struct DiskPartition *diskP, int * nAttached, int * nUnattached)
645 Log("Partition %s: attaching volumes\n", diskP->name);
646 dirp = opendir(VPartitionPath(diskP));
648 Log("opendir on Partition %s failed!\n", diskP->name);
652 while ((dp = readdir(dirp))) {
654 p = strrchr(dp->d_name, '.');
655 if (p != NULL && strcmp(p, VHDREXT) == 0) {
658 #ifdef AFS_DEMAND_ATTACH_FS
659 vp = VPreAttachVolumeByName(&error, diskP->name, dp->d_name);
660 #else /* AFS_DEMAND_ATTACH_FS */
661 vp = VAttachVolumeByName(&error, diskP->name, dp->d_name,
663 #endif /* AFS_DEMAND_ATTACH_FS */
664 (*(vp ? nAttached : nUnattached))++;
665 if (error == VOFFLINE)
666 Log("Volume %d stays offline (/vice/offline/%s exists)\n", VolumeNumber(dp->d_name), dp->d_name);
667 else if (LogLevel >= 5) {
668 Log("Partition %s: attached volume %d (%s)\n",
669 diskP->name, VolumeNumber(dp->d_name),
672 #if !defined(AFS_DEMAND_ATTACH_FS)
676 #endif /* AFS_DEMAND_ATTACH_FS */
680 Log("Partition %s: attached %d volumes; %d volumes not attached\n", diskP->name, *nAttached, *nUnattached);
686 /***************************************************/
687 /* Shutdown routines */
688 /***************************************************/
692 * highly multithreaded volume package shutdown
694 * with the demand attach fileserver extensions,
695 * VShutdown has been modified to be multithreaded.
696 * In order to achieve optimal use of many threads,
697 * the shutdown code involves one control thread and
698 * n shutdown worker threads. The control thread
699 * periodically examines the number of volumes available
700 * for shutdown on each partition, and produces a worker
701 * thread allocation schedule. The idea is to eliminate
702 * redundant scheduling computation on the workers by
703 * having a single master scheduler.
705 * The scheduler's objectives are:
707 * each partition with volumes remaining gets allocated
708 * at least 1 thread (assuming sufficient threads)
710 * threads are allocated in proportion to the number of
711 * volumes remaining to be offlined. This ensures that
712 * the OS I/O scheduler has many requests to elevator
713 * seek on partitions that will (presumably) take the
714 * longest amount of time (from now) to finish shutdown
715 * (3) keep threads busy
716 * when there are extra threads, they are assigned to
717 * partitions using a simple round-robin algorithm
719 * In the future, we may wish to add the ability to adapt
720 * to the relative performance patterns of each disk
725 * multi-step shutdown process
727 * demand attach shutdown is a four-step process. Each
728 * shutdown "pass" shuts down increasingly difficult
729 * volumes. The main purpose is to achieve better cache
730 * utilization during shutdown.
733 * shutdown volumes in the unattached, pre-attached
736 * shutdown attached volumes with cached volume headers
738 * shutdown all volumes in non-exclusive states
740 * shutdown all remaining volumes
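#ifdef AFS_DEMAND_ATTACH_FS
/*
 * Illustrative sketch, not part of the upstream source: the four shutdown
 * passes described above, restated as an eligibility predicate.  This is a
 * rough paraphrase of the checks actually made in ShutdownVolumeWalk_r; the
 * helper name is hypothetical.
 */
static int
ExampleVolumeEligibleForPass(Volume * vp, int pass)
{
    switch (pass) {
    case 0:    /* only {pre,un}attached and error state volumes */
        return (V_attachState(vp) == VOL_STATE_UNATTACHED ||
                V_attachState(vp) == VOL_STATE_PREATTACHED ||
                V_attachState(vp) == VOL_STATE_ERROR);
    case 1:    /* also attached volumes with a cached volume header */
        return (ExampleVolumeEligibleForPass(vp, 0) ||
                (V_attachState(vp) == VOL_STATE_ATTACHED && vp->header));
    case 2:    /* also any volume not in an exclusive state */
        return !VIsExclusiveState(V_attachState(vp));
    default:   /* pass 3: everything that remains */
        return 1;
    }
}
#endif /* AFS_DEMAND_ATTACH_FS */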
747 register Volume *vp, *np;
748 register afs_int32 code;
749 #ifdef AFS_DEMAND_ATTACH_FS
750 struct DiskPartition * diskP;
751 struct diskpartition_queue_t * dpq;
752 vshutdown_thread_t params;
754 pthread_attr_t attrs;
756 memset(¶ms, 0, sizeof(vshutdown_thread_t));
758 for (params.n_parts=0, diskP = DiskPartitionList;
759 diskP; diskP = diskP->next, params.n_parts++);
761 Log("VShutdown: shutting down on-line volumes on %d partition%s...\n",
762 params.n_parts, params.n_parts > 1 ? "s" : "");
764 if (vol_attach_threads > 1) {
765 /* prepare for parallel shutdown */
766 params.n_threads = vol_attach_threads;
767 assert(pthread_mutex_init(¶ms.lock, NULL) == 0);
768 assert(pthread_cond_init(¶ms.cv, NULL) == 0);
769 assert(pthread_cond_init(¶ms.master_cv, NULL) == 0);
770 assert(pthread_attr_init(&attrs) == 0);
771 assert(pthread_attr_setdetachstate(&attrs, PTHREAD_CREATE_DETACHED) == 0);
774 /* setup the basic partition information structures for
775 * parallel shutdown */
776 for (diskP = DiskPartitionList; diskP; diskP = diskP->next) {
778 struct rx_queue * qp, * nqp;
782 VVByPListWait_r(diskP);
783 VVByPListBeginExclusive_r(diskP);
786 for (queue_Scan(&diskP->vol_list, qp, nqp, rx_queue)) {
787 vp = (Volume *)((char *)qp - offsetof(Volume, vol_list));
791 Log("VShutdown: partition %s has %d volumes with attached headers\n",
792 VPartitionPath(diskP), count);
795 /* build up the pass 0 shutdown work queue */
796 dpq = (struct diskpartition_queue_t *) malloc(sizeof(struct diskpartition_queue_t));
799 queue_Prepend(¶ms, dpq);
801 params.part_pass_head[diskP->device] = queue_First(&diskP->vol_list, rx_queue);
804 Log("VShutdown: beginning parallel fileserver shutdown\n");
805 Log("VShutdown: using %d threads to offline volumes on %d partition%s\n",
806 vol_attach_threads, params.n_parts, params.n_parts > 1 ? "s" : "" );
808 /* do pass 0 shutdown */
809 assert(pthread_mutex_lock(¶ms.lock) == 0);
810 for (i=0; i < params.n_threads; i++) {
811 assert(pthread_create
812 (&tid, &attrs, &VShutdownThread,
816 /* wait for all the pass 0 shutdowns to complete */
817 while (params.n_threads_complete < params.n_threads) {
818 assert(pthread_cond_wait(¶ms.master_cv, ¶ms.lock) == 0);
820 params.n_threads_complete = 0;
822 assert(pthread_cond_broadcast(¶ms.cv) == 0);
823 assert(pthread_mutex_unlock(¶ms.lock) == 0);
825 Log("VShutdown: pass 0 completed using the 1 thread per partition algorithm\n");
826 Log("VShutdown: starting passes 1 through 3 using finely-granular mp-fast algorithm\n");
828 /* run the parallel shutdown scheduler. it will drop the glock internally */
829 ShutdownController(¶ms);
831 /* wait for all the workers to finish pass 3 and terminate */
832 while (params.pass < 4) {
833 VOL_CV_WAIT(¶ms.cv);
836 assert(pthread_attr_destroy(&attrs) == 0);
837 assert(pthread_cond_destroy(¶ms.cv) == 0);
838 assert(pthread_cond_destroy(¶ms.master_cv) == 0);
839 assert(pthread_mutex_destroy(¶ms.lock) == 0);
841 /* drop the VByPList exclusive reservations */
842 for (diskP = DiskPartitionList; diskP; diskP = diskP->next) {
843 VVByPListEndExclusive_r(diskP);
844 Log("VShutdown: %s stats : (pass[0]=%d, pass[1]=%d, pass[2]=%d, pass[3]=%d)\n",
845 VPartitionPath(diskP),
846 params.stats[0][diskP->device],
847 params.stats[1][diskP->device],
848 params.stats[2][diskP->device],
849 params.stats[3][diskP->device]);
852 Log("VShutdown: shutdown finished using %d threads\n", params.n_threads);
854 /* if we're only going to run one shutdown thread, don't bother creating
856 Log("VShutdown: beginning single-threaded fileserver shutdown\n");
858 for (diskP = DiskPartitionList; diskP; diskP = diskP->next) {
859 VShutdownByPartition_r(diskP);
863 Log("VShutdown: complete.\n");
864 #else /* AFS_DEMAND_ATTACH_FS */
865 Log("VShutdown: shutting down on-line volumes...\n");
866 for (i = 0; i < VolumeHashTable.Size; i++) {
867 /* try to hold first volume in the hash table */
868 for (queue_Scan(&VolumeHashTable.Table[i],vp,np,Volume)) {
872 Log("VShutdown: Attempting to take volume %u offline.\n",
875 /* next, take the volume offline (drops reference count) */
876 VOffline_r(vp, "File server was shut down");
880 Log("VShutdown: complete.\n");
881 #endif /* AFS_DEMAND_ATTACH_FS */
892 #ifdef AFS_DEMAND_ATTACH_FS
895 * shutdown control thread
898 ShutdownController(vshutdown_thread_t * params)
901 struct DiskPartition * diskP;
903 vshutdown_thread_t shadow;
905 ShutdownCreateSchedule(params);
907 while ((params->pass < 4) &&
908 (params->n_threads_complete < params->n_threads)) {
909 /* recompute schedule once per second */
911 memcpy(&shadow, params, sizeof(vshutdown_thread_t));
915 Log("ShutdownController: schedule version=%d, vol_remaining=%d, pass=%d\n",
916 shadow.schedule_version, shadow.vol_remaining, shadow.pass);
917 Log("ShutdownController: n_threads_complete=%d, n_parts_done_pass=%d\n",
918 shadow.n_threads_complete, shadow.n_parts_done_pass);
919 for (diskP = DiskPartitionList; diskP; diskP=diskP->next) {
921 Log("ShutdownController: part[%d] : (len=%d, thread_target=%d, done_pass=%d, pass_head=%p)\n",
924 shadow.part_thread_target[id],
925 shadow.part_done_pass[id],
926 shadow.part_pass_head[id]);
932 ShutdownCreateSchedule(params);
936 /* create the shutdown thread work schedule.
937 * this scheduler tries to implement fairness
938 * by allocating at least 1 thread to each
939 * partition with volumes to be shut down,
940 * and then it attempts to allocate remaining
941 * threads based upon the amount of work left
944 ShutdownCreateSchedule(vshutdown_thread_t * params)
946 struct DiskPartition * diskP;
947 int sum, thr_workload, thr_left;
948 int part_residue[VOLMAXPARTS+1];
951 /* compute the total number of outstanding volumes */
953 for (diskP = DiskPartitionList; diskP; diskP = diskP->next) {
954 sum += diskP->vol_list.len;
957 params->schedule_version++;
958 params->vol_remaining = sum;
963 /* compute average per-thread workload */
964 thr_workload = sum / params->n_threads;
965 if (sum % params->n_threads)
968 thr_left = params->n_threads;
969 memset(&part_residue, 0, sizeof(part_residue));
971 /* for fairness, give every partition with volumes remaining
972 * at least one thread */
973 for (diskP = DiskPartitionList; diskP && thr_left; diskP = diskP->next) {
975 if (diskP->vol_list.len) {
976 params->part_thread_target[id] = 1;
979 params->part_thread_target[id] = 0;
983 if (thr_left && thr_workload) {
984 /* compute length-weighted workloads */
987 for (diskP = DiskPartitionList; diskP && thr_left; diskP = diskP->next) {
989 delta = (diskP->vol_list.len / thr_workload) -
990 params->part_thread_target[id];
994 if (delta < thr_left) {
995 params->part_thread_target[id] += delta;
998 params->part_thread_target[id] += thr_left;
1006 /* try to assign any leftover threads to partitions that
1007 * had volume lengths closer to needing thread_target+1 */
1008 int max_residue, max_id;
1010 /* compute the residues */
1011 for (diskP = DiskPartitionList; diskP; diskP = diskP->next) {
1013 part_residue[id] = diskP->vol_list.len -
1014 (params->part_thread_target[id] * thr_workload);
1017 /* now try to allocate remaining threads to partitions with the
1018 * highest residues */
1021 for (diskP = DiskPartitionList; diskP; diskP = diskP->next) {
1023 if (part_residue[id] > max_residue) {
1024 max_residue = part_residue[id];
1033 params->part_thread_target[max_id]++;
1035 part_residue[max_id] = 0;
1040 /* punt and give any remaining threads equally to each partition */
1042 if (thr_left >= params->n_parts) {
1043 alloc = thr_left / params->n_parts;
1044 for (diskP = DiskPartitionList; diskP; diskP = diskP->next) {
1046 params->part_thread_target[id] += alloc;
1051 /* finish off the last of the threads */
1052 for (diskP = DiskPartitionList; thr_left && diskP; diskP = diskP->next) {
1054 params->part_thread_target[id]++;
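/*
 * Worked example, not part of the upstream source, of the schedule
 * computation above under assumed inputs: 4 partitions holding 100, 50,
 * 30 and 20 volumes, and n_threads = 8.  Then sum = 200 and
 * thr_workload = 200 / 8 = 25.  Each partition first gets one thread
 * (leaving thr_left = 4); the length-weighted step then adds
 * 100/25 - 1 = 3 threads to the first partition and hands the single
 * remaining thread to the second, giving part_thread_target values of
 * 4, 2, 1, 1.
 */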
1060 /* worker thread for parallel shutdown */
1062 VShutdownThread(void * args)
1064 struct rx_queue *qp;
1066 vshutdown_thread_t * params;
1067 int part, code, found, pass, schedule_version_save, count;
1068 struct DiskPartition *diskP;
1069 struct diskpartition_queue_t * dpq;
1072 params = (vshutdown_thread_t *) args;
1074 /* acquire the shutdown pass 0 lock */
1075 assert(pthread_mutex_lock(¶ms->lock) == 0);
1077 /* if there's still pass 0 work to be done,
1078 * get a work entry, and do a pass 0 shutdown */
1079 if (queue_IsNotEmpty(params)) {
1080 dpq = queue_First(params, diskpartition_queue_t);
1082 assert(pthread_mutex_unlock(¶ms->lock) == 0);
1088 while (ShutdownVolumeWalk_r(diskP, 0, ¶ms->part_pass_head[id]))
1090 params->stats[0][diskP->device] = count;
1091 assert(pthread_mutex_lock(¶ms->lock) == 0);
1094 params->n_threads_complete++;
1095 if (params->n_threads_complete == params->n_threads) {
1096 /* notify control thread that all workers have completed pass 0 */
1097 assert(pthread_cond_signal(¶ms->master_cv) == 0);
1099 while (params->pass == 0) {
1100 assert(pthread_cond_wait(¶ms->cv, ¶ms->lock) == 0);
1104 assert(pthread_mutex_unlock(¶ms->lock) == 0);
1107 pass = params->pass;
1110 /* now escalate through the more complicated shutdowns */
1112 schedule_version_save = params->schedule_version;
1114 /* find a disk partition to work on */
1115 for (diskP = DiskPartitionList; diskP; diskP = diskP->next) {
1117 if (params->part_thread_target[id] && !params->part_done_pass[id]) {
1118 params->part_thread_target[id]--;
1125 /* hmm. for some reason the controller thread couldn't find anything for
1126 * us to do. let's see if there's anything we can do */
1127 for (diskP = DiskPartitionList; diskP; diskP = diskP->next) {
1129 if (diskP->vol_list.len && !params->part_done_pass[id]) {
1132 } else if (!params->part_done_pass[id]) {
1133 params->part_done_pass[id] = 1;
1134 params->n_parts_done_pass++;
1136 Log("VShutdown: done shutting down volumes on partition %s.\n",
1137 VPartitionPath(diskP));
1143 /* do work on this partition until either the controller
1144 * creates a new schedule, or we run out of things to do
1145 * on this partition */
1148 while (!params->part_done_pass[id] &&
1149 (schedule_version_save == params->schedule_version)) {
1150 /* ShutdownVolumeWalk_r will drop the glock internally */
1151 if (!ShutdownVolumeWalk_r(diskP, pass, ¶ms->part_pass_head[id])) {
1152 if (!params->part_done_pass[id]) {
1153 params->part_done_pass[id] = 1;
1154 params->n_parts_done_pass++;
1156 Log("VShutdown: done shutting down volumes on partition %s.\n",
1157 VPartitionPath(diskP));
1165 params->stats[pass][id] += count;
1167 /* ok, everyone is done this pass, proceed */
1170 params->n_threads_complete++;
1171 while (params->pass == pass) {
1172 if (params->n_threads_complete == params->n_threads) {
1173 /* we are the last thread to complete, so we will
1174 * reinitialize worker pool state for the next pass */
1175 params->n_threads_complete = 0;
1176 params->n_parts_done_pass = 0;
1178 for (diskP = DiskPartitionList; diskP; diskP = diskP->next) {
1180 params->part_done_pass[id] = 0;
1181 params->part_pass_head[id] = queue_First(&diskP->vol_list, rx_queue);
1184 /* compute a new thread schedule before releasing all the workers */
1185 ShutdownCreateSchedule(params);
1187 /* wake up all the workers */
1188 assert(pthread_cond_broadcast(¶ms->cv) == 0);
1191 Log("VShutdown: pass %d completed using %d threads on %d partitions\n",
1192 pass, params->n_threads, params->n_parts);
1195 VOL_CV_WAIT(¶ms->cv);
1198 pass = params->pass;
1212 /* shut down all volumes on a given disk partition
1214 * note that this function will not allow mp-fast
1215 * shutdown of a partition */
1217 VShutdownByPartition_r(struct DiskPartition * dp)
1223 /* wait for other exclusive ops to finish */
1224 VVByPListWait_r(dp);
1226 /* begin exclusive access */
1227 VVByPListBeginExclusive_r(dp);
1229 /* pick the low-hanging fruit first,
1230 * then do the complicated ones last
1231 * (has the advantage of keeping
1232 * in-use volumes up until the bitter end) */
1233 for (pass = 0, total=0; pass < 4; pass++) {
1234 pass_stats[pass] = ShutdownVByPForPass_r(dp, pass);
1235 total += pass_stats[pass];
1238 /* end exclusive access */
1239 VVByPListEndExclusive_r(dp);
1241 Log("VShutdownByPartition: shut down %d volumes on %s (pass[0]=%d, pass[1]=%d, pass[2]=%d, pass[3]=%d)\n",
1242 total, VPartitionPath(dp), pass_stats[0], pass_stats[1], pass_stats[2], pass_stats[3]);
1247 /* internal shutdown functionality
1249 * for multi-pass shutdown:
1250 * 0 to only "shutdown" {pre,un}attached and error state volumes
1251 * 1 to also shutdown attached volumes w/ volume header loaded
1252 * 2 to also shutdown attached volumes w/o volume header loaded
1253 * 3 to also shutdown exclusive state volumes
1255 * caller MUST hold exclusive access on the hash chain
1256 * because we drop vol_glock_mutex internally
1258 * this function is reentrant for passes 1--3
1259 * (e.g. multiple threads can cooperate to
1260 * shutdown a partition mp-fast)
1262 * pass 0 is not scalable because the volume state data is
1263 * synchronized by vol_glock mutex, and the locking overhead
1264 * is too high to drop the lock long enough to do linked list
1268 ShutdownVByPForPass_r(struct DiskPartition * dp, int pass)
1270 struct rx_queue * q = queue_First(&dp->vol_list, rx_queue);
1273 while (ShutdownVolumeWalk_r(dp, pass, &q))
1279 /* conditionally shut down one volume on partition dp
1280 * returns 1 if a volume was shut down in this pass,
1283 ShutdownVolumeWalk_r(struct DiskPartition * dp, int pass,
1284 struct rx_queue ** idx)
1286 struct rx_queue *qp, *nqp;
1291 for (queue_ScanFrom(&dp->vol_list, qp, qp, nqp, rx_queue)) {
1292 vp = (Volume *) (((char *)qp) - offsetof(Volume, vol_list));
1296 if ((V_attachState(vp) != VOL_STATE_UNATTACHED) &&
1297 (V_attachState(vp) != VOL_STATE_ERROR) &&
1298 (V_attachState(vp) != VOL_STATE_PREATTACHED)) {
1302 if ((V_attachState(vp) == VOL_STATE_ATTACHED) &&
1303 (vp->header == NULL)) {
1307 if (VIsExclusiveState(V_attachState(vp))) {
1312 DeleteVolumeFromVByPList_r(vp);
1313 VShutdownVolume_r(vp);
1323 * shutdown a specific volume
1325 /* caller MUST NOT hold a heavyweight ref on vp */
1327 VShutdownVolume_r(Volume * vp)
1331 VCreateReservation_r(vp);
1333 if (LogLevel >= 5) {
1334 Log("VShutdownVolume_r: vid=%u, device=%d, state=%hu\n",
1335 vp->hashid, vp->partition->device, V_attachState(vp));
1338 /* wait for other blocking ops to finish */
1339 VWaitExclusiveState_r(vp);
1341 assert(VIsValidState(V_attachState(vp)));
1343 switch(V_attachState(vp)) {
1344 case VOL_STATE_SALVAGING:
1345 /* make sure salvager knows we don't want
1346 * the volume back */
1347 VCancelSalvage_r(vp, SALVSYNC_SHUTDOWN);
1348 case VOL_STATE_PREATTACHED:
1349 case VOL_STATE_ERROR:
1350 VChangeState_r(vp, VOL_STATE_UNATTACHED);
1351 case VOL_STATE_UNATTACHED:
1353 case VOL_STATE_GOING_OFFLINE:
1354 case VOL_STATE_SHUTTING_DOWN:
1355 case VOL_STATE_ATTACHED:
1359 Log("VShutdown: Attempting to take volume %u offline.\n",
1362 /* take the volume offline (drops reference count) */
1363 VOffline_r(vp, "File server was shut down");
1368 VCancelReservation_r(vp);
1372 #endif /* AFS_DEMAND_ATTACH_FS */
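#ifdef AFS_DEMAND_ATTACH_FS
/*
 * Illustrative sketch, not part of the upstream source: the lightweight
 * reservation pattern used by VShutdownVolume_r above and by the attach
 * paths below -- take a reservation so the Volume object cannot be freed,
 * wait out any exclusive (blocking) state, do the work, then drop the
 * reservation.  The helper name is hypothetical; VOL_LOCK is assumed to
 * be held.
 */
static void
ExampleWithReservation_r(Volume * vp)
{
    VCreateReservation_r(vp);
    VWaitExclusiveState_r(vp);
    /* ... operate on vp here ... */
    VCancelReservation_r(vp);
}
#endif /* AFS_DEMAND_ATTACH_FS */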
1375 /***************************************************/
1376 /* Header I/O routines */
1377 /***************************************************/
1379 /* open a descriptor for the inode (h),
1380 * read in an on-disk structure into buffer (to) of size (size),
1381 * verify versionstamp in structure has magic (magic) and
1382 * optionally verify version (version) if (version) is nonzero
1385 ReadHeader(Error * ec, IHandle_t * h, char *to, int size, bit32 magic,
1388 struct versionStamp *vsn;
1403 if (FDH_SEEK(fdP, 0, SEEK_SET) < 0) {
1405 FDH_REALLYCLOSE(fdP);
1408 vsn = (struct versionStamp *)to;
1409 if (FDH_READ(fdP, to, size) != size || vsn->magic != magic) {
1411 FDH_REALLYCLOSE(fdP);
1416 /* Check is conditional, in case caller wants to inspect version himself */
1417 if (version && vsn->version != version) {
1423 WriteVolumeHeader_r(Error * ec, Volume * vp)
1425 IHandle_t *h = V_diskDataHandle(vp);
1435 if (FDH_SEEK(fdP, 0, SEEK_SET) < 0) {
1437 FDH_REALLYCLOSE(fdP);
1440 if (FDH_WRITE(fdP, (char *)&V_disk(vp), sizeof(V_disk(vp)))
1441 != sizeof(V_disk(vp))) {
1443 FDH_REALLYCLOSE(fdP);
1449 /* VolumeHeaderToDisk
1450 * Allows for storing 64 bit inode numbers in on-disk volume header
1453 /* convert in-memory representation of a volume header to the
1454 * on-disk representation of a volume header */
1456 VolumeHeaderToDisk(VolumeDiskHeader_t * dh, VolumeHeader_t * h)
1459 memset((char *)dh, 0, sizeof(VolumeDiskHeader_t));
1460 dh->stamp = h->stamp;
1462 dh->parent = h->parent;
1464 #ifdef AFS_64BIT_IOPS_ENV
1465 dh->volumeInfo_lo = (afs_int32) h->volumeInfo & 0xffffffff;
1466 dh->volumeInfo_hi = (afs_int32) (h->volumeInfo >> 32) & 0xffffffff;
1467 dh->smallVnodeIndex_lo = (afs_int32) h->smallVnodeIndex & 0xffffffff;
1468 dh->smallVnodeIndex_hi =
1469 (afs_int32) (h->smallVnodeIndex >> 32) & 0xffffffff;
1470 dh->largeVnodeIndex_lo = (afs_int32) h->largeVnodeIndex & 0xffffffff;
1471 dh->largeVnodeIndex_hi =
1472 (afs_int32) (h->largeVnodeIndex >> 32) & 0xffffffff;
1473 dh->linkTable_lo = (afs_int32) h->linkTable & 0xffffffff;
1474 dh->linkTable_hi = (afs_int32) (h->linkTable >> 32) & 0xffffffff;
1476 dh->volumeInfo_lo = h->volumeInfo;
1477 dh->smallVnodeIndex_lo = h->smallVnodeIndex;
1478 dh->largeVnodeIndex_lo = h->largeVnodeIndex;
1479 dh->linkTable_lo = h->linkTable;
1483 /* DiskToVolumeHeader
1484 * Converts an on-disk representation of a volume header to
1485 * the in-memory representation of a volume header.
1487 * Makes the assumption that AFS has *always*
1488 * zero'd the volume header file so that high parts of inode
1489 * numbers are 0 in older (SGI EFS) volume header files.
1492 DiskToVolumeHeader(VolumeHeader_t * h, VolumeDiskHeader_t * dh)
1494 memset((char *)h, 0, sizeof(VolumeHeader_t));
1495 h->stamp = dh->stamp;
1497 h->parent = dh->parent;
1499 #ifdef AFS_64BIT_IOPS_ENV
1501 (Inode) dh->volumeInfo_lo | ((Inode) dh->volumeInfo_hi << 32);
1503 h->smallVnodeIndex =
1504 (Inode) dh->smallVnodeIndex_lo | ((Inode) dh->
1505 smallVnodeIndex_hi << 32);
1507 h->largeVnodeIndex =
1508 (Inode) dh->largeVnodeIndex_lo | ((Inode) dh->
1509 largeVnodeIndex_hi << 32);
1511 (Inode) dh->linkTable_lo | ((Inode) dh->linkTable_hi << 32);
1513 h->volumeInfo = dh->volumeInfo_lo;
1514 h->smallVnodeIndex = dh->smallVnodeIndex_lo;
1515 h->largeVnodeIndex = dh->largeVnodeIndex_lo;
1516 h->linkTable = dh->linkTable_lo;
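#ifdef AFS_64BIT_IOPS_ENV
/*
 * Illustrative sketch, not part of the upstream source: the lo/hi split used
 * by VolumeHeaderToDisk and DiskToVolumeHeader round-trips a 64-bit inode
 * number through two 32-bit on-disk words, e.g. 0x0000000100000002 splits
 * into hi == 1, lo == 2 and rejoins unchanged.  The helper name is
 * hypothetical (the casts through afs_uint32 avoid sign extension).
 */
static Inode
ExampleSplitAndRejoinInode(Inode ino)
{
    afs_int32 lo = (afs_int32) (ino & 0xffffffff);
    afs_int32 hi = (afs_int32) ((ino >> 32) & 0xffffffff);

    return ((Inode) (afs_uint32) lo) | (((Inode) (afs_uint32) hi) << 32);
}
#endif /* AFS_64BIT_IOPS_ENV */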
1521 /***************************************************/
1522 /* Volume Attachment routines */
1523 /***************************************************/
1525 #ifdef AFS_DEMAND_ATTACH_FS
1527 * pre-attach a volume given its path.
1529 * @param[out] ec outbound error code
1530 * @param[in] partition partition path string
1531 * @param[in] name volume id string
1533 * @return volume object pointer
1535 * @note A pre-attached volume will only have its partition
1536 * and hashid fields initialized. At first call to
1537 * VGetVolume, the volume will be fully attached.
1541 VPreAttachVolumeByName(Error * ec, char *partition, char *name)
1545 vp = VPreAttachVolumeByName_r(ec, partition, name);
1551 * pre-attach a volume given its path.
1553 * @param[out] ec outbound error code
1554 * @param[in] partition path to vice partition
1555 * @param[in] name volume id string
1557 * @return volume object pointer
1559 * @pre VOL_LOCK held
1561 * @internal volume package internal use only.
1564 VPreAttachVolumeByName_r(Error * ec, char *partition, char *name)
1566 return VPreAttachVolumeById_r(ec,
1568 VolumeNumber(name));
1572 * pre-attach a volume given its path and numeric volume id.
1574 * @param[out] ec error code return
1575 * @param[in] partition path to vice partition
1576 * @param[in] volumeId numeric volume id
1578 * @return volume object pointer
1580 * @pre VOL_LOCK held
1582 * @internal volume package internal use only.
1585 VPreAttachVolumeById_r(Error * ec,
1590 struct DiskPartition *partp;
1594 assert(programType == fileServer);
1596 if (!(partp = VGetPartition_r(partition, 0))) {
1598 Log("VPreAttachVolumeById_r: Error getting partition (%s)\n", partition);
1602 vp = VLookupVolume_r(ec, volumeId, NULL);
1607 return VPreAttachVolumeByVp_r(ec, partp, vp, volumeId);
1611 * pre-attach a volume.
1613 * @param[out] ec outbound error code
1614 * @param[in] partp pointer to partition object
1615 * @param[in] vp pointer to volume object
1616 * @param[in] vid volume id
1618 * @return volume object pointer
1620 * @pre VOL_LOCK is held.
1622 * @warning Returned volume object pointer does not have to
1623 * equal the pointer passed in as argument vp. There
1624 * are potential race conditions which can result in
1625 * the pointers having different values. It is up to
1626 * the caller to make sure that references are handled
1627 * properly in this case.
1629 * @note If there is already a volume object registered with
1630 * the same volume id, its pointer MUST be passed as
1631 * argument vp. Failure to do so will result in a silent
1632 * failure to preattach.
1634 * @internal volume package internal use only.
1637 VPreAttachVolumeByVp_r(Error * ec,
1638 struct DiskPartition * partp,
1646 /* check to see if pre-attach already happened */
1648 (V_attachState(vp) != VOL_STATE_UNATTACHED) &&
1649 (V_attachState(vp) != VOL_STATE_PREATTACHED) &&
1650 !VIsErrorState(V_attachState(vp))) {
1652 * pre-attach is a no-op in all but the following cases:
1654 * - volume is unattached
1655 * - volume is in an error state
1656 * - volume is pre-attached
1658 Log("VPreAttachVolumeByVp_r: volume %u not in quiescent state\n", vid);
1661 /* we're re-attaching a volume; clear out some old state */
1662 memset(&vp->salvage, 0, sizeof(struct VolumeOnlineSalvage));
1664 if (V_partition(vp) != partp) {
1665 /* XXX potential race */
1666 DeleteVolumeFromVByPList_r(vp);
1669 /* if we need to allocate a new Volume struct,
1670 * go ahead and drop the vol glock, otherwise
1671 * do the basic setup synchronised, as it's
1672 * probably not worth dropping the lock */
1675 /* allocate the volume structure */
1676 vp = nvp = (Volume *) malloc(sizeof(Volume));
1678 memset(vp, 0, sizeof(Volume));
1679 queue_Init(&vp->vnode_list);
1680 assert(pthread_cond_init(&V_attachCV(vp), NULL) == 0);
1683 /* link the volume with its associated vice partition */
1684 vp->device = partp->device;
1685 vp->partition = partp;
1688 /* if we dropped the lock, reacquire the lock,
1689 * check for pre-attach races, and then add
1690 * the volume to the hash table */
1693 nvp = VLookupVolume_r(ec, vid, NULL);
1698 } else if (nvp) { /* race detected */
1703 /* hack to make up for VChangeState_r() decrementing
1704 * the old state counter */
1705 VStats.state_levels[0]++;
1709 /* put pre-attached volume onto the hash table
1710 * and bring it up to the pre-attached state */
1711 AddVolumeToHashTable(vp, vp->hashid);
1712 AddVolumeToVByPList_r(vp);
1713 VLRU_Init_Node_r(vp);
1714 VChangeState_r(vp, VOL_STATE_PREATTACHED);
1717 Log("VPreAttachVolumeByVp_r: volume %u pre-attached\n", vp->hashid);
1725 #endif /* AFS_DEMAND_ATTACH_FS */
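#ifdef AFS_DEMAND_ATTACH_FS
/*
 * Illustrative sketch, not part of the upstream source: the calling
 * convention warned about above.  Any Volume object already registered
 * under the id must be looked up and passed in, and the caller must
 * continue with whatever pointer comes back, which need not equal the one
 * passed in.  This mirrors what VPreAttachVolumeById_r does; the helper
 * name is hypothetical and VOL_LOCK is assumed to be held.
 */
static Volume *
ExamplePreAttach_r(Error * ec, struct DiskPartition * partp, VolId vid)
{
    Volume *vp, *nvp;

    vp = VLookupVolume_r(ec, vid, NULL);    /* existing object, if any */
    nvp = VPreAttachVolumeByVp_r(ec, partp, vp, vid);
    return nvp;    /* may differ from vp; use nvp from here on */
}
#endif /* AFS_DEMAND_ATTACH_FS */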
1727 /* Attach an existing volume, given its pathname, and return a
1728 pointer to the volume header information. The volume also
1729 normally goes online at this time. An offline volume
1730 must be reattached to make it go online */
1732 VAttachVolumeByName(Error * ec, char *partition, char *name, int mode)
1736 retVal = VAttachVolumeByName_r(ec, partition, name, mode);
1742 VAttachVolumeByName_r(Error * ec, char *partition, char *name, int mode)
1744 register Volume *vp = NULL, *svp = NULL;
1746 struct afs_stat status;
1747 struct VolumeDiskHeader diskHeader;
1748 struct VolumeHeader iheader;
1749 struct DiskPartition *partp;
1753 #ifdef AFS_DEMAND_ATTACH_FS
1754 VolumeStats stats_save;
1755 #endif /* AFS_DEMAND_ATTACH_FS */
1759 volumeId = VolumeNumber(name);
1761 if (!(partp = VGetPartition_r(partition, 0))) {
1763 Log("VAttachVolume: Error getting partition (%s)\n", partition);
1767 if (programType == volumeUtility) {
1769 VLockPartition_r(partition);
1770 } else if (programType == fileServer) {
1771 #ifdef AFS_DEMAND_ATTACH_FS
1772 /* lookup the volume in the hash table */
1773 vp = VLookupVolume_r(ec, volumeId, NULL);
1779 /* save any counters that are supposed to
1780 * be monotonically increasing over the
1781 * lifetime of the fileserver */
1782 memcpy(&stats_save, &vp->stats, sizeof(VolumeStats));
1784 memset(&stats_save, 0, sizeof(VolumeStats));
1787 /* if there's something in the hash table, and it's not
1788 * in the pre-attach state, then we may need to detach
1789 * it before proceeding */
1790 if (vp && (V_attachState(vp) != VOL_STATE_PREATTACHED)) {
1791 VCreateReservation_r(vp);
1792 VWaitExclusiveState_r(vp);
1794 /* at this point state must be one of:
1803 if (vp->specialStatus == VBUSY)
1806 /* if it's already attached, see if we can return it */
1807 if (V_attachState(vp) == VOL_STATE_ATTACHED) {
1808 VGetVolumeByVp_r(ec, vp);
1810 VCancelReservation_r(vp);
1814 /* otherwise, we need to detach, and attempt to re-attach */
1815 VDetachVolume_r(ec, vp);
1817 Log("VAttachVolume: Error detaching old volume instance (%s)\n", name);
1820 /* if it isn't fully attached, delete from the hash tables,
1821 and let the refcounter handle the rest */
1822 DeleteVolumeFromHashTable(vp);
1823 DeleteVolumeFromVByPList_r(vp);
1826 VCancelReservation_r(vp);
1830 /* pre-attach volume if it hasn't been done yet */
1832 (V_attachState(vp) == VOL_STATE_UNATTACHED) ||
1833 (V_attachState(vp) == VOL_STATE_ERROR)) {
1835 vp = VPreAttachVolumeByVp_r(ec, partp, vp, volumeId);
1843 /* handle pre-attach races
1845 * multiple threads can race to pre-attach a volume,
1846 * but we can't let them race beyond that
1848 * our solution is to let the first thread to bring
1849 * the volume into an exclusive state win; the other
1850 * threads just wait until it finishes bringing the
1851 * volume online, and then they do a vgetvolumebyvp
1853 if (svp && (svp != vp)) {
1854 /* wait for other exclusive ops to finish */
1855 VCreateReservation_r(vp);
1856 VWaitExclusiveState_r(vp);
1858 /* get a heavyweight ref, kill the lightweight ref, and return */
1859 VGetVolumeByVp_r(ec, vp);
1860 VCancelReservation_r(vp);
1864 /* at this point, we are chosen as the thread to do
1865 * demand attachment for this volume. all other threads
1866 * doing a getvolume on vp->hashid will block until we finish */
1868 /* make sure any old header cache entries are invalidated
1869 * before proceeding */
1870 FreeVolumeHeader(vp);
1872 VChangeState_r(vp, VOL_STATE_ATTACHING);
1874 /* restore any saved counters */
1875 memcpy(&vp->stats, &stats_save, sizeof(VolumeStats));
1876 #else /* AFS_DEMAND_ATTACH_FS */
1877 vp = VGetVolume_r(ec, volumeId);
1881 if (vp->specialStatus == VBUSY)
1883 VDetachVolume_r(ec, vp);
1885 Log("VAttachVolume: Error detaching volume (%s)\n", name);
1889 #endif /* AFS_DEMAND_ATTACH_FS */
1893 strcpy(path, VPartitionPath(partp));
1899 if ((fd = afs_open(path, O_RDONLY)) == -1 || afs_fstat(fd, &status) == -1) {
1900 Log("VAttachVolume: Failed to open %s (errno %d)\n", path, errno);
1907 n = read(fd, &diskHeader, sizeof(diskHeader));
1909 if (n != sizeof(diskHeader)
1910 || diskHeader.stamp.magic != VOLUMEHEADERMAGIC) {
1911 Log("VAttachVolume: Error reading volume header %s\n", path);
1916 if (diskHeader.stamp.version != VOLUMEHEADERVERSION) {
1917 Log("VAttachVolume: Volume %s, version number is incorrect; volume needs to be salvaged\n", path);
1923 DiskToVolumeHeader(&iheader, &diskHeader);
1924 #ifdef FSSYNC_BUILD_CLIENT
1925 if (programType == volumeUtility && mode != V_SECRETLY && mode != V_PEEK) {
1927 if (FSYNC_VolOp(iheader.id, partition, FSYNC_VOL_NEEDVOLUME, mode, NULL)
1929 Log("VAttachVolume: attach of volume %u apparently denied by file server\n", iheader.id);
1930 *ec = VNOVOL; /* XXXX */
1938 vp = (Volume *) calloc(1, sizeof(Volume));
1940 vp->device = partp->device;
1941 vp->partition = partp;
1942 queue_Init(&vp->vnode_list);
1943 #ifdef AFS_DEMAND_ATTACH_FS
1944 assert(pthread_cond_init(&V_attachCV(vp), NULL) == 0);
1945 #endif /* AFS_DEMAND_ATTACH_FS */
1948 /* attach2 is entered without any locks, and returns
1949 * with vol_glock_mutex held */
1950 vp = attach2(ec, volumeId, path, &iheader, partp, vp, isbusy, mode);
1952 if (programType == volumeUtility && vp) {
1953 #ifdef AFS_DEMAND_ATTACH_FS
1954 /* for dafs, we should tell the fileserver, except for V_PEEK
1955 * where we know it is not necessary */
1956 if (mode == V_PEEK) {
1957 vp->needsPutBack = 0;
1959 vp->needsPutBack = 1;
1961 #else /* !AFS_DEMAND_ATTACH_FS */
1962 /* duplicate computation in fssync.c about whether the server
1963 * takes the volume offline or not. If the volume isn't
1964 * offline, we must not return it when we detach the volume,
1965 * or the server will abort */
1966 if (mode == V_READONLY || mode == V_PEEK
1967 || (!VolumeWriteable(vp) && (mode == V_CLONE || mode == V_DUMP)))
1968 vp->needsPutBack = 0;
1970 vp->needsPutBack = 1;
1971 #endif /* !AFS_DEMAND_ATTACH_FS */
1973 /* OK, there's a problem here, but one that I don't know how to
1974 * fix right now, and that I don't think should arise often.
1975 * Basically, we should only put back this volume to the server if
1976 * it was given to us by the server, but since we don't have a vp,
1977 * we can't run the VolumeWriteable function to find out as we do
1978 * above when computing vp->needsPutBack. So we send it back, but
1979 * there's a path in VAttachVolume on the server which may abort
1980 * if this volume doesn't have a header. Should be pretty rare
1981 * for all of that to happen, but if it does, probably the right
1982 * fix is for the server to allow the return of readonly volumes
1983 * that it doesn't think are really checked out. */
1984 #ifdef FSSYNC_BUILD_CLIENT
1985 if (programType == volumeUtility && vp == NULL &&
1986 mode != V_SECRETLY && mode != V_PEEK) {
1987 FSYNC_VolOp(iheader.id, partition, FSYNC_VOL_ON, 0, NULL);
1990 if (programType == fileServer && vp) {
1991 #ifdef AFS_DEMAND_ATTACH_FS
1993 * we can get here in cases where we don't "own"
1994 * the volume (e.g. volume owned by a utility).
1995 * short circuit around potential disk header races.
1997 if (V_attachState(vp) != VOL_STATE_ATTACHED) {
2001 V_needsCallback(vp) = 0;
2003 if (VInit >= 2 && V_BreakVolumeCallbacks) {
2004 Log("VAttachVolume: Volume %u was changed externally; breaking callbacks\n", V_id(vp));
2005 (*V_BreakVolumeCallbacks) (V_id(vp));
2008 VUpdateVolume_r(ec, vp, 0);
2010 Log("VAttachVolume: Error updating volume\n");
2015 if (VolumeWriteable(vp) && V_dontSalvage(vp) == 0) {
2016 #ifndef AFS_DEMAND_ATTACH_FS
2017 /* This is a hack: by temporarily setting the incore
2018 * dontSalvage flag ON, the volume will be put back on the
2019 * Update list (with dontSalvage OFF again). It will then
2020 * come back in N minutes with DONT_SALVAGE eventually
2021 * set. This is the way that volumes that have never had
2022 * it set get it set; or that volumes that have been
2023 * offline without DONT SALVAGE having been set also
2024 * eventually get it set */
2025 V_dontSalvage(vp) = DONT_SALVAGE;
2026 #endif /* !AFS_DEMAND_ATTACH_FS */
2027 VAddToVolumeUpdateList_r(ec, vp);
2029 Log("VAttachVolume: Error adding volume to update list\n");
2036 Log("VOnline: volume %u (%s) attached and online\n", V_id(vp),
2041 if (programType == volumeUtility) {
2042 VUnlockPartition_r(partition);
2045 #ifdef AFS_DEMAND_ATTACH_FS
2046 /* attach failed; make sure we're in error state */
2047 if (vp && !VIsErrorState(V_attachState(vp))) {
2048 VChangeState_r(vp, VOL_STATE_ERROR);
2050 #endif /* AFS_DEMAND_ATTACH_FS */
2057 #ifdef AFS_DEMAND_ATTACH_FS
2058 /* VAttachVolumeByVp_r
2060 * finish attaching a volume that is
2061 * in a less than fully attached state
2063 /* caller MUST hold a ref count on vp */
2065 VAttachVolumeByVp_r(Error * ec, Volume * vp, int mode)
2067 char name[VMAXPATHLEN];
2068 int fd, n, reserve = 0;
2069 struct afs_stat status;
2070 struct VolumeDiskHeader diskHeader;
2071 struct VolumeHeader iheader;
2072 struct DiskPartition *partp;
2077 VolumeStats stats_save;
2080 /* volume utility should never call AttachByVp */
2081 assert(programType == fileServer);
2083 volumeId = vp->hashid;
2084 partp = vp->partition;
2085 VolumeExternalName_r(volumeId, name, sizeof(name));
2088 /* if another thread is performing a blocking op, wait */
2089 VWaitExclusiveState_r(vp);
2091 memcpy(&stats_save, &vp->stats, sizeof(VolumeStats));
2093 /* if it's already attached, see if we can return it */
2094 if (V_attachState(vp) == VOL_STATE_ATTACHED) {
2095 VGetVolumeByVp_r(ec, vp);
2099 if (vp->specialStatus == VBUSY)
2101 VDetachVolume_r(ec, vp);
2103 Log("VAttachVolume: Error detaching volume (%s)\n", name);
2109 /* pre-attach volume if it hasn't been done yet */
2111 (V_attachState(vp) == VOL_STATE_UNATTACHED) ||
2112 (V_attachState(vp) == VOL_STATE_ERROR)) {
2113 nvp = VPreAttachVolumeByVp_r(ec, partp, vp, volumeId);
2119 VCreateReservation_r(nvp);
2125 VChangeState_r(vp, VOL_STATE_ATTACHING);
2127 /* restore monotonically increasing stats */
2128 memcpy(&vp->stats, &stats_save, sizeof(VolumeStats));
2133 /* compute path to disk header,
2135 * and verify magic and version stamps */
2136 strcpy(path, VPartitionPath(partp));
2142 if ((fd = afs_open(path, O_RDONLY)) == -1 || afs_fstat(fd, &status) == -1) {
2143 Log("VAttachVolume: Failed to open %s (errno %d)\n", path, errno);
2150 n = read(fd, &diskHeader, sizeof(diskHeader));
2152 if (n != sizeof(diskHeader)
2153 || diskHeader.stamp.magic != VOLUMEHEADERMAGIC) {
2154 Log("VAttachVolume: Error reading volume header %s\n", path);
2159 if (diskHeader.stamp.version != VOLUMEHEADERVERSION) {
2160 Log("VAttachVolume: Volume %s, version number is incorrect; volume needs to be salvaged\n", path);
2166 /* convert on-disk header format to in-memory header format */
2167 DiskToVolumeHeader(&iheader, &diskHeader);
2171 * NOTE: attach2 is entered without any locks, and returns
2172 * with vol_glock_mutex held */
2173 vp = attach2(ec, volumeId, path, &iheader, partp, vp, isbusy, mode);
2176 * the event that an error was encountered, or
2177 * the volume was not brought to an attached state
2178 * for any reason, skip to the end. We cannot
2179 * safely call VUpdateVolume unless we "own" it.
2183 (V_attachState(vp) != VOL_STATE_ATTACHED)) {
2187 V_needsCallback(vp) = 0;
2188 VUpdateVolume_r(ec, vp, 0);
2190 Log("VAttachVolume: Error updating volume %u\n", vp->hashid);
2194 if (VolumeWriteable(vp) && V_dontSalvage(vp) == 0) {
2195 #ifndef AFS_DEMAND_ATTACH_FS
2196 /* This is a hack: by temporarily setting the incore
2197 * dontSalvage flag ON, the volume will be put back on the
2198 * Update list (with dontSalvage OFF again). It will then
2199 * come back in N minutes with DONT_SALVAGE eventually
2200 * set. This is the way that volumes that have never had
2201 * it set get it set; or that volumes that have been
2202 * offline without DONT SALVAGE having been set also
2203 * eventually get it set */
2204 V_dontSalvage(vp) = DONT_SALVAGE;
2205 #endif /* !AFS_DEMAND_ATTACH_FS */
2206 VAddToVolumeUpdateList_r(ec, vp);
2208 Log("VAttachVolume: Error adding volume %u to update list\n", vp->hashid);
2215 Log("VOnline: volume %u (%s) attached and online\n", V_id(vp),
2219 VCancelReservation_r(nvp);
2222 if (*ec && (*ec != VOFFLINE) && (*ec != VSALVAGE)) {
2223 if (vp && !VIsErrorState(V_attachState(vp))) {
2224 VChangeState_r(vp, VOL_STATE_ERROR);
2231 #endif /* AFS_DEMAND_ATTACH_FS */
2234 * called without any locks held
2235 * returns with vol_glock_mutex held
2238 attach2(Error * ec, VolId volumeId, char *path, register struct VolumeHeader * header,
2239 struct DiskPartition * partp, register Volume * vp, int isbusy, int mode)
2241 vp->specialStatus = (byte) (isbusy ? VBUSY : 0);
2242 IH_INIT(vp->vnodeIndex[vLarge].handle, partp->device, header->parent,
2243 header->largeVnodeIndex);
2244 IH_INIT(vp->vnodeIndex[vSmall].handle, partp->device, header->parent,
2245 header->smallVnodeIndex);
2246 IH_INIT(vp->diskDataHandle, partp->device, header->parent,
2247 header->volumeInfo);
2248 IH_INIT(vp->linkHandle, partp->device, header->parent, header->linkTable);
2249 vp->shuttingDown = 0;
2250 vp->goingOffline = 0;
2252 #ifdef AFS_DEMAND_ATTACH_FS
2253 vp->stats.last_attach = FT_ApproxTime();
2254 vp->stats.attaches++;
2258 IncUInt64(&VStats.attaches);
2259 vp->cacheCheck = ++VolumeCacheCheck;
2260 /* just in case this ever rolls over */
2261 if (!vp->cacheCheck)
2262 vp->cacheCheck = ++VolumeCacheCheck;
2263 GetVolumeHeader(vp);
2266 #if defined(AFS_DEMAND_ATTACH_FS) && defined(FSSYNC_BUILD_CLIENT)
2267 /* demand attach changes the V_PEEK mechanism
2269 * we can now suck the current disk data structure over
2270 * the fssync interface without going to disk
2272 * (technically, we don't need to restrict this feature
2273 * to demand attach fileservers. However, I'm trying
2274 * to limit the number of common code changes)
2276 if (programType != fileServer && mode == V_PEEK) {
2278 res.payload.len = sizeof(VolumeDiskData);
2279 res.payload.buf = &vp->header->diskstuff;
2281 if (FSYNC_VolOp(volumeId,
2282 VPartitionPath(partp),
2283 FSYNC_VOL_QUERY_HDR,
2286 goto disk_header_loaded;
2289 #endif /* AFS_DEMAND_ATTACH_FS && FSSYNC_BUILD_CLIENT */
2290 (void)ReadHeader(ec, V_diskDataHandle(vp), (char *)&V_disk(vp),
2291 sizeof(V_disk(vp)), VOLUMEINFOMAGIC, VOLUMEINFOVERSION);
2293 #ifdef AFS_DEMAND_ATTACH_FS
2296 IncUInt64(&VStats.hdr_loads);
2297 IncUInt64(&vp->stats.hdr_loads);
2299 #endif /* AFS_DEMAND_ATTACH_FS */
2302 Log("VAttachVolume: Error reading diskDataHandle vol header %s; error=%u\n", path, *ec);
2307 #ifdef AFS_DEMAND_ATTACH_FS
2310 /* check for pending volume operations */
2311 if (vp->pending_vol_op) {
2312 /* see if the pending volume op requires exclusive access */
2313 if (!VVolOpLeaveOnline_r(vp, vp->pending_vol_op)) {
2314 /* mark the volume down */
2316 VChangeState_r(vp, VOL_STATE_UNATTACHED);
2317 if (V_offlineMessage(vp)[0] == '\0')
2318 strlcpy(V_offlineMessage(vp),
2319 "A volume utility is running.",
2320 sizeof(V_offlineMessage(vp)));
2321 V_offlineMessage(vp)[sizeof(V_offlineMessage(vp)) - 1] = '\0';
2323 /* check to see if we should set the specialStatus flag */
2324 if (VVolOpSetVBusy_r(vp, vp->pending_vol_op)) {
2325 vp->specialStatus = VBUSY;
2330 V_attachFlags(vp) |= VOL_HDR_LOADED;
2331 vp->stats.last_hdr_load = vp->stats.last_attach;
2333 #endif /* AFS_DEMAND_ATTACH_FS */
2336 struct IndexFileHeader iHead;
2338 #if OPENAFS_VOL_STATS
2340 * We just read in the diskstuff part of the header. If the detailed
2341 * volume stats area has not yet been initialized, we should bzero the
2342 * area and mark it as initialized.
2344 if (!(V_stat_initialized(vp))) {
2345 memset((char *)(V_stat_area(vp)), 0, VOL_STATS_BYTES);
2346 V_stat_initialized(vp) = 1;
2348 #endif /* OPENAFS_VOL_STATS */
2350 (void)ReadHeader(ec, vp->vnodeIndex[vSmall].handle,
2351 (char *)&iHead, sizeof(iHead),
2352 SMALLINDEXMAGIC, SMALLINDEXVERSION);
2355 Log("VAttachVolume: Error reading smallVnode vol header %s; error=%u\n", path, *ec);
2360 struct IndexFileHeader iHead;
2362 (void)ReadHeader(ec, vp->vnodeIndex[vLarge].handle,
2363 (char *)&iHead, sizeof(iHead),
2364 LARGEINDEXMAGIC, LARGEINDEXVERSION);
2367 Log("VAttachVolume: Error reading largeVnode vol header %s; error=%u\n", path, *ec);
2371 #ifdef AFS_NAMEI_ENV
2373 struct versionStamp stamp;
2375 (void)ReadHeader(ec, V_linkHandle(vp), (char *)&stamp,
2376 sizeof(stamp), LINKTABLEMAGIC, LINKTABLEVERSION);
2379 Log("VAttachVolume: Error reading namei vol header %s; error=%u\n", path, *ec);
2382 #endif /* AFS_NAMEI_ENV */
2384 #if defined(AFS_DEMAND_ATTACH_FS)
2385 if (*ec && ((*ec != VOFFLINE) || (V_attachState(vp) != VOL_STATE_UNATTACHED))) {
2387 if (programType == fileServer) {
2388 VRequestSalvage_r(ec, vp, SALVSYNC_ERROR, VOL_SALVAGE_INVALIDATE_HEADER);
2391 Log("VAttachVolume: Error attaching volume %s; volume needs salvage; error=%u\n", path, *ec);
2397 /* volume operation in progress */
2401 #else /* AFS_DEMAND_ATTACH_FS */
2403 Log("VAttachVolume: Error attaching volume %s; volume needs salvage; error=%u\n", path, *ec);
2408 #endif /* AFS_DEMAND_ATTACH_FS */
2410 if (V_needsSalvaged(vp)) {
2411 if (vp->specialStatus)
2412 vp->specialStatus = 0;
2414 #if defined(AFS_DEMAND_ATTACH_FS)
2415 if (programType == fileServer) {
2416 VRequestSalvage_r(ec, vp, SALVSYNC_NEEDED, VOL_SALVAGE_INVALIDATE_HEADER);
2419 Log("VAttachVolume: volume salvage flag is ON for %s; volume needs salvage\n", path);
2423 #else /* AFS_DEMAND_ATTACH_FS */
2426 #endif /* AFS_DEMAND_ATTACH_FS */
2431 if (programType == fileServer) {
2432 #ifndef FAST_RESTART
2433 if (V_inUse(vp) && VolumeWriteable(vp)) {
2434 if (!V_needsSalvaged(vp)) {
2435 V_needsSalvaged(vp) = 1;
2436 VUpdateVolume_r(ec, vp, 0);
2438 #if defined(AFS_DEMAND_ATTACH_FS)
2439 VRequestSalvage_r(ec, vp, SALVSYNC_NEEDED, VOL_SALVAGE_INVALIDATE_HEADER);
2441 #else /* AFS_DEMAND_ATTACH_FS */
2442 Log("VAttachVolume: volume %s needs to be salvaged; not attached.\n", path);
2445 #endif /* AFS_DEMAND_ATTACH_FS */
2448 #endif /* FAST_RESTART */
2450 if (V_destroyMe(vp) == DESTROY_ME) {
2451 #if defined(AFS_DEMAND_ATTACH_FS)
2452 /* schedule a salvage so the volume goes away on disk */
2453 VRequestSalvage_r(ec, vp, SALVSYNC_ERROR, VOL_SALVAGE_INVALIDATE_HEADER);
2454 VChangeState_r(vp, VOL_STATE_ERROR);
2456 #endif /* AFS_DEMAND_ATTACH_FS */
2458 Log("VAttachVolume: volume %s is junk; it should be destroyed at next salvage\n", path);
2464 vp->nextVnodeUnique = V_uniquifier(vp);
2465 vp->vnodeIndex[vSmall].bitmap = vp->vnodeIndex[vLarge].bitmap = NULL;
2466 #ifndef BITMAP_LATER
2467 if (programType == fileServer && VolumeWriteable(vp)) {
2469 for (i = 0; i < nVNODECLASSES; i++) {
2470 VGetBitmap_r(ec, vp, i);
2472 #ifdef AFS_DEMAND_ATTACH_FS
2473 VRequestSalvage_r(ec, vp, SALVSYNC_ERROR, VOL_SALVAGE_INVALIDATE_HEADER);
2475 #else /* AFS_DEMAND_ATTACH_FS */
2477 #endif /* AFS_DEMAND_ATTACH_FS */
2478 Log("VAttachVolume: error getting bitmap for volume (%s)\n",
2484 #endif /* BITMAP_LATER */
2486 if (programType == fileServer) {
2487 if (vp->specialStatus)
2488 vp->specialStatus = 0;
2489 if (V_blessed(vp) && V_inService(vp) && !V_needsSalvaged(vp)) {
2491 V_offlineMessage(vp)[0] = '\0';
2495 AddVolumeToHashTable(vp, V_id(vp));
2496 #ifdef AFS_DEMAND_ATTACH_FS
2497 AddVolumeToVByPList_r(vp);
2499 if ((programType != fileServer) ||
2501 VChangeState_r(vp, VOL_STATE_ATTACHED);
2503 VChangeState_r(vp, VOL_STATE_UNATTACHED);
2509 /* Attach an existing volume.
2510 The volume also normally goes online at this time.
2511 An offline volume must be reattached to make it go online.
2515 VAttachVolume(Error * ec, VolumeId volumeId, int mode)
2519 retVal = VAttachVolume_r(ec, volumeId, mode);
2525 VAttachVolume_r(Error * ec, VolumeId volumeId, int mode)
2528 GetVolumePath(ec, volumeId, &part, &name);
2530 register Volume *vp;
2532 vp = VGetVolume_r(&error, volumeId);
2534 assert(V_inUse(vp) == 0);
2535 VDetachVolume_r(ec, vp);
2539 return VAttachVolumeByName_r(ec, part, name, mode);
2542 /* Increment a reference count to a volume, sans context swaps. Requires
2543 * possibly reading the volume header in from the disk, since there's
2544 * an invariant in the volume package that nUsers>0 ==> vp->header is valid.
2546 * N.B. This call can fail if we can't read in the header!! In this case
2547 * we still guarantee we won't context swap, but the ref count won't be
2548 * incremented (otherwise we'd violate the invariant).
2550 /* NOTE: with the demand attach fileserver extensions, the global lock
2551 * is dropped within VHold */
2552 #ifdef AFS_DEMAND_ATTACH_FS
2554 VHold_r(register Volume * vp)
2558 VCreateReservation_r(vp);
2559 VWaitExclusiveState_r(vp);
2561 LoadVolumeHeader(&error, vp);
2563 VCancelReservation_r(vp);
2567 VCancelReservation_r(vp);
2570 #else /* AFS_DEMAND_ATTACH_FS */
2572 VHold_r(register Volume * vp)
2576 LoadVolumeHeader(&error, vp);
2582 #endif /* AFS_DEMAND_ATTACH_FS */
2585 VHold(register Volume * vp)
2589 retVal = VHold_r(vp);
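/* Illustrative sketch (not part of the original source): one way a
 * caller might use the VHold_r contract described above.  The zero
 * return on success and the pairing with VPutVolume_r are assumptions
 * based on the nUsers accounting elsewhere in this file. */
#if 0
static void
ExampleHold(Volume * vp)
{
    VOL_LOCK;
    if (VHold_r(vp) == 0) {
	/* nUsers > 0 here, so vp->header is guaranteed valid */
	Log("volume %u: header resident\n", V_id(vp));
	VPutVolume_r(vp);	/* drop the reference when done */
    }
    VOL_UNLOCK;
}
#endif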
2595 /***************************************************/
2596 /* get and put volume routines */
2597 /***************************************************/
2600 * put back a heavyweight reference to a volume object.
2602 * @param[in] vp volume object pointer
2604 * @pre VOL_LOCK held
2606 * @post heavyweight volume reference put back.
2607 * depending on state, volume may have been taken offline,
2608 * detached, salvaged, freed, etc.
2610 * @internal volume package internal use only
2613 VPutVolume_r(register Volume * vp)
2615 assert(--vp->nUsers >= 0);
2616 if (vp->nUsers == 0) {
2618 ReleaseVolumeHeader(vp->header);
2619 #ifdef AFS_DEMAND_ATTACH_FS
2620 if (!VCheckDetach(vp)) {
2624 #else /* AFS_DEMAND_ATTACH_FS */
2626 #endif /* AFS_DEMAND_ATTACH_FS */
2631 VPutVolume(register Volume * vp)
2639 /* Get a pointer to an attached volume. The pointer is returned regardless
2640 of whether or not the volume is in service or on/off line. An error
2641 code, however, is returned with an indication of the volume's status */
2643 VGetVolume(Error * ec, Error * client_ec, VolId volumeId)
2647 retVal = GetVolume(ec, client_ec, volumeId, NULL, 0);
2653 VGetVolume_r(Error * ec, VolId volumeId)
2655 return GetVolume(ec, NULL, volumeId, NULL, 0);
2658 /* try to get a volume we've previously looked up */
2659 /* for demand attach fs, caller MUST NOT hold a ref count on vp */
2661 VGetVolumeByVp_r(Error * ec, Volume * vp)
2663 return GetVolume(ec, NULL, vp->hashid, vp, 0);
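/* Illustrative sketch (not part of the original source): the basic
 * get/put pairing for the public interfaces above.  Note that a
 * non-NULL pointer can be returned together with a non-zero error
 * code describing the volume's status. */
#if 0
static void
ExampleGetPut(VolId volumeId)
{
    Error ec, client_ec;
    Volume *vp;

    vp = VGetVolume(&ec, &client_ec, volumeId);
    if (vp) {
	if (ec)
	    Log("volume %u returned with status error %d\n", V_id(vp), ec);
	VPutVolume(vp);		/* release the heavyweight reference */
    }
}
#endif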
2666 /* private interface for getting a volume handle
2667 * volumeId must be provided.
2668 * hint is an optional parameter to speed up hash lookups
2669 * flags is not used at this time
2671 /* for demand attach fs, caller MUST NOT hold a ref count on hint */
2673 GetVolume(Error * ec, Error * client_ec, VolId volumeId, Volume * hint, int flags)
2676 /* pull this profiling/debugging code out of regular builds */
2678 #define VGET_CTR_INC(x) x++
2679 unsigned short V0 = 0, V1 = 0, V2 = 0, V3 = 0, V5 = 0, V6 =
2680 0, V7 = 0, V8 = 0, V9 = 0;
2681 unsigned short V10 = 0, V11 = 0, V12 = 0, V13 = 0, V14 = 0, V15 = 0;
2683 #define VGET_CTR_INC(x)
2685 #ifdef AFS_DEMAND_ATTACH_FS
2686 Volume *avp, * rvp = hint;
2689 #ifdef AFS_DEMAND_ATTACH_FS
2691 VCreateReservation_r(rvp);
2693 #endif /* AFS_DEMAND_ATTACH_FS */
2701 vp = VLookupVolume_r(ec, volumeId, vp);
2707 #ifdef AFS_DEMAND_ATTACH_FS
2708 if (rvp && (rvp != vp)) {
2709 /* break reservation on old vp */
2710 VCancelReservation_r(rvp);
2713 #endif /* AFS_DEMAND_ATTACH_FS */
2719 /* Until we have reached an initialization level of 2
2720 * we don't know whether this volume exists or not.
2721 * We can't sleep and retry later because before a volume
2722 * is attached, the caller tries to get it first. Just
2723 * return VOFFLINE and the caller can choose whether to
2724 * retry the command or not. */
2734 IncUInt64(&VStats.hdr_gets);
2736 #ifdef AFS_DEMAND_ATTACH_FS
2737 /* block if someone else is performing an exclusive op on this volume */
2740 VCreateReservation_r(rvp);
2742 VWaitExclusiveState_r(vp);
2744 /* short circuit with VNOVOL in the following circumstances:
2747 * VOL_STATE_SHUTTING_DOWN
2749 if ((V_attachState(vp) == VOL_STATE_ERROR) ||
2750 (V_attachState(vp) == VOL_STATE_SHUTTING_DOWN)) {
2757 * short circuit with VOFFLINE in the following circumstances:
2759 * VOL_STATE_UNATTACHED
2761 if (V_attachState(vp) == VOL_STATE_UNATTACHED) {
2767 /* allowable states:
2775 if (vp->salvage.requested) {
2776 VUpdateSalvagePriority_r(vp);
2779 if (V_attachState(vp) == VOL_STATE_PREATTACHED) {
2780 avp = VAttachVolumeByVp_r(ec, vp, 0);
2783 /* VAttachVolumeByVp_r can return a pointer
2784 * != the vp passed to it under certain
2785 * conditions; make sure we don't leak
2786 * reservations if that happens */
2788 VCancelReservation_r(rvp);
2790 VCreateReservation_r(rvp);
2800 if (!vp->pending_vol_op) {
2815 if ((V_attachState(vp) == VOL_STATE_SALVAGING) ||
2816 (*ec == VSALVAGING)) {
2818 /* see CheckVnode() in afsfileprocs.c for an explanation
2819 * of this error code logic */
2820 afs_uint32 now = FT_ApproxTime();
2821 if ((vp->stats.last_salvage + (10 * 60)) >= now) {
2824 *client_ec = VRESTARTING;
2833 LoadVolumeHeader(ec, vp);
2836 /* Only log the error if it was totally unexpected; a missing
2837 * inode, for example, is most likely caused by the volume having been deleted */
2838 if (errno != ENXIO || LogLevel)
2839 Log("Volume %u: couldn't reread volume header\n",
2841 #ifdef AFS_DEMAND_ATTACH_FS
2842 if (programType == fileServer) {
2843 VRequestSalvage_r(ec, vp, SALVSYNC_ERROR, VOL_SALVAGE_INVALIDATE_HEADER);
2848 #else /* AFS_DEMAND_ATTACH_FS */
2851 #endif /* AFS_DEMAND_ATTACH_FS */
2855 #ifdef AFS_DEMAND_ATTACH_FS
2857 * this test MUST happen after the volume header is loaded
2859 if (vp->pending_vol_op && !VVolOpLeaveOnline_r(vp, vp->pending_vol_op)) {
2861 /* see CheckVnode() in afsfileprocs.c for an explanation
2862 * of this error code logic */
2863 afs_uint32 now = FT_ApproxTime();
2864 if ((vp->stats.last_vol_op + (10 * 60)) >= now) {
2867 *client_ec = VRESTARTING;
2871 ReleaseVolumeHeader(vp->header);
2875 #endif /* AFS_DEMAND_ATTACH_FS */
2878 if (vp->shuttingDown) {
2885 if (programType == fileServer) {
2887 if (vp->goingOffline) {
2889 #ifdef AFS_DEMAND_ATTACH_FS
2890 /* wait for the volume to go offline */
2891 if (V_attachState(vp) == VOL_STATE_GOING_OFFLINE) {
2892 VWaitStateChange_r(vp);
2894 #elif defined(AFS_PTHREAD_ENV)
2895 VOL_CV_WAIT(&vol_put_volume_cond);
2896 #else /* AFS_PTHREAD_ENV */
2897 LWP_WaitProcess(VPutVolume);
2898 #endif /* AFS_PTHREAD_ENV */
2901 if (vp->specialStatus) {
2903 *ec = vp->specialStatus;
2904 } else if (V_inService(vp) == 0 || V_blessed(vp) == 0) {
2907 } else if (V_inUse(vp) == 0) {
2918 #ifdef AFS_DEMAND_ATTACH_FS
2919 /* if no error, bump nUsers */
2922 VLRU_UpdateAccess_r(vp);
2925 VCancelReservation_r(rvp);
2928 if (client_ec && !*client_ec) {
2931 #else /* AFS_DEMAND_ATTACH_FS */
2932 /* if no error, bump nUsers */
2939 #endif /* AFS_DEMAND_ATTACH_FS */
2946 /***************************************************/
2947 /* Volume offline/detach routines */
2948 /***************************************************/
2950 /* caller MUST hold a heavyweight ref on vp */
2951 #ifdef AFS_DEMAND_ATTACH_FS
2953 VTakeOffline_r(register Volume * vp)
2957 assert(vp->nUsers > 0);
2958 assert(programType == fileServer);
2960 VCreateReservation_r(vp);
2961 VWaitExclusiveState_r(vp);
2963 vp->goingOffline = 1;
2964 V_needsSalvaged(vp) = 1;
2966 VRequestSalvage_r(&error, vp, SALVSYNC_ERROR, 0);
2967 VCancelReservation_r(vp);
2969 #else /* AFS_DEMAND_ATTACH_FS */
2971 VTakeOffline_r(register Volume * vp)
2973 assert(vp->nUsers > 0);
2974 assert(programType == fileServer);
2976 vp->goingOffline = 1;
2977 V_needsSalvaged(vp) = 1;
2979 #endif /* AFS_DEMAND_ATTACH_FS */
2982 VTakeOffline(register Volume * vp)
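/* Illustrative sketch (not part of the original source): forcing a
 * salvage from a thread that already holds a heavyweight reference,
 * as the note above requires.  Relying on the offline transition to
 * complete once the last reference is put back is inferred from
 * VCheckOffline later in this file. */
#if 0
static void
ExampleTakeOffline(VolId volumeId)
{
    Error ec;
    Volume *vp = VGetVolume(&ec, NULL, volumeId);	/* heavyweight ref */

    if (vp) {
	VTakeOffline(vp);	/* sets goingOffline and needsSalvaged */
	VPutVolume(vp);		/* offline work proceeds once nUsers drops */
    }
}
#endif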
2990 * force a volume offline.
2992 * @param[in] vp volume object pointer
2993 * @param[in] flags flags (see note below)
2995 * @note the flag VOL_FORCEOFF_NOUPDATE is a recursion control flag
2996 * used when VUpdateVolume_r needs to call VForceOffline_r
2997 * (which in turn would normally call VUpdateVolume_r)
2999 * @see VUpdateVolume_r
3001 * @pre VOL_LOCK must be held.
3002 * for DAFS, caller must hold ref.
3004 * @note for DAFS, it _is safe_ to call this function from an
3007 * @post needsSalvaged flag is set.
3008 * for DAFS, salvage is requested.
3009 * no further references to the volume through the volume
3010 * package will be honored.
3011 * all file descriptor and vnode caches are invalidated.
3013 * @warning this is a heavy-handed interface. it results in
3014 * a volume going offline regardless of the current
3015 * reference count state.
3017 * @internal volume package internal use only
3020 VForceOffline_r(Volume * vp, int flags)
3024 #ifdef AFS_DEMAND_ATTACH_FS
3025 VChangeState_r(vp, VOL_STATE_ERROR);
3030 strcpy(V_offlineMessage(vp),
3031 "Forced offline due to internal error: volume needs to be salvaged");
3032 Log("Volume %u forced offline: it needs salvaging!\n", V_id(vp));
3035 vp->goingOffline = 0;
3036 V_needsSalvaged(vp) = 1;
3037 if (!(flags & VOL_FORCEOFF_NOUPDATE)) {
3038 VUpdateVolume_r(&error, vp, VOL_UPDATE_NOFORCEOFF);
3041 #ifdef AFS_DEMAND_ATTACH_FS
3042 VRequestSalvage_r(&error, vp, SALVSYNC_ERROR, VOL_SALVAGE_INVALIDATE_HEADER);
3043 #endif /* AFS_DEMAND_ATTACH_FS */
3045 #ifdef AFS_PTHREAD_ENV
3046 assert(pthread_cond_broadcast(&vol_put_volume_cond) == 0);
3047 #else /* AFS_PTHREAD_ENV */
3048 LWP_NoYieldSignal(VPutVolume);
3049 #endif /* AFS_PTHREAD_ENV */
3051 VReleaseVolumeHandles_r(vp);
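/* Illustrative note (not part of the original source): the recursion
 * control described above in practice.  VUpdateVolume_r's error path
 * calls VForceOffline_r(vp, VOL_FORCEOFF_NOUPDATE), while
 * VForceOffline_r writes the header back with VOL_UPDATE_NOFORCEOFF,
 * so neither routine can re-enter the other a second time. */
#if 0
static void
ExampleForceOffline(Volume * vp)
{
    /* ordinary callers pass no flags; the NOUPDATE flag is reserved
     * for the VUpdateVolume_r error path shown later in this file */
    VForceOffline_r(vp, 0);
}
#endif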
3055 * force a volume offline.
3057 * @param[in] vp volume object pointer
3059 * @see VForceOffline_r
3062 VForceOffline(Volume * vp)
3065 VForceOffline_r(vp, 0);
3069 /* The opposite of VAttachVolume. The volume header is written to disk, with
3070 the inUse bit turned off. A copy of the header is maintained in memory,
3071 however (which is why this is VOffline, not VDetach).
3074 VOffline_r(Volume * vp, char *message)
3077 VolumeId vid = V_id(vp);
3079 assert(programType != volumeUtility);
3084 if (V_offlineMessage(vp)[0] == '\0')
3085 strncpy(V_offlineMessage(vp), message, sizeof(V_offlineMessage(vp)));
3086 V_offlineMessage(vp)[sizeof(V_offlineMessage(vp)) - 1] = '\0';
3088 vp->goingOffline = 1;
3089 #ifdef AFS_DEMAND_ATTACH_FS
3090 VChangeState_r(vp, VOL_STATE_GOING_OFFLINE);
3091 VCreateReservation_r(vp);
3094 /* wait for the volume to go offline */
3095 if (V_attachState(vp) == VOL_STATE_GOING_OFFLINE) {
3096 VWaitStateChange_r(vp);
3098 VCancelReservation_r(vp);
3099 #else /* AFS_DEMAND_ATTACH_FS */
3101 vp = VGetVolume_r(&error, vid); /* Wait for it to go offline */
3102 if (vp) /* In case it was reattached... */
3104 #endif /* AFS_DEMAND_ATTACH_FS */
3108 VOffline(Volume * vp, char *message)
3111 VOffline_r(vp, message);
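/* Illustrative sketch (not part of the original source): taking a
 * volume offline with an operator-visible message.  Treating the
 * caller's reference as consumed by the offline transition is an
 * assumption based on the wait loop in VOffline_r above. */
#if 0
static void
ExampleOffline(VolId volumeId)
{
    Error ec;
    Volume *vp = VGetVolume(&ec, NULL, volumeId);

    if (vp)
	VOffline(vp, "volume offline for scheduled maintenance");
}
#endif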
3115 /* This gets used for the most part by utility routines that don't want
3116 * to keep all the volume headers around. Generally, the file server won't
3117 * call this routine, because then the offline message in the volume header
3118 * (or other information) won't be available to clients. For NAMEI, also
3119 * close the file handles. However, the fileserver does call this during
3120 * an attach following a volume operation.
3123 VDetachVolume_r(Error * ec, Volume * vp)
3126 struct DiskPartition *tpartp;
3127 int notifyServer, useDone = FSYNC_VOL_ON;
3129 *ec = 0; /* always "succeeds" */
3130 if (programType == volumeUtility) {
3131 notifyServer = vp->needsPutBack;
3132 if (V_destroyMe(vp) == DESTROY_ME)
3133 useDone = FSYNC_VOL_DONE;
3134 #ifdef AFS_DEMAND_ATTACH_FS
3135 else if (!V_blessed(vp) || !V_inService(vp))
3136 useDone = FSYNC_VOL_LEAVE_OFF;
3139 tpartp = vp->partition;
3141 DeleteVolumeFromHashTable(vp);
3142 vp->shuttingDown = 1;
3143 #ifdef AFS_DEMAND_ATTACH_FS
3144 DeleteVolumeFromVByPList_r(vp);
3146 VChangeState_r(vp, VOL_STATE_SHUTTING_DOWN);
3147 #endif /* AFS_DEMAND_ATTACH_FS */
3149 /* Will be detached sometime in the future--this is OK since volume is offline */
3151 /* XXX the following code should really be moved to VCheckDetach() since the volume
3152 * is not technically detached until the refcounts reach zero
3154 #ifdef FSSYNC_BUILD_CLIENT
3155 if (programType == volumeUtility && notifyServer) {
3157 * Note: The server is not notified in the case of a bogus volume
3158 * explicitly to make it possible to create a volume, do a partial
3159 * restore, then abort the operation without ever putting the volume
3160 * online. This is essential in the case of a volume move operation
3161 * between two partitions on the same server. In that case, there
3162 * would be two instances of the same volume, one of them bogus,
3162 * which the file server would attempt to put online
3165 FSYNC_VolOp(volume, tpartp->name, useDone, 0, NULL);
3166 /* XXX this code path is only hit by volume utilities, thus
3167 * V_BreakVolumeCallbacks will always be NULL. if we really
3168 * want to break callbacks in this path we need to use FSYNC_VolOp() */
3170 /* Detaching it, so break all callbacks on it */
3171 if (V_BreakVolumeCallbacks) {
3172 Log("volume %u detached; breaking all call backs\n", volume);
3173 (*V_BreakVolumeCallbacks) (volume);
3177 #endif /* FSSYNC_BUILD_CLIENT */
3181 VDetachVolume(Error * ec, Volume * vp)
3184 VDetachVolume_r(ec, vp);
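/* Illustrative sketch (not part of the original source): the
 * utility-style attach/work/detach pattern described above.
 * VAttachVolumeByName and the V_VOLUPD attach mode are assumptions
 * about the rest of the volume package; substitute whatever attach
 * interface and mode the utility actually uses. */
#if 0
static void
ExampleUtilityDetach(char *partition, char *name)
{
    Error ec;
    Volume *vp = VAttachVolumeByName(&ec, partition, name, V_VOLUPD);

    if (vp) {
	/* ... perform the offline volume operation here ... */
	VDetachVolume(&ec, vp);	/* close fd cache; notify fileserver if needed */
    }
}
#endif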
3189 /***************************************************/
3190 /* Volume fd/inode handle closing routines */
3191 /***************************************************/
3193 /* For VDetachVolume, we close all cached file descriptors, but keep
3194 * the Inode handles in case we need to read from a busy volume.
3196 /* for demand attach, caller MUST hold ref count on vp */
3198 VCloseVolumeHandles_r(Volume * vp)
3200 #ifdef AFS_DEMAND_ATTACH_FS
3201 VolState state_save;
3203 state_save = VChangeState_r(vp, VOL_STATE_OFFLINING);
3208 * XXX need to investigate whether we can perform
3209 * DFlushVolume outside of vol_glock_mutex...
3211 * VCloseVnodeFiles_r drops the glock internally */
3212 DFlushVolume(V_id(vp));
3213 VCloseVnodeFiles_r(vp);
3215 #ifdef AFS_DEMAND_ATTACH_FS
3219 /* Too time consuming and unnecessary for the volserver */
3220 if (programType != volumeUtility) {
3221 IH_CONDSYNC(vp->vnodeIndex[vLarge].handle);
3222 IH_CONDSYNC(vp->vnodeIndex[vSmall].handle);
3223 IH_CONDSYNC(vp->diskDataHandle);
3225 IH_CONDSYNC(vp->linkHandle);
3226 #endif /* AFS_NT40_ENV */
3229 IH_REALLYCLOSE(vp->vnodeIndex[vLarge].handle);
3230 IH_REALLYCLOSE(vp->vnodeIndex[vSmall].handle);
3231 IH_REALLYCLOSE(vp->diskDataHandle);
3232 IH_REALLYCLOSE(vp->linkHandle);
3234 #ifdef AFS_DEMAND_ATTACH_FS
3236 VChangeState_r(vp, state_save);
3240 /* For both VForceOffline and VOffline, we close all relevant handles.
3241 * For VOffline, if we re-attach the volume, the files may possibly be
3242 * different than before.
3244 /* for demand attach, caller MUST hold a ref count on vp */
3246 VReleaseVolumeHandles_r(Volume * vp)
3248 #ifdef AFS_DEMAND_ATTACH_FS
3249 VolState state_save;
3251 state_save = VChangeState_r(vp, VOL_STATE_DETACHING);
3254 /* XXX need to investigate whether we can perform
3255 * DFlushVolume outside of vol_glock_mutex... */
3256 DFlushVolume(V_id(vp));
3258 VReleaseVnodeFiles_r(vp); /* releases the glock internally */
3260 #ifdef AFS_DEMAND_ATTACH_FS
3264 /* Too time consuming and unnecessary for the volserver */
3265 if (programType != volumeUtility) {
3266 IH_CONDSYNC(vp->vnodeIndex[vLarge].handle);
3267 IH_CONDSYNC(vp->vnodeIndex[vSmall].handle);
3268 IH_CONDSYNC(vp->diskDataHandle);
3270 IH_CONDSYNC(vp->linkHandle);
3271 #endif /* AFS_NT40_ENV */
3274 IH_RELEASE(vp->vnodeIndex[vLarge].handle);
3275 IH_RELEASE(vp->vnodeIndex[vSmall].handle);
3276 IH_RELEASE(vp->diskDataHandle);
3277 IH_RELEASE(vp->linkHandle);
3279 #ifdef AFS_DEMAND_ATTACH_FS
3281 VChangeState_r(vp, state_save);
3286 /***************************************************/
3287 /* Volume write and fsync routines */
3288 /***************************************************/
3291 VUpdateVolume_r(Error * ec, Volume * vp, int flags)
3293 #ifdef AFS_DEMAND_ATTACH_FS
3294 VolState state_save;
3296 if (flags & VOL_UPDATE_WAIT) {
3297 VCreateReservation_r(vp);
3298 VWaitExclusiveState_r(vp);
3303 if (programType == fileServer)
3305 (V_inUse(vp) ? V_nextVnodeUnique(vp) +
3306 200 : V_nextVnodeUnique(vp));
3308 #ifdef AFS_DEMAND_ATTACH_FS
3309 state_save = VChangeState_r(vp, VOL_STATE_UPDATING);
3313 WriteVolumeHeader_r(ec, vp);
3315 #ifdef AFS_DEMAND_ATTACH_FS
3317 VChangeState_r(vp, state_save);
3318 if (flags & VOL_UPDATE_WAIT) {
3319 VCancelReservation_r(vp);
3324 Log("VUpdateVolume: error updating volume header, volume %u (%s)\n",
3325 V_id(vp), V_name(vp));
3326 /* try to update on-disk header,
3327 * while preventing infinite recursion */
3328 if (!(flags & VOL_UPDATE_NOFORCEOFF)) {
3329 VForceOffline_r(vp, VOL_FORCEOFF_NOUPDATE);
3335 VUpdateVolume(Error * ec, Volume * vp)
3338 VUpdateVolume_r(ec, vp, VOL_UPDATE_WAIT);
3343 VSyncVolume_r(Error * ec, Volume * vp, int flags)
3347 #ifdef AFS_DEMAND_ATTACH_FS
3348 VolState state_save;
3351 if (flags & VOL_SYNC_WAIT) {
3352 VUpdateVolume_r(ec, vp, VOL_UPDATE_WAIT);
3354 VUpdateVolume_r(ec, vp, 0);
3357 #ifdef AFS_DEMAND_ATTACH_FS
3358 state_save = VChangeState_r(vp, VOL_STATE_UPDATING);
3361 fdP = IH_OPEN(V_diskDataHandle(vp));
3362 assert(fdP != NULL);
3363 code = FDH_SYNC(fdP);
3366 #ifdef AFS_DEMAND_ATTACH_FS
3368 VChangeState_r(vp, state_save);
3374 VSyncVolume(Error * ec, Volume * vp)
3377 VSyncVolume_r(ec, vp, VOL_SYNC_WAIT);
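/* Illustrative sketch (not part of the original source): the usual
 * pattern for changing an on-disk header field, mirroring the
 * dontSalvage and needsSalvaged updates earlier in this file. */
#if 0
static void
ExampleHeaderUpdate(Volume * vp)
{
    Error ec;

    VOL_LOCK;
    V_dontSalvage(vp) = DONT_SALVAGE;		/* change the in-core copy */
    VUpdateVolume_r(&ec, vp, VOL_UPDATE_WAIT);	/* write it to disk */
    if (ec)
	Log("example: header update failed for volume %u\n", V_id(vp));
    VOL_UNLOCK;
}
#endif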
3382 /***************************************************/
3383 /* Volume deallocation routines */
3384 /***************************************************/
3386 #ifdef AFS_DEMAND_ATTACH_FS
3388 FreeVolume(Volume * vp)
3390 /* free the heap space, iff it's safe.
3391 * otherwise, pull it out of the hash table, so it
3392 * will get deallocated when all refs to it go away */
3393 if (!VCheckFree(vp)) {
3394 DeleteVolumeFromHashTable(vp);
3395 DeleteVolumeFromVByPList_r(vp);
3397 /* make sure we invalidate the header cache entry */
3398 FreeVolumeHeader(vp);
3401 #endif /* AFS_DEMAND_ATTACH_FS */
3404 ReallyFreeVolume(Volume * vp)
3409 #ifdef AFS_DEMAND_ATTACH_FS
3411 VChangeState_r(vp, VOL_STATE_FREED);
3412 if (vp->pending_vol_op)
3413 free(vp->pending_vol_op);
3414 #endif /* AFS_DEMAND_ATTACH_FS */
3415 for (i = 0; i < nVNODECLASSES; i++)
3416 if (vp->vnodeIndex[i].bitmap)
3417 free(vp->vnodeIndex[i].bitmap);
3418 FreeVolumeHeader(vp);
3419 #ifndef AFS_DEMAND_ATTACH_FS
3420 DeleteVolumeFromHashTable(vp);
3421 #endif /* AFS_DEMAND_ATTACH_FS */
3425 /* check to see if we should shut down this volume
3426 * returns 1 if volume was freed, 0 otherwise */
3427 #ifdef AFS_DEMAND_ATTACH_FS
3429 VCheckDetach(register Volume * vp)
3433 if (vp->nUsers || vp->nWaiters)
3436 if (vp->shuttingDown) {
3438 VReleaseVolumeHandles_r(vp);
3440 ReallyFreeVolume(vp);
3441 if (programType == fileServer) {
3442 assert(pthread_cond_broadcast(&vol_put_volume_cond) == 0);
3447 #else /* AFS_DEMAND_ATTACH_FS */
3449 VCheckDetach(register Volume * vp)
3456 if (vp->shuttingDown) {
3458 VReleaseVolumeHandles_r(vp);
3459 ReallyFreeVolume(vp);
3460 if (programType == fileServer) {
3461 #if defined(AFS_PTHREAD_ENV)
3462 assert(pthread_cond_broadcast(&vol_put_volume_cond) == 0);
3463 #else /* AFS_PTHREAD_ENV */
3464 LWP_NoYieldSignal(VPutVolume);
3465 #endif /* AFS_PTHREAD_ENV */
3470 #endif /* AFS_DEMAND_ATTACH_FS */
3472 /* check to see if we should offline this volume
3473 * return 1 if volume went offline, 0 otherwise */
3474 #ifdef AFS_DEMAND_ATTACH_FS
3476 VCheckOffline(register Volume * vp)
3478 Volume * rvp = NULL;
3481 if (vp->goingOffline && !vp->nUsers) {
3483 assert(programType == fileServer);
3484 assert((V_attachState(vp) != VOL_STATE_ATTACHED) &&
3485 (V_attachState(vp) != VOL_STATE_FREED) &&
3486 (V_attachState(vp) != VOL_STATE_PREATTACHED) &&
3487 (V_attachState(vp) != VOL_STATE_UNATTACHED));
3491 * VOL_STATE_GOING_OFFLINE
3492 * VOL_STATE_SHUTTING_DOWN
3493 * VIsErrorState(V_attachState(vp))
3494 * VIsExclusiveState(V_attachState(vp))
3497 VCreateReservation_r(vp);
3498 VChangeState_r(vp, VOL_STATE_OFFLINING);
3501 /* must clear the goingOffline flag before we drop the glock */
3502 vp->goingOffline = 0;
3507 /* perform async operations */
3508 VUpdateVolume_r(&error, vp, 0);
3509 VCloseVolumeHandles_r(vp);
3512 Log("VOffline: Volume %u (%s) is now offline", V_id(vp),
3514 if (V_offlineMessage(vp)[0])
3515 Log(" (%s)", V_offlineMessage(vp));
3519 /* invalidate the volume header cache entry */
3520 FreeVolumeHeader(vp);
3522 /* if nothing changed state to error or salvaging,
3523 * drop state to unattached */
3524 if (!VIsErrorState(V_attachState(vp))) {
3525 VChangeState_r(vp, VOL_STATE_UNATTACHED);
3527 VCancelReservation_r(vp);
3528 /* no usage of vp is safe beyond this point */
3532 #else /* AFS_DEMAND_ATTACH_FS */
3534 VCheckOffline(register Volume * vp)
3536 Volume * rvp = NULL;
3539 if (vp->goingOffline && !vp->nUsers) {
3541 assert(programType == fileServer);
3544 vp->goingOffline = 0;
3546 VUpdateVolume_r(&error, vp, 0);
3547 VCloseVolumeHandles_r(vp);
3549 Log("VOffline: Volume %u (%s) is now offline", V_id(vp),
3551 if (V_offlineMessage(vp)[0])
3552 Log(" (%s)", V_offlineMessage(vp));
3555 FreeVolumeHeader(vp);
3556 #ifdef AFS_PTHREAD_ENV
3557 assert(pthread_cond_broadcast(&vol_put_volume_cond) == 0);
3558 #else /* AFS_PTHREAD_ENV */
3559 LWP_NoYieldSignal(VPutVolume);
3560 #endif /* AFS_PTHREAD_ENV */
3564 #endif /* AFS_DEMAND_ATTACH_FS */
3566 /***************************************************/
3567 /* demand attach fs ref counting routines */
3568 /***************************************************/
3570 #ifdef AFS_DEMAND_ATTACH_FS
3571 /* the following two functions handle reference counting for
3572 * asynchronous operations on volume structs.
3574 * their purpose is to prevent a VDetachVolume or VShutdown
3575 * from free()ing the Volume struct during an async i/o op */
3577 /* register with the async volume op ref counter */
3578 /* VCreateReservation_r moved into inline code header because it
3579 * is now needed in vnode.c -- tkeiser 11/20/2007
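/* Illustrative sketch (not part of the original source): the
 * lightweight reservation pattern used throughout this file (compare
 * VHold_r and VTakeOffline_r above).  A reservation only keeps the
 * Volume struct from being freed; it does not keep the volume
 * attached or online. */
#if 0
static void
ExampleReservation(Volume * vp)
{
    VOL_LOCK;
    VCreateReservation_r(vp);	/* nWaiters++ */
    VWaitExclusiveState_r(vp);	/* may drop and reacquire VOL_LOCK */
    /* ... examine or manipulate vp; it cannot be freed here ... */
    VCancelReservation_r(vp);	/* nWaiters--; may detach/free/offline vp */
    VOL_UNLOCK;
    /* vp must not be touched again after the reservation is cancelled */
}
#endif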
3583 * decrement volume-package internal refcount.
3585 * @param vp volume object pointer
3587 * @internal volume package internal use only
3590 * @arg VOL_LOCK is held
3591 * @arg lightweight refcount held
3593 * @post volume waiters refcount is decremented; volume may
3594 * have been deallocated/shutdown/offlined/salvaged/
3595 * whatever during the process
3597 * @warning once you have tossed your last reference (you can acquire
3598 * lightweight refs recursively) it is NOT SAFE to reference
3599 * a volume object pointer ever again
3601 * @see VCreateReservation_r
3603 * @note DEMAND_ATTACH_FS only
3606 VCancelReservation_r(Volume * vp)
3608 assert(--vp->nWaiters >= 0);
3609 if (vp->nWaiters == 0) {
3611 if (!VCheckDetach(vp)) {
3618 /* check to see if we should free this volume now
3619 * return 1 if volume was freed, 0 otherwise */
3621 VCheckFree(Volume * vp)
3624 if ((vp->nUsers == 0) &&
3625 (vp->nWaiters == 0) &&
3626 !(V_attachFlags(vp) & (VOL_IN_HASH |
3630 ReallyFreeVolume(vp);
3635 #endif /* AFS_DEMAND_ATTACH_FS */
3638 /***************************************************/
3639 /* online volume operations routines */
3640 /***************************************************/
3642 #ifdef AFS_DEMAND_ATTACH_FS
3644 * register a volume operation on a given volume.
3646 * @param[in] vp volume object
3647 * @param[in] vopinfo volume operation info object
3649 * @pre VOL_LOCK is held
3651 * @post volume operation info object attached to volume object.
3652 * volume operation statistics updated.
3654 * @note by "attached" we mean a copy of the passed in object is made
3656 * @internal volume package internal use only
3659 VRegisterVolOp_r(Volume * vp, FSSYNC_VolOp_info * vopinfo)
3661 FSSYNC_VolOp_info * info;
3663 /* attach a vol op info node to the volume struct */
3664 info = (FSSYNC_VolOp_info *) malloc(sizeof(FSSYNC_VolOp_info));
3665 assert(info != NULL);
3666 memcpy(info, vopinfo, sizeof(FSSYNC_VolOp_info));
3667 vp->pending_vol_op = info;
3670 vp->stats.last_vol_op = FT_ApproxTime();
3671 vp->stats.vol_ops++;
3672 IncUInt64(&VStats.vol_ops);
3678 * deregister the volume operation attached to this volume.
3680 * @param[in] vp volume object pointer
3682 * @pre VOL_LOCK is held
3684 * @post the volume operation info object is detached from the volume object
3686 * @internal volume package internal use only
3689 VDeregisterVolOp_r(Volume * vp)
3691 if (vp->pending_vol_op) {
3692 free(vp->pending_vol_op);
3693 vp->pending_vol_op = NULL;
3697 #endif /* AFS_DEMAND_ATTACH_FS */
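/* Illustrative sketch (not part of the original source): registering
 * and clearing a pending volume operation.  Only the com.command and
 * com.reason fields are shown; a real FSSYNC_VolOp_info carries more
 * state than this, and the values chosen here are arbitrary. */
#if 0
static void
ExampleVolOp(Volume * vp)
{
    FSSYNC_VolOp_info info;

    memset(&info, 0, sizeof(info));
    info.com.command = FSYNC_VOL_NEEDVOLUME;
    info.com.reason = V_DUMP;

    VOL_LOCK;
    VRegisterVolOp_r(vp, &info);	/* a private copy is attached to vp */
    /* ... run the operation; the local info may now go out of scope ... */
    VDeregisterVolOp_r(vp);
    VOL_UNLOCK;
}
#endif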
3700 * determine whether it is safe to leave a volume online during
3701 * the volume operation described by the vopinfo object.
3703 * @param[in] vp volume object
3704 * @param[in] vopinfo volume operation info object
3706 * @return whether it is safe to leave volume online
3707 * @retval 0 it is NOT SAFE to leave the volume online
3708 * @retval 1 it is safe to leave the volume online during the operation
3711 * @arg VOL_LOCK is held
3712 * @arg disk header attached to vp (heavyweight ref on vp will guarantee
3713 * this condition is met)
3715 * @internal volume package internal use only
3718 VVolOpLeaveOnline_r(Volume * vp, FSSYNC_VolOp_info * vopinfo)
3720 return (vopinfo->com.command == FSYNC_VOL_NEEDVOLUME &&
3721 (vopinfo->com.reason == V_READONLY ||
3722 (!VolumeWriteable(vp) &&
3723 (vopinfo->com.reason == V_CLONE ||
3724 vopinfo->com.reason == V_DUMP))));
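/* Illustrative sketch (not part of the original source): how the
 * attach and get paths above consult this predicate before deciding
 * whether a pending operation needs the volume taken down. */
#if 0
static void
ExampleLeaveOnlineCheck(Volume * vp)
{
    if (vp->pending_vol_op && !VVolOpLeaveOnline_r(vp, vp->pending_vol_op)) {
	/* exclusive access required; mark the volume unattached,
	 * as attach2() does above */
	VChangeState_r(vp, VOL_STATE_UNATTACHED);
    }
}
#endif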
3728 * determine whether VBUSY should be set during this volume operation.
3730 * @param[in] vp volume object
3731 * @param[in] vopinfo volume operation info object
3733 * @return whether VBUSY should be set
3734 * @retval 0 VBUSY does NOT need to be set
3735 * @retval 1 VBUSY SHOULD be set
3737 * @pre VOL_LOCK is held
3739 * @internal volume package internal use only
3742 VVolOpSetVBusy_r(Volume * vp, FSSYNC_VolOp_info * vopinfo)
3744 return (vopinfo->com.command == FSYNC_VOL_NEEDVOLUME &&
3745 (vopinfo->com.reason == V_CLONE ||
3746 vopinfo->com.reason == V_DUMP));
3750 /***************************************************/
3751 /* online salvager routines */
3752 /***************************************************/
3753 #if defined(AFS_DEMAND_ATTACH_FS)
3754 #define SALVAGE_PRIO_UPDATE_INTERVAL 3 /**< number of seconds between prio updates */
3755 #define SALVAGE_COUNT_MAX 16 /**< number of online salvages we
3756 * allow before moving the volume
3757 * into a permanent error state
3759 * once this threshold is reached,
3760 * the operator will have to manually
3761 * issue a 'bos salvage' to bring
3762 * the volume back online
3766 * check whether a salvage needs to be performed on this volume.
3768 * @param[in] vp pointer to volume object
3770 * @return status code
3771 * @retval 0 no salvage scheduled
3772 * @retval 1 a salvage has been scheduled with the salvageserver
3774 * @pre VOL_LOCK is held
3776 * @post if salvage request flag is set and nUsers and nWaiters are zero,
3777 * then a salvage will be requested
3779 * @note this is one of the event handlers called by VCancelReservation_r
3781 * @see VCancelReservation_r
3783 * @internal volume package internal use only.
3786 VCheckSalvage(register Volume * vp)
3789 #ifdef SALVSYNC_BUILD_CLIENT
3790 if (vp->nUsers || vp->nWaiters)
3792 if (vp->salvage.requested) {
3793 VScheduleSalvage_r(vp);
3796 #endif /* SALVSYNC_BUILD_CLIENT */
3801 * request volume salvage.
3803 * @param[out] ec computed client error code
3804 * @param[in] vp volume object pointer
3805 * @param[in] reason reason code (passed to salvageserver via SALVSYNC)
3806 * @param[in] flags see flags note below
3809 * VOL_SALVAGE_INVALIDATE_HEADER causes volume header cache entry
3810 * to be invalidated.
3812 * @pre VOL_LOCK is held.
3814 * @post volume state is changed.
3815 * for fileserver, salvage will be requested once refcount reaches zero.
3817 * @return operation status code
3818 * @retval 0 volume salvage will occur
3819 * @retval 1 volume salvage could not be scheduled
3821 * @note DAFS fileserver only