 * Copyright 2000, International Business Machines Corporation and others.
 * This software has been released under the terms of the IBM Public
 * License.  For details, see the LICENSE file in the top-level source
 * directory or online at http://www.openafs.org/dl/license10.html
 * Portions Copyright (c) 2005-2008 Sine Nomine Associates

/* 1/1/89: NB: this stuff is all going to be replaced.  Don't take it too seriously */

	Institution:	The Information Technology Center, Carnegie-Mellon University

#include <afsconfig.h>
#include <afs/param.h>
#include <afs/afsint.h>
#include <sys/param.h>
#if !defined(AFS_SGI_ENV)
#else /* AFS_OSF_ENV */
#ifdef AFS_VFSINCL_ENV
#include <sys/fs/ufs_fs.h>
#if defined(AFS_DARWIN_ENV) || defined(AFS_XBSD_ENV)
#include <ufs/ufs/dinode.h>
#include <ufs/ffs/fs.h>
#else /* AFS_VFSINCL_ENV */
#if !defined(AFS_AIX_ENV) && !defined(AFS_LINUX20_ENV) && !defined(AFS_XBSD_ENV)
#endif /* AFS_VFSINCL_ENV */
#endif /* AFS_OSF_ENV */
#endif /* AFS_SGI_ENV */
#endif /* AFS_NT40_ENV */
#if defined(AFS_SUN_ENV) || defined(AFS_SUN5_ENV)
#include <sys/mnttab.h>
#include <sys/mntent.h>
#if defined(AFS_SGI_ENV)
#ifndef AFS_LINUX20_ENV
#include <fstab.h>	/* Need to find in libc 5, present in libc 6 */
#endif /* AFS_SGI_ENV */
#endif /* AFS_HPUX_ENV */
#include <netinet/in.h>
#endif /* ITIMER_REAL */
#endif /* AFS_NT40_ENV */
#if defined(AFS_SUN5_ENV) || defined(AFS_NT40_ENV) || defined(AFS_LINUX20_ENV)
#include <afs/errors.h>
#include <afs/afssyscalls.h>
#include <afs/afsutil.h>
#include "daemon_com.h"
#include "salvsync.h"
#include "partition.h"
#include "volume_inline.h"
#ifdef AFS_PTHREAD_ENV
#else /* AFS_PTHREAD_ENV */
#include "afs/assert.h"
#endif /* AFS_PTHREAD_ENV */
#if !defined(offsetof)
#define afs_stat	stat64
#define afs_fstat	fstat64
#define afs_open	open64
#else /* !O_LARGEFILE */
#define afs_stat	stat
#define afs_fstat	fstat
#define afs_open	open
#endif /* !O_LARGEFILE */
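
/* Usage sketch (illustrative, not from the original source): with the
 * mapping above, header files are opened the same way on large-file and
 * non-large-file builds.  Assuming a header path in `path`:
 *
 *     struct afs_stat status;
 *     int fd = afs_open(path, O_RDONLY);
 *     if (fd == -1 || afs_fstat(fd, &status) == -1)
 *         Log("VAttachVolume: Failed to open %s (errno %d)\n", path, errno);
 *
 * On O_LARGEFILE platforms this expands to open64()/fstat64(), so volume
 * headers remain accessible past 2 GiB offsets. */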
#ifdef AFS_PTHREAD_ENV
pthread_mutex_t vol_glock_mutex;
pthread_mutex_t vol_trans_mutex;
pthread_cond_t vol_put_volume_cond;
pthread_cond_t vol_sleep_cond;
int vol_attach_threads = 1;
#endif /* AFS_PTHREAD_ENV */

#ifdef AFS_DEMAND_ATTACH_FS
pthread_mutex_t vol_salvsync_mutex;
#endif /* AFS_DEMAND_ATTACH_FS */

extern void *calloc(), *realloc();

/*@printflike@*/ extern void Log(const char *format, ...);

/* Forward declarations */
static Volume *attach2(Error * ec, VolId vid, char *path,
                       register struct VolumeHeader *header,
                       struct DiskPartition64 *partp, Volume * vp,
                       int isbusy, int mode);
static void ReallyFreeVolume(Volume * vp);
#ifdef AFS_DEMAND_ATTACH_FS
static void FreeVolume(Volume * vp);
#else /* !AFS_DEMAND_ATTACH_FS */
#define FreeVolume(vp) ReallyFreeVolume(vp)
static void VScanUpdateList(void);
#endif /* !AFS_DEMAND_ATTACH_FS */
static void VInitVolumeHeaderCache(afs_uint32 howMany);
static int GetVolumeHeader(register Volume * vp);
static void ReleaseVolumeHeader(register struct volHeader *hd);
static void FreeVolumeHeader(register Volume * vp);
static void AddVolumeToHashTable(register Volume * vp, int hashid);
static void DeleteVolumeFromHashTable(register Volume * vp);
static int VHold(Volume * vp);
static int VHold_r(Volume * vp);
static void VGetBitmap_r(Error * ec, Volume * vp, VnodeClass class);
static void VReleaseVolumeHandles_r(Volume * vp);
static void VCloseVolumeHandles_r(Volume * vp);
static void LoadVolumeHeader(Error * ec, Volume * vp);
static int VCheckOffline(register Volume * vp);
static int VCheckDetach(register Volume * vp);
static Volume * GetVolume(Error * ec, Error * client_ec, VolId volumeId, Volume * hint, int flags);
#ifdef AFS_DEMAND_ATTACH_FS
static int VolumeExternalName_r(VolumeId volumeId, char * name, size_t len);

int LogLevel;			/* Vice loglevel--not defined as extern so that it will be
				 * defined when not linked with vice, XXXX */
ProgramType programType;	/* The type of program using the package */

/* extended volume package statistics */
#ifdef VOL_LOCK_DEBUG
pthread_t vol_glock_holder = 0;

#define VOLUME_BITMAP_GROWSIZE	16	/* bytes, => 128 vnodes */
					/* Must be a multiple of 4 (1 word)!! */
/* this parameter needs to be tunable at runtime.
 * 128 was really inadequate for largish servers -- at 16384 volumes this
 * puts average chain length at 128, thus an average of 65 derefs to find a volptr.
 * talk about bad spatial locality...
 *
 * an AVL or splay tree might work a lot better, but we'll just increase
 * the default hash table size for now
 */
#define DEFAULT_VOLUME_HASH_SIZE 256	/* Must be a power of 2!! */
#define DEFAULT_VOLUME_HASH_MASK (DEFAULT_VOLUME_HASH_SIZE-1)
#define VOLUME_HASH(volumeId) (volumeId&(VolumeHashTable.Mask))
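
/* Worked example (illustrative): with the default 256-bucket table the
 * mask is 0xff, so volume id 536870913 (0x20000001) hashes to bucket
 * 0x20000001 & 0xff == 1; only the low-order bits of the volume id
 * select the chain. */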
/*
 * turn volume hash chains into partially ordered lists.
 * when the threshold is exceeded between two adjacent elements,
 * perform a chain rebalancing operation.
 *
 * keep the threshold high in order to keep cache line invalidates
 * low "enough" on SMPs
 */
#define VOLUME_HASH_REORDER_THRESHOLD 200

/*
 * when possible, don't just reorder single elements, but reorder
 * entire chains of elements at once.  a chain of elements that
 * exceeds the element previous to the pivot by at least CHAIN_THRESH
 * accesses is moved in front of the chain whose elements have at
 * least CHAIN_THRESH fewer accesses than the pivot element
 */
#define VOLUME_HASH_REORDER_CHAIN_THRESH (VOLUME_HASH_REORDER_THRESHOLD / 2)
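
/* Worked example (illustrative): with a threshold of 200, CHAIN_THRESH
 * is 100.  If two adjacent elements late in a chain have 620 and 640
 * recorded accesses while the element ahead of them has only 500, both
 * hot elements exceed it by at least CHAIN_THRESH, so the pair can be
 * moved ahead of it as one chain rather than one element at a time. */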
#include "rx/rx_queue.h"

VolumeHashTable_t VolumeHashTable = {
    DEFAULT_VOLUME_HASH_SIZE,
    DEFAULT_VOLUME_HASH_MASK,

static void VInitVolumeHash(void);

/* This macro is used where an ffs() call does not exist. Was in util/ffs.c */
    afs_int32 ffs_tmp = x;
    for (ffs_i = 1;; ffs_i++) {
#endif /* !AFS_HAVE_FFS */
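
/* The body of the ffs() fallback is elided above.  A minimal sketch of a
 * find-first-set routine of that shape (illustrative only; the name
 * example_ffs is hypothetical, not the original text): */
#if 0
static int
example_ffs(afs_int32 x)
{
    afs_int32 ffs_i;
    afs_int32 ffs_tmp = x;
    if (ffs_tmp == 0)
        return -1;              /* no bit set */
    for (ffs_i = 1;; ffs_i++) { /* bit positions are 1-based */
        if (ffs_tmp & 1)
            return ffs_i;       /* lowest set bit found */
        ffs_tmp >>= 1;
    }
}
#endif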
#ifdef AFS_PTHREAD_ENV
typedef struct diskpartition_queue_t {
    struct rx_queue queue;
    struct DiskPartition64 * diskP;
} diskpartition_queue_t;
typedef struct vinitvolumepackage_thread_t {
    struct rx_queue queue;
    pthread_cond_t thread_done_cv;
    int n_threads_complete;
} vinitvolumepackage_thread_t;
static void * VInitVolumePackageThread(void * args);
#endif /* AFS_PTHREAD_ENV */

static int VAttachVolumesByPartition(struct DiskPartition64 *diskP,
                                     int * nAttached, int * nUnattached);

#ifdef AFS_DEMAND_ATTACH_FS
/* demand attach fileserver extensions */

/*
 * in the future we will support serialization of VLRU state into the fs_state
 * file.
 *
 * these structures are the beginning of that effort
 */
struct VLRU_DiskHeader {
    struct versionStamp stamp;	/* magic and structure version number */
    afs_uint32 mtime;		/* time of dump to disk */
    afs_uint32 num_records;	/* number of VLRU_DiskEntry records */

struct VLRU_DiskEntry {
    afs_uint32 vid;		/* volume ID */
    afs_uint32 idx;		/* generation */
    afs_uint32 last_get;	/* timestamp of last get */

struct VLRU_StartupQueue {
    struct VLRU_DiskEntry * entry;

typedef struct vshutdown_thread_t {
    pthread_mutex_t lock;
    pthread_cond_t master_cv;
    int n_threads_complete;
    int schedule_version;
    byte n_parts_done_pass;
    byte part_thread_target[VOLMAXPARTS+1];
    byte part_done_pass[VOLMAXPARTS+1];
    struct rx_queue * part_pass_head[VOLMAXPARTS+1];
    int stats[4][VOLMAXPARTS+1];
} vshutdown_thread_t;
static void * VShutdownThread(void * args);

static Volume * VAttachVolumeByVp_r(Error * ec, Volume * vp, int mode);
static int VCheckFree(Volume * vp);

static void AddVolumeToVByPList_r(Volume * vp);
static void DeleteVolumeFromVByPList_r(Volume * vp);
static void VVByPListBeginExclusive_r(struct DiskPartition64 * dp);
static void VVByPListEndExclusive_r(struct DiskPartition64 * dp);
static void VVByPListWait_r(struct DiskPartition64 * dp);

/* online salvager */
static int VCheckSalvage(register Volume * vp);
static int VUpdateSalvagePriority_r(Volume * vp);
static int VScheduleSalvage_r(Volume * vp);

/* Volume hash table */
static void VReorderHash_r(VolumeHashChainHead * head, Volume * pp, Volume * vp);
static void VHashBeginExclusive_r(VolumeHashChainHead * head);
static void VHashEndExclusive_r(VolumeHashChainHead * head);
static void VHashWait_r(VolumeHashChainHead * head);

static int ShutdownVByPForPass_r(struct DiskPartition64 * dp, int pass);
static int ShutdownVolumeWalk_r(struct DiskPartition64 * dp, int pass,
                                struct rx_queue ** idx);
static void ShutdownController(vshutdown_thread_t * params);
static void ShutdownCreateSchedule(vshutdown_thread_t * params);

static void VLRU_ComputeConstants(void);
static void VInitVLRU(void);
static void VLRU_Init_Node_r(Volume * vp);
static void VLRU_Add_r(Volume * vp);
static void VLRU_Delete_r(Volume * vp);
static void VLRU_UpdateAccess_r(Volume * vp);
static void * VLRU_ScannerThread(void * args);
static void VLRU_Scan_r(int idx);
static void VLRU_Promote_r(int idx);
static void VLRU_Demote_r(int idx);
static void VLRU_SwitchQueues(Volume * vp, int new_idx, int append);

static int VCheckSoftDetach(Volume * vp, afs_uint32 thresh);
static int VCheckSoftDetachCandidate(Volume * vp, afs_uint32 thresh);
static int VSoftDetachVolume_r(Volume * vp, afs_uint32 thresh);

pthread_key_t VThread_key;
VThreadOptions_t VThread_defaults = {
    0                           /**< allow salvsync */
#endif /* AFS_DEMAND_ATTACH_FS */

struct Lock vol_listLock;	/* Lock obtained when listing volumes:
				 * prevents a volume from being missed
				 * if the volume is attached during a

static int TimeZoneCorrection;	/* Number of seconds west of GMT */

/* Common message used when the volume goes off line */
char *VSalvageMessage =
    "Files in this volume are currently unavailable; call operations";

int VInit;			/* 0 - uninitialized,
				 * 1 - initialized but not all volumes have been attached,
				 * 2 - initialized and all volumes have been attached,
				 * 3 - initialized, all volumes have been attached, and
				 * VConnectFS() has completed. */
bit32 VolumeCacheCheck;		/* Incremented every time a volume goes on line--
				 * used to stamp volume headers and in-core
				 * vnodes.  When the volume goes on-line the
				 * vnode will be invalidated.
				 * access only with VOL_LOCK held */
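
/* Illustrative note (not from the original source): attach2() below
 * stamps each volume with vp->cacheCheck = ++VolumeCacheCheck at attach
 * time, so cached headers and vnodes carrying an older cacheCheck value
 * can be detected as stale after a volume goes off-line and returns. */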
/***************************************************/
/* Startup routines                                */
/***************************************************/

VInitVolumePackage(ProgramType pt, afs_uint32 nLargeVnodes, afs_uint32 nSmallVnodes,
                   int connect, afs_uint32 volcache)
    int errors = 0;		/* Number of errors while finding vice partitions. */

    memset(&VStats, 0, sizeof(VStats));
    VStats.hdr_cache_size = 200;

    VInitPartitionPackage();
#ifdef AFS_DEMAND_ATTACH_FS
    if (programType == fileServer) {
        VLRU_SetOptions(VLRU_SET_ENABLED, 0);
    assert(pthread_key_create(&VThread_key, NULL) == 0);

#ifdef AFS_PTHREAD_ENV
    assert(pthread_mutex_init(&vol_glock_mutex, NULL) == 0);
    assert(pthread_mutex_init(&vol_trans_mutex, NULL) == 0);
    assert(pthread_cond_init(&vol_put_volume_cond, NULL) == 0);
    assert(pthread_cond_init(&vol_sleep_cond, NULL) == 0);
#else /* AFS_PTHREAD_ENV */
#endif /* AFS_PTHREAD_ENV */
    Lock_Init(&vol_listLock);

    srandom(time(0));		/* For VGetVolumeInfo */
    gettimeofday(&tv, &tz);
    TimeZoneCorrection = tz.tz_minuteswest * 60;

#ifdef AFS_DEMAND_ATTACH_FS
    assert(pthread_mutex_init(&vol_salvsync_mutex, NULL) == 0);
#endif /* AFS_DEMAND_ATTACH_FS */

    /* Ok, we have done enough initialization that fileserver can
     * start accepting calls, even though the volumes may not be
     * available just yet.
     */

#if defined(AFS_DEMAND_ATTACH_FS) && defined(SALVSYNC_BUILD_SERVER)
    if (programType == salvageServer) {
#endif /* AFS_DEMAND_ATTACH_FS */
#ifdef FSSYNC_BUILD_SERVER
    if (programType == fileServer) {
#if defined(AFS_DEMAND_ATTACH_FS) && defined(SALVSYNC_BUILD_CLIENT)
    if (programType == fileServer) {
        /* establish a connection to the salvager at this point */
        assert(VConnectSALV() != 0);
#endif /* AFS_DEMAND_ATTACH_FS */

    if (volcache > VStats.hdr_cache_size)
        VStats.hdr_cache_size = volcache;
    VInitVolumeHeaderCache(VStats.hdr_cache_size);

    VInitVnodes(vLarge, nLargeVnodes);
    VInitVnodes(vSmall, nSmallVnodes);

    errors = VAttachPartitions();

    if (programType == fileServer) {
        struct DiskPartition64 *diskP;
#ifdef AFS_PTHREAD_ENV
        struct vinitvolumepackage_thread_t params;
        struct diskpartition_queue_t * dpq;
        int i, threads, parts;
        pthread_attr_t attrs;

        assert(pthread_cond_init(&params.thread_done_cv, NULL) == 0);
        params.n_threads_complete = 0;

        /* create partition work queue */
        for (parts = 0, diskP = DiskPartitionList; diskP; diskP = diskP->next, parts++) {
            dpq = (diskpartition_queue_t *) malloc(sizeof(struct diskpartition_queue_t));
            queue_Append(&params, dpq);

        threads = MIN(parts, vol_attach_threads);

        /* spawn off a bunch of initialization threads */
        assert(pthread_attr_init(&attrs) == 0);
        assert(pthread_attr_setdetachstate(&attrs, PTHREAD_CREATE_DETACHED) == 0);

        Log("VInitVolumePackage: beginning parallel fileserver startup\n");
#ifdef AFS_DEMAND_ATTACH_FS
        Log("VInitVolumePackage: using %d threads to pre-attach volumes on %d partitions\n",
#else /* AFS_DEMAND_ATTACH_FS */
        Log("VInitVolumePackage: using %d threads to attach volumes on %d partitions\n",
#endif /* AFS_DEMAND_ATTACH_FS */

        for (i = 0; i < threads; i++) {
            assert(pthread_create
                   (&tid, &attrs, &VInitVolumePackageThread,

        while (params.n_threads_complete < threads) {
            VOL_CV_WAIT(&params.thread_done_cv);

        assert(pthread_attr_destroy(&attrs) == 0);

        /* if we're only going to run one init thread, don't bother creating
        Log("VInitVolumePackage: beginning single-threaded fileserver startup\n");
#ifdef AFS_DEMAND_ATTACH_FS
        Log("VInitVolumePackage: using 1 thread to pre-attach volumes on %d partition(s)\n",
#else /* AFS_DEMAND_ATTACH_FS */
        Log("VInitVolumePackage: using 1 thread to attach volumes on %d partition(s)\n",
#endif /* AFS_DEMAND_ATTACH_FS */

        VInitVolumePackageThread(&params);

        assert(pthread_cond_destroy(&params.thread_done_cv) == 0);
#else /* AFS_PTHREAD_ENV */

        /* Attach all the volumes in this partition */
        for (diskP = DiskPartitionList; diskP; diskP = diskP->next) {
            int nAttached = 0, nUnattached = 0;
            assert(VAttachVolumesByPartition(diskP, &nAttached, &nUnattached) == 0);
#endif /* AFS_PTHREAD_ENV */

    VInit = 2;			/* Initialized, and all volumes have been attached */
#ifdef FSSYNC_BUILD_CLIENT
    if (programType == volumeUtility && connect) {
            Log("Unable to connect to file server; will retry at need\n");
#ifdef AFS_DEMAND_ATTACH_FS
    else if (programType == salvageServer) {
            Log("Unable to connect to file server; aborted\n");
#endif /* AFS_DEMAND_ATTACH_FS */
#endif /* FSSYNC_BUILD_CLIENT */

#ifdef AFS_PTHREAD_ENV
VInitVolumePackageThread(void * args) {
    struct DiskPartition64 *diskP;
    struct vinitvolumepackage_thread_t * params;
    struct diskpartition_queue_t * dpq;

    params = (vinitvolumepackage_thread_t *) args;

    /* Attach all the volumes in this partition */
    while (queue_IsNotEmpty(params)) {
        int nAttached = 0, nUnattached = 0;

        dpq = queue_First(params, diskpartition_queue_t);
        assert(VAttachVolumesByPartition(diskP, &nAttached, &nUnattached) == 0);

    params->n_threads_complete++;
    pthread_cond_signal(&params->thread_done_cv);
#endif /* AFS_PTHREAD_ENV */

/*
 * attach all volumes on a given disk partition
 */
VAttachVolumesByPartition(struct DiskPartition64 *diskP, int * nAttached, int * nUnattached)
    Log("Partition %s: attaching volumes\n", diskP->name);
    dirp = opendir(VPartitionPath(diskP));
        Log("opendir on Partition %s failed!\n", diskP->name);
    while ((dp = readdir(dirp))) {
        p = strrchr(dp->d_name, '.');
        if (p != NULL && strcmp(p, VHDREXT) == 0) {
#ifdef AFS_DEMAND_ATTACH_FS
            vp = VPreAttachVolumeByName(&error, diskP->name, dp->d_name);
#else /* AFS_DEMAND_ATTACH_FS */
            vp = VAttachVolumeByName(&error, diskP->name, dp->d_name,
#endif /* AFS_DEMAND_ATTACH_FS */
            (*(vp ? nAttached : nUnattached))++;
            if (error == VOFFLINE)
                Log("Volume %d stays offline (/vice/offline/%s exists)\n", VolumeNumber(dp->d_name), dp->d_name);
            else if (LogLevel >= 5) {
                Log("Partition %s: attached volume %d (%s)\n",
                    diskP->name, VolumeNumber(dp->d_name),
#if !defined(AFS_DEMAND_ATTACH_FS)
#endif /* AFS_DEMAND_ATTACH_FS */

    Log("Partition %s: attached %d volumes; %d volumes not attached\n", diskP->name, *nAttached, *nUnattached);

/***************************************************/
/* Shutdown routines                               */
/***************************************************/

/*
 * highly multithreaded volume package shutdown
 *
 * with the demand attach fileserver extensions,
 * VShutdown has been modified to be multithreaded.
 * In order to achieve optimal use of many threads,
 * the shutdown code involves one control thread and
 * n shutdown worker threads.  The control thread
 * periodically examines the number of volumes available
 * for shutdown on each partition, and produces a worker
 * thread allocation schedule.  The idea is to eliminate
 * redundant scheduling computation on the workers by
 * having a single master scheduler.
 *
 * The scheduler's objectives are:
 * (1) fairness
 *     each partition with volumes remaining gets allocated
 *     at least 1 thread (assuming sufficient threads)
 * (2) performance
 *     threads are allocated proportional to the number of
 *     volumes remaining to be offlined.  This ensures that
 *     the OS I/O scheduler has many requests to elevator
 *     seek on partitions that will (presumably) take the
 *     longest amount of time (from now) to finish shutdown
 * (3) keep threads busy
 *     when there are extra threads, they are assigned to
 *     partitions using a simple round-robin algorithm
 *
 * In the future, we may wish to add the ability to adapt
 * to the relative performance patterns of each disk
 * partition.
 *
 * multi-step shutdown process
 *
 * demand attach shutdown is a four-step process.  Each
 * shutdown "pass" shuts down increasingly more difficult
 * volumes.  The main purpose is to achieve better cache
 * utilization during shutdown.
 *
 * pass 0
 *     shutdown volumes in the unattached, pre-attached
 *     and error states
 * pass 1
 *     shutdown attached volumes with cached volume headers
 * pass 2
 *     shutdown all volumes in non-exclusive states
 * pass 3
 *     shutdown all remaining volumes
 */
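
/* Worked example (illustrative; the partition names are hypothetical):
 * with 8 shutdown threads and partitions /vicepa, /vicepb, /vicepc
 * holding 100, 50, and 10 volumes, the per-thread workload is
 * 160/8 = 20 volumes.  Fairness first hands each partition one thread;
 * the length-weighted step then raises /vicepa to 100/20 = 5 threads
 * and /vicepb to 50/20 = 2, leaving /vicepc at 1, for a total of 8.
 * Any leftover threads would be spread round-robin. */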
    register Volume *vp, *np;
    register afs_int32 code;
#ifdef AFS_DEMAND_ATTACH_FS
    struct DiskPartition64 * diskP;
    struct diskpartition_queue_t * dpq;
    vshutdown_thread_t params;
    pthread_attr_t attrs;

    memset(&params, 0, sizeof(vshutdown_thread_t));

    for (params.n_parts = 0, diskP = DiskPartitionList;
         diskP; diskP = diskP->next, params.n_parts++);

    Log("VShutdown: shutting down on-line volumes on %d partition%s...\n",
        params.n_parts, params.n_parts > 1 ? "s" : "");

    if (vol_attach_threads > 1) {
        /* prepare for parallel shutdown */
        params.n_threads = vol_attach_threads;
        assert(pthread_mutex_init(&params.lock, NULL) == 0);
        assert(pthread_cond_init(&params.cv, NULL) == 0);
        assert(pthread_cond_init(&params.master_cv, NULL) == 0);
        assert(pthread_attr_init(&attrs) == 0);
        assert(pthread_attr_setdetachstate(&attrs, PTHREAD_CREATE_DETACHED) == 0);

        /* setup the basic partition information structures for
         * parallel shutdown */
        for (diskP = DiskPartitionList; diskP; diskP = diskP->next) {
            struct rx_queue * qp, * nqp;

            VVByPListWait_r(diskP);
            VVByPListBeginExclusive_r(diskP);

            for (queue_Scan(&diskP->vol_list, qp, nqp, rx_queue)) {
                vp = (Volume *)((char *)qp - offsetof(Volume, vol_list));

            Log("VShutdown: partition %s has %d volumes with attached headers\n",
                VPartitionPath(diskP), count);

            /* build up the pass 0 shutdown work queue */
            dpq = (struct diskpartition_queue_t *) malloc(sizeof(struct diskpartition_queue_t));
            queue_Prepend(&params, dpq);

            params.part_pass_head[diskP->index] = queue_First(&diskP->vol_list, rx_queue);

        Log("VShutdown: beginning parallel fileserver shutdown\n");
        Log("VShutdown: using %d threads to offline volumes on %d partition%s\n",
            vol_attach_threads, params.n_parts, params.n_parts > 1 ? "s" : "" );

        /* do pass 0 shutdown */
        assert(pthread_mutex_lock(&params.lock) == 0);
        for (i = 0; i < params.n_threads; i++) {
            assert(pthread_create
                   (&tid, &attrs, &VShutdownThread,

        /* wait for all the pass 0 shutdowns to complete */
        while (params.n_threads_complete < params.n_threads) {
            assert(pthread_cond_wait(&params.master_cv, &params.lock) == 0);
        params.n_threads_complete = 0;
        assert(pthread_cond_broadcast(&params.cv) == 0);
        assert(pthread_mutex_unlock(&params.lock) == 0);

        Log("VShutdown: pass 0 completed using the 1 thread per partition algorithm\n");
        Log("VShutdown: starting passes 1 through 3 using finely-granular mp-fast algorithm\n");

        /* run the parallel shutdown scheduler. it will drop the glock internally */
        ShutdownController(&params);

        /* wait for all the workers to finish pass 3 and terminate */
        while (params.pass < 4) {
            VOL_CV_WAIT(&params.cv);

        assert(pthread_attr_destroy(&attrs) == 0);
        assert(pthread_cond_destroy(&params.cv) == 0);
        assert(pthread_cond_destroy(&params.master_cv) == 0);
        assert(pthread_mutex_destroy(&params.lock) == 0);

        /* drop the VByPList exclusive reservations */
        for (diskP = DiskPartitionList; diskP; diskP = diskP->next) {
            VVByPListEndExclusive_r(diskP);
            Log("VShutdown: %s stats : (pass[0]=%d, pass[1]=%d, pass[2]=%d, pass[3]=%d)\n",
                VPartitionPath(diskP),
                params.stats[0][diskP->index],
                params.stats[1][diskP->index],
                params.stats[2][diskP->index],
                params.stats[3][diskP->index]);

        Log("VShutdown: shutdown finished using %d threads\n", params.n_threads);

        /* if we're only going to run one shutdown thread, don't bother creating
        Log("VShutdown: beginning single-threaded fileserver shutdown\n");

        for (diskP = DiskPartitionList; diskP; diskP = diskP->next) {
            VShutdownByPartition_r(diskP);

    Log("VShutdown: complete.\n");
#else /* AFS_DEMAND_ATTACH_FS */
    Log("VShutdown: shutting down on-line volumes...\n");
    for (i = 0; i < VolumeHashTable.Size; i++) {
        /* try to hold first volume in the hash table */
        for (queue_Scan(&VolumeHashTable.Table[i], vp, np, Volume)) {
                Log("VShutdown: Attempting to take volume %u offline.\n",
                /* next, take the volume offline (drops reference count) */
                VOffline_r(vp, "File server was shut down");
    Log("VShutdown: complete.\n");
#endif /* AFS_DEMAND_ATTACH_FS */

#ifdef AFS_DEMAND_ATTACH_FS
/*
 * shutdown control thread
 */
ShutdownController(vshutdown_thread_t * params)
    struct DiskPartition64 * diskP;
    vshutdown_thread_t shadow;

    ShutdownCreateSchedule(params);

    while ((params->pass < 4) &&
           (params->n_threads_complete < params->n_threads)) {
        /* recompute schedule once per second */
        memcpy(&shadow, params, sizeof(vshutdown_thread_t));

        Log("ShutdownController: schedule version=%d, vol_remaining=%d, pass=%d\n",
            shadow.schedule_version, shadow.vol_remaining, shadow.pass);
        Log("ShutdownController: n_threads_complete=%d, n_parts_done_pass=%d\n",
            shadow.n_threads_complete, shadow.n_parts_done_pass);
        for (diskP = DiskPartitionList; diskP; diskP = diskP->next) {
            Log("ShutdownController: part[%d] : (len=%d, thread_target=%d, done_pass=%d, pass_head=%p)\n",
                shadow.part_thread_target[id],
                shadow.part_done_pass[id],
                shadow.part_pass_head[id]);

        ShutdownCreateSchedule(params);

/* create the shutdown thread work schedule.
 * this scheduler tries to implement fairness
 * by allocating at least 1 thread to each
 * partition with volumes to be shutdown,
 * and then it attempts to allocate remaining
 * threads based upon the amount of work left
 */
ShutdownCreateSchedule(vshutdown_thread_t * params)
    struct DiskPartition64 * diskP;
    int sum, thr_workload, thr_left;
    int part_residue[VOLMAXPARTS+1];

    /* compute the total number of outstanding volumes */
    for (diskP = DiskPartitionList; diskP; diskP = diskP->next) {
        sum += diskP->vol_list.len;

    params->schedule_version++;
    params->vol_remaining = sum;

    /* compute average per-thread workload */
    thr_workload = sum / params->n_threads;
    if (sum % params->n_threads)

    thr_left = params->n_threads;
    memset(&part_residue, 0, sizeof(part_residue));

    /* for fairness, give every partition with volumes remaining
     * at least one thread */
    for (diskP = DiskPartitionList; diskP && thr_left; diskP = diskP->next) {
        if (diskP->vol_list.len) {
            params->part_thread_target[id] = 1;
            params->part_thread_target[id] = 0;

    if (thr_left && thr_workload) {
        /* compute length-weighted workloads */
        for (diskP = DiskPartitionList; diskP && thr_left; diskP = diskP->next) {
            delta = (diskP->vol_list.len / thr_workload) -
                params->part_thread_target[id];
            if (delta < thr_left) {
                params->part_thread_target[id] += delta;
                params->part_thread_target[id] += thr_left;

        /* try to assign any leftover threads to partitions that
         * had volume lengths closer to needing thread_target+1 */
        int max_residue, max_id;

        /* compute the residues */
        for (diskP = DiskPartitionList; diskP; diskP = diskP->next) {
            part_residue[id] = diskP->vol_list.len -
                (params->part_thread_target[id] * thr_workload);

        /* now try to allocate remaining threads to partitions with the
         * highest residues */
        for (diskP = DiskPartitionList; diskP; diskP = diskP->next) {
            if (part_residue[id] > max_residue) {
                max_residue = part_residue[id];

            params->part_thread_target[max_id]++;
            part_residue[max_id] = 0;

        /* punt and give any remaining threads equally to each partition */
        if (thr_left >= params->n_parts) {
            alloc = thr_left / params->n_parts;
            for (diskP = DiskPartitionList; diskP; diskP = diskP->next) {
                params->part_thread_target[id] += alloc;

            /* finish off the last of the threads */
            for (diskP = DiskPartitionList; thr_left && diskP; diskP = diskP->next) {
                params->part_thread_target[id]++;
/* worker thread for parallel shutdown */
VShutdownThread(void * args)
    struct rx_queue *qp;
    vshutdown_thread_t * params;
    int part, code, found, pass, schedule_version_save, count;
    struct DiskPartition64 *diskP;
    struct diskpartition_queue_t * dpq;

    params = (vshutdown_thread_t *) args;

    /* acquire the shutdown pass 0 lock */
    assert(pthread_mutex_lock(&params->lock) == 0);

    /* if there's still pass 0 work to be done,
     * get a work entry, and do a pass 0 shutdown */
    if (queue_IsNotEmpty(params)) {
        dpq = queue_First(params, diskpartition_queue_t);
        assert(pthread_mutex_unlock(&params->lock) == 0);

        while (ShutdownVolumeWalk_r(diskP, 0, &params->part_pass_head[id]))
        params->stats[0][diskP->index] = count;
        assert(pthread_mutex_lock(&params->lock) == 0);

    params->n_threads_complete++;
    if (params->n_threads_complete == params->n_threads) {
        /* notify control thread that all workers have completed pass 0 */
        assert(pthread_cond_signal(&params->master_cv) == 0);
    while (params->pass == 0) {
        assert(pthread_cond_wait(&params->cv, &params->lock) == 0);
    assert(pthread_mutex_unlock(&params->lock) == 0);

    pass = params->pass;

    /* now escalate through the more complicated shutdowns */
        schedule_version_save = params->schedule_version;

        /* find a disk partition to work on */
        for (diskP = DiskPartitionList; diskP; diskP = diskP->next) {
            if (params->part_thread_target[id] && !params->part_done_pass[id]) {
                params->part_thread_target[id]--;

            /* hmm. for some reason the controller thread couldn't find anything for
             * us to do. let's see if there's anything we can do */
            for (diskP = DiskPartitionList; diskP; diskP = diskP->next) {
                if (diskP->vol_list.len && !params->part_done_pass[id]) {
                } else if (!params->part_done_pass[id]) {
                    params->part_done_pass[id] = 1;
                    params->n_parts_done_pass++;
                    Log("VShutdown: done shutting down volumes on partition %s.\n",
                        VPartitionPath(diskP));

            /* do work on this partition until either the controller
             * creates a new schedule, or we run out of things to do
             * on this partition */
            while (!params->part_done_pass[id] &&
                   (schedule_version_save == params->schedule_version)) {
                /* ShutdownVolumeWalk_r will drop the glock internally */
                if (!ShutdownVolumeWalk_r(diskP, pass, &params->part_pass_head[id])) {
                    if (!params->part_done_pass[id]) {
                        params->part_done_pass[id] = 1;
                        params->n_parts_done_pass++;
                        Log("VShutdown: done shutting down volumes on partition %s.\n",
                            VPartitionPath(diskP));

            params->stats[pass][id] += count;

        /* ok, everyone is done this pass, proceed */
        params->n_threads_complete++;
        while (params->pass == pass) {
            if (params->n_threads_complete == params->n_threads) {
                /* we are the last thread to complete, so we will
                 * reinitialize worker pool state for the next pass */
                params->n_threads_complete = 0;
                params->n_parts_done_pass = 0;
                for (diskP = DiskPartitionList; diskP; diskP = diskP->next) {
                    params->part_done_pass[id] = 0;
                    params->part_pass_head[id] = queue_First(&diskP->vol_list, rx_queue);

                /* compute a new thread schedule before releasing all the workers */
                ShutdownCreateSchedule(params);

                /* wake up all the workers */
                assert(pthread_cond_broadcast(&params->cv) == 0);

                Log("VShutdown: pass %d completed using %d threads on %d partitions\n",
                    pass, params->n_threads, params->n_parts);
                VOL_CV_WAIT(&params->cv);
        pass = params->pass;

/* shut down all volumes on a given disk partition
 *
 * note that this function will not allow mp-fast
 * shutdown of a partition */
VShutdownByPartition_r(struct DiskPartition64 * dp)
    /* wait for other exclusive ops to finish */
    VVByPListWait_r(dp);

    /* begin exclusive access */
    VVByPListBeginExclusive_r(dp);

    /* pick the low-hanging fruit first,
     * then do the complicated ones last
     * (has the advantage of keeping
     * in-use volumes up until the bitter end) */
    for (pass = 0, total = 0; pass < 4; pass++) {
        pass_stats[pass] = ShutdownVByPForPass_r(dp, pass);
        total += pass_stats[pass];

    /* end exclusive access */
    VVByPListEndExclusive_r(dp);

    Log("VShutdownByPartition: shut down %d volumes on %s (pass[0]=%d, pass[1]=%d, pass[2]=%d, pass[3]=%d)\n",
        total, VPartitionPath(dp), pass_stats[0], pass_stats[1], pass_stats[2], pass_stats[3]);

/* internal shutdown functionality
 *
 * for multi-pass shutdown:
 * 0 to only "shutdown" {pre,un}attached and error state volumes
 * 1 to also shutdown attached volumes w/ volume header loaded
 * 2 to also shutdown attached volumes w/o volume header loaded
 * 3 to also shutdown exclusive state volumes
 *
 * caller MUST hold exclusive access on the hash chain
 * because we drop vol_glock_mutex internally
 *
 * this function is reentrant for passes 1--3
 * (e.g. multiple threads can cooperate to
 * shutdown a partition mp-fast)
 *
 * pass 0 is not scalable because the volume state data is
 * synchronized by vol_glock mutex, and the locking overhead
 * is too high to drop the lock long enough to do linked list
 * traversal in a sane amount of time
 */
ShutdownVByPForPass_r(struct DiskPartition64 * dp, int pass)
    struct rx_queue * q = queue_First(&dp->vol_list, rx_queue);

    while (ShutdownVolumeWalk_r(dp, pass, &q))

/* conditionally shutdown one volume on partition dp
 * returns 1 if a volume was shutdown in this pass,
 * 0 otherwise */
ShutdownVolumeWalk_r(struct DiskPartition64 * dp, int pass,
                     struct rx_queue ** idx)
    struct rx_queue *qp, *nqp;

    for (queue_ScanFrom(&dp->vol_list, qp, qp, nqp, rx_queue)) {
        vp = (Volume *) (((char *)qp) - offsetof(Volume, vol_list));

            if ((V_attachState(vp) != VOL_STATE_UNATTACHED) &&
                (V_attachState(vp) != VOL_STATE_ERROR) &&
                (V_attachState(vp) != VOL_STATE_PREATTACHED)) {

            if ((V_attachState(vp) == VOL_STATE_ATTACHED) &&
                (vp->header == NULL)) {

            if (VIsExclusiveState(V_attachState(vp))) {

        DeleteVolumeFromVByPList_r(vp);
        VShutdownVolume_r(vp);

/*
 * shutdown a specific volume
 */
/* caller MUST NOT hold a heavyweight ref on vp */
VShutdownVolume_r(Volume * vp)
    VCreateReservation_r(vp);

    if (LogLevel >= 5) {
        Log("VShutdownVolume_r: vid=%u, device=%d, state=%hu\n",
            vp->hashid, vp->partition->device, V_attachState(vp));

    /* wait for other blocking ops to finish */
    VWaitExclusiveState_r(vp);

    assert(VIsValidState(V_attachState(vp)));

    switch(V_attachState(vp)) {
    case VOL_STATE_SALVAGING:
        /* Leave salvaging volumes alone. Any in-progress salvages will
         * continue working after viced shuts down. This is intentional.
         */

    case VOL_STATE_PREATTACHED:
    case VOL_STATE_ERROR:
        VChangeState_r(vp, VOL_STATE_UNATTACHED);
    case VOL_STATE_UNATTACHED:
    case VOL_STATE_GOING_OFFLINE:
    case VOL_STATE_SHUTTING_DOWN:
    case VOL_STATE_ATTACHED:
            Log("VShutdown: Attempting to take volume %u offline.\n",
        /* take the volume offline (drops reference count) */
        VOffline_r(vp, "File server was shut down");

    VCancelReservation_r(vp);
#endif /* AFS_DEMAND_ATTACH_FS */

/***************************************************/
/* Header I/O routines                             */
/***************************************************/

/* open a descriptor for the inode (h),
 * read in an on-disk structure into buffer (to) of size (size),
 * verify versionstamp in structure has magic (magic) and
 * optionally verify version (version) if (version) is nonzero
 */
ReadHeader(Error * ec, IHandle_t * h, char *to, int size, bit32 magic,
    struct versionStamp *vsn;

    if (FDH_SEEK(fdP, 0, SEEK_SET) < 0) {
        FDH_REALLYCLOSE(fdP);
    vsn = (struct versionStamp *)to;
    if (FDH_READ(fdP, to, size) != size || vsn->magic != magic) {
        FDH_REALLYCLOSE(fdP);

    /* Check is conditional, in case caller wants to inspect version himself */
    if (version && vsn->version != version) {
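
/* Usage sketch (illustrative): attach2() below reads the volume info
 * header through this routine, verifying both magic and version:
 *
 *     (void)ReadHeader(ec, V_diskDataHandle(vp), (char *)&V_disk(vp),
 *                      sizeof(V_disk(vp)), VOLUMEINFOMAGIC,
 *                      VOLUMEINFOVERSION);
 *
 * On a short read or a magic/version mismatch, *ec is set and the file
 * descriptor handle is really closed, so callers only check *ec. */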
WriteVolumeHeader_r(Error * ec, Volume * vp)
    IHandle_t *h = V_diskDataHandle(vp);

    if (FDH_SEEK(fdP, 0, SEEK_SET) < 0) {
        FDH_REALLYCLOSE(fdP);
    if (FDH_WRITE(fdP, (char *)&V_disk(vp), sizeof(V_disk(vp)))
        != sizeof(V_disk(vp))) {
        FDH_REALLYCLOSE(fdP);

/* VolumeHeaderToDisk
 * Allows for storing 64 bit inode numbers in on-disk volume header
 */
/* convert in-memory representation of a volume header to the
 * on-disk representation of a volume header */
VolumeHeaderToDisk(VolumeDiskHeader_t * dh, VolumeHeader_t * h)
    memset((char *)dh, 0, sizeof(VolumeDiskHeader_t));
    dh->stamp = h->stamp;
    dh->parent = h->parent;

#ifdef AFS_64BIT_IOPS_ENV
    dh->volumeInfo_lo = (afs_int32) h->volumeInfo & 0xffffffff;
    dh->volumeInfo_hi = (afs_int32) (h->volumeInfo >> 32) & 0xffffffff;
    dh->smallVnodeIndex_lo = (afs_int32) h->smallVnodeIndex & 0xffffffff;
    dh->smallVnodeIndex_hi =
        (afs_int32) (h->smallVnodeIndex >> 32) & 0xffffffff;
    dh->largeVnodeIndex_lo = (afs_int32) h->largeVnodeIndex & 0xffffffff;
    dh->largeVnodeIndex_hi =
        (afs_int32) (h->largeVnodeIndex >> 32) & 0xffffffff;
    dh->linkTable_lo = (afs_int32) h->linkTable & 0xffffffff;
    dh->linkTable_hi = (afs_int32) (h->linkTable >> 32) & 0xffffffff;
    dh->volumeInfo_lo = h->volumeInfo;
    dh->smallVnodeIndex_lo = h->smallVnodeIndex;
    dh->largeVnodeIndex_lo = h->largeVnodeIndex;
    dh->linkTable_lo = h->linkTable;

/* DiskToVolumeHeader
 * Converts an on-disk representation of a volume header to
 * the in-memory representation of a volume header.
 *
 * Makes the assumption that AFS has *always*
 * zero'd the volume header file so that high parts of inode
 * numbers are 0 in older (SGI EFS) volume header files.
 */
DiskToVolumeHeader(VolumeHeader_t * h, VolumeDiskHeader_t * dh)
    memset((char *)h, 0, sizeof(VolumeHeader_t));
    h->stamp = dh->stamp;
    h->parent = dh->parent;

#ifdef AFS_64BIT_IOPS_ENV
    h->volumeInfo =
        (Inode) dh->volumeInfo_lo | ((Inode) dh->volumeInfo_hi << 32);
    h->smallVnodeIndex =
        (Inode) dh->smallVnodeIndex_lo | ((Inode) dh->smallVnodeIndex_hi << 32);
    h->largeVnodeIndex =
        (Inode) dh->largeVnodeIndex_lo | ((Inode) dh->largeVnodeIndex_hi << 32);
    h->linkTable =
        (Inode) dh->linkTable_lo | ((Inode) dh->linkTable_hi << 32);
    h->volumeInfo = dh->volumeInfo_lo;
    h->smallVnodeIndex = dh->smallVnodeIndex_lo;
    h->largeVnodeIndex = dh->largeVnodeIndex_lo;
    h->linkTable = dh->linkTable_lo;
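
/* Worked example (illustrative): with AFS_64BIT_IOPS_ENV, an inode
 * number 0x0000000123456789 is stored on disk as hi = 0x00000001 and
 * lo = 0x23456789; DiskToVolumeHeader() recombines them as
 * ((Inode)hi << 32) | lo == 0x0000000123456789, so the round trip is
 * lossless for 64-bit inode numbers. */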
/***************************************************/
/* Volume Attachment routines                      */
/***************************************************/

#ifdef AFS_DEMAND_ATTACH_FS
/**
 * pre-attach a volume given its path.
 *
 * @param[out] ec         outbound error code
 * @param[in]  partition  partition path string
 * @param[in]  name       volume id string
 *
 * @return volume object pointer
 *
 * @note A pre-attached volume will only have its partition
 *       and hashid fields initialized.  At first call to
 *       VGetVolume, the volume will be fully attached.
 */
VPreAttachVolumeByName(Error * ec, char *partition, char *name)
    vp = VPreAttachVolumeByName_r(ec, partition, name);

/**
 * pre-attach a volume given its path.
 *
 * @param[out] ec         outbound error code
 * @param[in]  partition  path to vice partition
 * @param[in]  name       volume id string
 *
 * @return volume object pointer
 *
 * @pre VOL_LOCK held
 *
 * @internal volume package internal use only.
 */
VPreAttachVolumeByName_r(Error * ec, char *partition, char *name)
    return VPreAttachVolumeById_r(ec,
                                  VolumeNumber(name));

/**
 * pre-attach a volume given its path and numeric volume id.
 *
 * @param[out] ec        error code return
 * @param[in]  partition path to vice partition
 * @param[in]  volumeId  numeric volume id
 *
 * @return volume object pointer
 *
 * @pre VOL_LOCK held
 *
 * @internal volume package internal use only.
 */
VPreAttachVolumeById_r(Error * ec,
    struct DiskPartition64 *partp;

    assert(programType == fileServer);

    if (!(partp = VGetPartition_r(partition, 0))) {
        Log("VPreAttachVolumeById_r: Error getting partition (%s)\n", partition);

    vp = VLookupVolume_r(ec, volumeId, NULL);

    return VPreAttachVolumeByVp_r(ec, partp, vp, volumeId);

/**
 * preattach a volume.
 *
 * @param[out] ec     outbound error code
 * @param[in]  partp  pointer to partition object
 * @param[in]  vp     pointer to volume object
 * @param[in]  vid    volume id
 *
 * @return volume object pointer
 *
 * @pre VOL_LOCK is held.
 *
 * @warning Returned volume object pointer does not have to
 *          equal the pointer passed in as argument vp.  There
 *          are potential race conditions which can result in
 *          the pointers having different values.  It is up to
 *          the caller to make sure that references are handled
 *          properly in this case.
 *
 * @note If there is already a volume object registered with
 *       the same volume id, its pointer MUST be passed as
 *       argument vp.  Failure to do so will result in a silent
 *       failure to preattach.
 *
 * @internal volume package internal use only.
 */
VPreAttachVolumeByVp_r(Error * ec,
                       struct DiskPartition64 * partp,

    /* check to see if pre-attach already happened */
        (V_attachState(vp) != VOL_STATE_UNATTACHED) &&
        (V_attachState(vp) != VOL_STATE_PREATTACHED) &&
        !VIsErrorState(V_attachState(vp))) {
        /*
         * pre-attach is a no-op in all but the following cases:
         *
         *   - volume is unattached
         *   - volume is in an error state
         *   - volume is pre-attached
         */
        Log("VPreattachVolumeByVp_r: volume %u not in quiescent state\n", vid);

        /* we're re-attaching a volume; clear out some old state */
        memset(&vp->salvage, 0, sizeof(struct VolumeOnlineSalvage));

        if (V_partition(vp) != partp) {
            /* XXX potential race */
            DeleteVolumeFromVByPList_r(vp);

    /* if we need to allocate a new Volume struct,
     * go ahead and drop the vol glock, otherwise
     * do the basic setup synchronised, as it's
     * probably not worth dropping the lock */

        /* allocate the volume structure */
        vp = nvp = (Volume *) malloc(sizeof(Volume));
        memset(vp, 0, sizeof(Volume));
        queue_Init(&vp->vnode_list);
        assert(pthread_cond_init(&V_attachCV(vp), NULL) == 0);

    /* link the volume with its associated vice partition */
    vp->device = partp->device;
    vp->partition = partp;
    vp->specialStatus = 0;

    /* if we dropped the lock, reacquire the lock,
     * check for pre-attach races, and then add
     * the volume to the hash table */
        nvp = VLookupVolume_r(ec, vid, NULL);
        } else if (nvp) {	/* race detected */
            /* hack to make up for VChangeState_r() decrementing
             * the old state counter */
            VStats.state_levels[0]++;

    /* put pre-attached volume onto the hash table
     * and bring it up to the pre-attached state */
    AddVolumeToHashTable(vp, vp->hashid);
    AddVolumeToVByPList_r(vp);
    VLRU_Init_Node_r(vp);
    VChangeState_r(vp, VOL_STATE_PREATTACHED);

        Log("VPreAttachVolumeByVp_r: volume %u pre-attached\n", vp->hashid);
#endif /* AFS_DEMAND_ATTACH_FS */
/* Attach an existing volume, given its pathname, and return a
   pointer to the volume header information.  The volume also
   normally goes online at this time.  An offline volume
   must be reattached to make it go online */
VAttachVolumeByName(Error * ec, char *partition, char *name, int mode)
    retVal = VAttachVolumeByName_r(ec, partition, name, mode);

VAttachVolumeByName_r(Error * ec, char *partition, char *name, int mode)
    register Volume *vp = NULL;
    struct afs_stat status;
    struct VolumeDiskHeader diskHeader;
    struct VolumeHeader iheader;
    struct DiskPartition64 *partp;
#ifdef AFS_DEMAND_ATTACH_FS
    VolumeStats stats_save;
#endif /* AFS_DEMAND_ATTACH_FS */

    volumeId = VolumeNumber(name);

    if (!(partp = VGetPartition_r(partition, 0))) {
        Log("VAttachVolume: Error getting partition (%s)\n", partition);

    if (programType == volumeUtility) {
        VLockPartition_r(partition);
    } else if (programType == fileServer) {
#ifdef AFS_DEMAND_ATTACH_FS
        /* lookup the volume in the hash table */
        vp = VLookupVolume_r(ec, volumeId, NULL);

            /* save any counters that are supposed to
             * be monotonically increasing over the
             * lifetime of the fileserver */
            memcpy(&stats_save, &vp->stats, sizeof(VolumeStats));
            memset(&stats_save, 0, sizeof(VolumeStats));

        /* if there's something in the hash table, and it's not
         * in the pre-attach state, then we may need to detach
         * it before proceeding */
        if (vp && (V_attachState(vp) != VOL_STATE_PREATTACHED)) {
            VCreateReservation_r(vp);
            VWaitExclusiveState_r(vp);

            /* at this point state must be one of:
            if (vp->specialStatus == VBUSY)

            /* if it's already attached, see if we can return it */
            if (V_attachState(vp) == VOL_STATE_ATTACHED) {
                VGetVolumeByVp_r(ec, vp);
                if (V_inUse(vp) == fileServer) {
                    VCancelReservation_r(vp);

                /* otherwise, we need to detach, and attempt to re-attach */
                VDetachVolume_r(ec, vp);
                    Log("VAttachVolume: Error detaching old volume instance (%s)\n", name);

                /* if it isn't fully attached, delete from the hash tables,
                   and let the refcounter handle the rest */
                DeleteVolumeFromHashTable(vp);
                DeleteVolumeFromVByPList_r(vp);

            VCancelReservation_r(vp);

        /* pre-attach volume if it hasn't been done yet */
            (V_attachState(vp) == VOL_STATE_UNATTACHED) ||
            (V_attachState(vp) == VOL_STATE_ERROR)) {
            vp = VPreAttachVolumeByVp_r(ec, partp, vp, volumeId);

        /* handle pre-attach races
         *
         * multiple threads can race to pre-attach a volume,
         * but we can't let them race beyond that
         *
         * our solution is to let the first thread to bring
         * the volume into an exclusive state win; the other
         * threads just wait until it finishes bringing the
         * volume online, and then they do a vgetvolumebyvp
         */
        if (svp && (svp != vp)) {
            /* wait for other exclusive ops to finish */
            VCreateReservation_r(vp);
            VWaitExclusiveState_r(vp);

            /* get a heavyweight ref, kill the lightweight ref, and return */
            VGetVolumeByVp_r(ec, vp);
            VCancelReservation_r(vp);

        /* at this point, we are chosen as the thread to do
         * demand attachment for this volume. all other threads
         * doing a getvolume on vp->hashid will block until we finish */

        /* make sure any old header cache entries are invalidated
         * before proceeding */
        FreeVolumeHeader(vp);

        VChangeState_r(vp, VOL_STATE_ATTACHING);

        /* restore any saved counters */
        memcpy(&vp->stats, &stats_save, sizeof(VolumeStats));
#else /* AFS_DEMAND_ATTACH_FS */
        vp = VGetVolume_r(ec, volumeId);
            if (V_inUse(vp) == fileServer)
            if (vp->specialStatus == VBUSY)
            VDetachVolume_r(ec, vp);
                Log("VAttachVolume: Error detaching volume (%s)\n", name);
#endif /* AFS_DEMAND_ATTACH_FS */

    strcpy(path, VPartitionPath(partp));

    if ((fd = afs_open(path, O_RDONLY)) == -1 || afs_fstat(fd, &status) == -1) {
        Log("VAttachVolume: Failed to open %s (errno %d)\n", path, errno);

    n = read(fd, &diskHeader, sizeof(diskHeader));
    if (n != sizeof(diskHeader)
        || diskHeader.stamp.magic != VOLUMEHEADERMAGIC) {
        Log("VAttachVolume: Error reading volume header %s\n", path);
    if (diskHeader.stamp.version != VOLUMEHEADERVERSION) {
        Log("VAttachVolume: Volume %s, version number is incorrect; volume needs salvaged\n", path);

    DiskToVolumeHeader(&iheader, &diskHeader);
#ifdef FSSYNC_BUILD_CLIENT
    if (programType == volumeUtility && mode != V_SECRETLY && mode != V_PEEK) {
        if (FSYNC_VolOp(iheader.id, partition, FSYNC_VOL_NEEDVOLUME, mode, NULL)
            Log("VAttachVolume: attach of volume %u apparently denied by file server\n", iheader.id);
            *ec = VNOVOL;	/* XXXX */

    vp = (Volume *) calloc(1, sizeof(Volume));
    vp->device = partp->device;
    vp->partition = partp;
    queue_Init(&vp->vnode_list);
#ifdef AFS_DEMAND_ATTACH_FS
    assert(pthread_cond_init(&V_attachCV(vp), NULL) == 0);
#endif /* AFS_DEMAND_ATTACH_FS */

    /* attach2 is entered without any locks, and returns
     * with vol_glock_mutex held */
    vp = attach2(ec, volumeId, path, &iheader, partp, vp, isbusy, mode);

    if (programType == volumeUtility && vp) {
        if ((mode == V_VOLUPD) || (VolumeWriteable(vp) && (mode == V_CLONE))) {
            /* mark volume header as in use so that volser crashes lead to a
             * salvage attempt */
            VUpdateVolume_r(ec, vp, 0);
#ifdef AFS_DEMAND_ATTACH_FS
        /* for dafs, we should tell the fileserver, except for V_PEEK
         * where we know it is not necessary */
        if (mode == V_PEEK) {
            vp->needsPutBack = 0;
            vp->needsPutBack = 1;
#else /* !AFS_DEMAND_ATTACH_FS */
        /* duplicate computation in fssync.c about whether the server
         * takes the volume offline or not.  If the volume isn't
         * offline, we must not return it when we detach the volume,
         * or the server will abort */
        if (mode == V_READONLY || mode == V_PEEK
            || (!VolumeWriteable(vp) && (mode == V_CLONE || mode == V_DUMP)))
            vp->needsPutBack = 0;
            vp->needsPutBack = 1;
#endif /* !AFS_DEMAND_ATTACH_FS */

    /* OK, there's a problem here, but one that I don't know how to
     * fix right now, and that I don't think should arise often.
     * Basically, we should only put back this volume to the server if
     * it was given to us by the server, but since we don't have a vp,
     * we can't run the VolumeWriteable function to find out as we do
     * above when computing vp->needsPutBack.  So we send it back, but
     * there's a path in VAttachVolume on the server which may abort
     * if this volume doesn't have a header.  Should be pretty rare
     * for all of that to happen, but if it does, probably the right
     * fix is for the server to allow the return of readonly volumes
     * that it doesn't think are really checked out. */
#ifdef FSSYNC_BUILD_CLIENT
    if (programType == volumeUtility && vp == NULL &&
        mode != V_SECRETLY && mode != V_PEEK) {
        FSYNC_VolOp(iheader.id, partition, FSYNC_VOL_ON, 0, NULL);

    if (programType == fileServer && vp) {
#ifdef AFS_DEMAND_ATTACH_FS
        /*
         * we can get here in cases where we don't "own"
         * the volume (e.g. volume owned by a utility).
         * short circuit around potential disk header races.
         */
        if (V_attachState(vp) != VOL_STATE_ATTACHED) {

        V_needsCallback(vp) = 0;
        if (VInit >= 2 && V_BreakVolumeCallbacks) {
            Log("VAttachVolume: Volume %u was changed externally; breaking callbacks\n", V_id(vp));
            (*V_BreakVolumeCallbacks) (V_id(vp));

        VUpdateVolume_r(ec, vp, 0);
            Log("VAttachVolume: Error updating volume\n");

        if (VolumeWriteable(vp) && V_dontSalvage(vp) == 0) {
#ifndef AFS_DEMAND_ATTACH_FS
            /* This is a hack: by temporarily setting the incore
             * dontSalvage flag ON, the volume will be put back on the
             * Update list (with dontSalvage OFF again).  It will then
             * come back in N minutes with DONT_SALVAGE eventually
             * set.  This is the way that volumes that have never had
             * it set get it set; or that volumes that have been
             * offline without DONT SALVAGE having been set also
             * eventually get it set */
            V_dontSalvage(vp) = DONT_SALVAGE;
#endif /* !AFS_DEMAND_ATTACH_FS */
            VAddToVolumeUpdateList_r(ec, vp);
                Log("VAttachVolume: Error adding volume to update list\n");

        Log("VOnline: volume %u (%s) attached and online\n", V_id(vp),

    if (programType == volumeUtility) {
        VUnlockPartition_r(partition);
#ifdef AFS_DEMAND_ATTACH_FS
    /* attach failed; make sure we're in error state */
    if (vp && !VIsErrorState(V_attachState(vp))) {
        VChangeState_r(vp, VOL_STATE_ERROR);
#endif /* AFS_DEMAND_ATTACH_FS */
#ifdef AFS_DEMAND_ATTACH_FS
/* VAttachVolumeByVp_r
 *
 * finish attaching a volume that is
 * in a less than fully attached state
 */
/* caller MUST hold a ref count on vp */
VAttachVolumeByVp_r(Error * ec, Volume * vp, int mode)
    char name[VMAXPATHLEN];
    int fd, n, reserve = 0;
    struct afs_stat status;
    struct VolumeDiskHeader diskHeader;
    struct VolumeHeader iheader;
    struct DiskPartition64 *partp;
    VolumeStats stats_save;

    /* volume utility should never call AttachByVp */
    assert(programType == fileServer);

    volumeId = vp->hashid;
    partp = vp->partition;
    VolumeExternalName_r(volumeId, name, sizeof(name));

    /* if another thread is performing a blocking op, wait */
    VWaitExclusiveState_r(vp);

    memcpy(&stats_save, &vp->stats, sizeof(VolumeStats));

    /* if it's already attached, see if we can return it */
    if (V_attachState(vp) == VOL_STATE_ATTACHED) {
        VGetVolumeByVp_r(ec, vp);
        if (V_inUse(vp) == fileServer) {
            if (vp->specialStatus == VBUSY)
            VDetachVolume_r(ec, vp);
                Log("VAttachVolume: Error detaching volume (%s)\n", name);

    /* pre-attach volume if it hasn't been done yet */
        (V_attachState(vp) == VOL_STATE_UNATTACHED) ||
        (V_attachState(vp) == VOL_STATE_ERROR)) {
        nvp = VPreAttachVolumeByVp_r(ec, partp, vp, volumeId);
            VCreateReservation_r(nvp);

    VChangeState_r(vp, VOL_STATE_ATTACHING);

    /* restore monotonically increasing stats */
    memcpy(&vp->stats, &stats_save, sizeof(VolumeStats));

    /* compute path to disk header,
     * and verify magic and version stamps */
    strcpy(path, VPartitionPath(partp));

    if ((fd = afs_open(path, O_RDONLY)) == -1 || afs_fstat(fd, &status) == -1) {
        Log("VAttachVolume: Failed to open %s (errno %d)\n", path, errno);

    n = read(fd, &diskHeader, sizeof(diskHeader));
    if (n != sizeof(diskHeader)
        || diskHeader.stamp.magic != VOLUMEHEADERMAGIC) {
        Log("VAttachVolume: Error reading volume header %s\n", path);
    if (diskHeader.stamp.version != VOLUMEHEADERVERSION) {
        Log("VAttachVolume: Volume %s, version number is incorrect; volume needs salvaged\n", path);

    /* convert on-disk header format to in-memory header format */
    DiskToVolumeHeader(&iheader, &diskHeader);

    /*
     * NOTE: attach2 is entered without any locks, and returns
     * with vol_glock_mutex held */
    vp = attach2(ec, volumeId, path, &iheader, partp, vp, isbusy, mode);

    /*
     * the event that an error was encountered, or
     * the volume was not brought to an attached state
     * for any reason, skip to the end.  We cannot
     * safely call VUpdateVolume unless we "own" it.
     */
        (V_attachState(vp) != VOL_STATE_ATTACHED)) {

    V_needsCallback(vp) = 0;
    VUpdateVolume_r(ec, vp, 0);
        Log("VAttachVolume: Error updating volume %u\n", vp->hashid);

    if (VolumeWriteable(vp) && V_dontSalvage(vp) == 0) {
#ifndef AFS_DEMAND_ATTACH_FS
        /* This is a hack: by temporarily setting the incore
         * dontSalvage flag ON, the volume will be put back on the
         * Update list (with dontSalvage OFF again).  It will then
         * come back in N minutes with DONT_SALVAGE eventually
         * set.  This is the way that volumes that have never had
         * it set get it set; or that volumes that have been
         * offline without DONT SALVAGE having been set also
         * eventually get it set */
        V_dontSalvage(vp) = DONT_SALVAGE;
#endif /* !AFS_DEMAND_ATTACH_FS */
        VAddToVolumeUpdateList_r(ec, vp);
            Log("VAttachVolume: Error adding volume %u to update list\n", vp->hashid);

        Log("VOnline: volume %u (%s) attached and online\n", V_id(vp),

        VCancelReservation_r(nvp);

    if (*ec && (*ec != VOFFLINE) && (*ec != VSALVAGE)) {
        if (vp && !VIsErrorState(V_attachState(vp))) {
            VChangeState_r(vp, VOL_STATE_ERROR);
#endif /* AFS_DEMAND_ATTACH_FS */

/*
 * called without any locks held
 * returns with vol_glock_mutex held
 */
attach2(Error * ec, VolId volumeId, char *path, register struct VolumeHeader * header,
        struct DiskPartition64 * partp, register Volume * vp, int isbusy, int mode)
    vp->specialStatus = (byte) (isbusy ? VBUSY : 0);
    IH_INIT(vp->vnodeIndex[vLarge].handle, partp->device, header->parent,
            header->largeVnodeIndex);
    IH_INIT(vp->vnodeIndex[vSmall].handle, partp->device, header->parent,
            header->smallVnodeIndex);
    IH_INIT(vp->diskDataHandle, partp->device, header->parent,
            header->volumeInfo);
    IH_INIT(vp->linkHandle, partp->device, header->parent, header->linkTable);
    vp->shuttingDown = 0;
    vp->goingOffline = 0;
#ifdef AFS_DEMAND_ATTACH_FS
    vp->stats.last_attach = FT_ApproxTime();
    vp->stats.attaches++;

    IncUInt64(&VStats.attaches);
    vp->cacheCheck = ++VolumeCacheCheck;
    /* just in case this ever rolls over */
    if (!vp->cacheCheck)
        vp->cacheCheck = ++VolumeCacheCheck;
    GetVolumeHeader(vp);

#if defined(AFS_DEMAND_ATTACH_FS) && defined(FSSYNC_BUILD_CLIENT)
    /* demand attach changes the V_PEEK mechanism
     *
     * we can now suck the current disk data structure over
     * the fssync interface without going to disk
     *
     * (technically, we don't need to restrict this feature
     *  to demand attach fileservers.  However, I'm trying
     *  to limit the number of common code changes)
     */
    if (programType != fileServer && mode == V_PEEK) {
        res.payload.len = sizeof(VolumeDiskData);
        res.payload.buf = &vp->header->diskstuff;

        if (FSYNC_VolOp(volumeId,
                        FSYNC_VOL_QUERY_HDR,
            goto disk_header_loaded;
#endif /* AFS_DEMAND_ATTACH_FS && FSSYNC_BUILD_CLIENT */

    (void)ReadHeader(ec, V_diskDataHandle(vp), (char *)&V_disk(vp),
                     sizeof(V_disk(vp)), VOLUMEINFOMAGIC, VOLUMEINFOVERSION);
#ifdef AFS_DEMAND_ATTACH_FS
        IncUInt64(&VStats.hdr_loads);
        IncUInt64(&vp->stats.hdr_loads);
#endif /* AFS_DEMAND_ATTACH_FS */
        Log("VAttachVolume: Error reading diskDataHandle vol header %s; error=%u\n", path, *ec);

#ifdef AFS_DEMAND_ATTACH_FS
    /* check for pending volume operations */
    if (vp->pending_vol_op) {
        /* see if the pending volume op requires exclusive access */
        switch (vp->pending_vol_op->vol_op_state) {
        case FSSYNC_VolOpPending:
            /* this should never happen */
            assert(vp->pending_vol_op->vol_op_state != FSSYNC_VolOpPending);

        case FSSYNC_VolOpRunningUnknown:
            if (VVolOpLeaveOnline_r(vp, vp->pending_vol_op)) {
                vp->pending_vol_op->vol_op_state = FSSYNC_VolOpRunningOnline;
                vp->pending_vol_op->vol_op_state = FSSYNC_VolOpRunningOffline;
                /* fall through to take volume offline */

        case FSSYNC_VolOpRunningOffline:
            /* mark the volume down */
            VChangeState_r(vp, VOL_STATE_UNATTACHED);
            if (V_offlineMessage(vp)[0] == '\0')
                strlcpy(V_offlineMessage(vp),
                        "A volume utility is running.",
                        sizeof(V_offlineMessage(vp)));
            V_offlineMessage(vp)[sizeof(V_offlineMessage(vp)) - 1] = '\0';

            /* check to see if we should set the specialStatus flag */
            if (VVolOpSetVBusy_r(vp, vp->pending_vol_op)) {
                vp->specialStatus = VBUSY;

    V_attachFlags(vp) |= VOL_HDR_LOADED;
    vp->stats.last_hdr_load = vp->stats.last_attach;
#endif /* AFS_DEMAND_ATTACH_FS */

        struct IndexFileHeader iHead;
#if OPENAFS_VOL_STATS
        /*
         * We just read in the diskstuff part of the header.  If the detailed
         * volume stats area has not yet been initialized, we should bzero the
         * area and mark it as initialized.
         */
        if (!(V_stat_initialized(vp))) {
            memset((char *)(V_stat_area(vp)), 0, VOL_STATS_BYTES);
            V_stat_initialized(vp) = 1;
#endif /* OPENAFS_VOL_STATS */

        (void)ReadHeader(ec, vp->vnodeIndex[vSmall].handle,
                         (char *)&iHead, sizeof(iHead),
                         SMALLINDEXMAGIC, SMALLINDEXVERSION);
            Log("VAttachVolume: Error reading smallVnode vol header %s; error=%u\n", path, *ec);

        struct IndexFileHeader iHead;
        (void)ReadHeader(ec, vp->vnodeIndex[vLarge].handle,
                         (char *)&iHead, sizeof(iHead),
                         LARGEINDEXMAGIC, LARGEINDEXVERSION);
            Log("VAttachVolume: Error reading largeVnode vol header %s; error=%u\n", path, *ec);

#ifdef AFS_NAMEI_ENV
        struct versionStamp stamp;
        (void)ReadHeader(ec, V_linkHandle(vp), (char *)&stamp,
                         sizeof(stamp), LINKTABLEMAGIC, LINKTABLEVERSION);
            Log("VAttachVolume: Error reading namei vol header %s; error=%u\n", path, *ec);
#endif /* AFS_NAMEI_ENV */

#if defined(AFS_DEMAND_ATTACH_FS)
    if (*ec && ((*ec != VOFFLINE) || (V_attachState(vp) != VOL_STATE_UNATTACHED))) {
        if (programType == fileServer) {
            VRequestSalvage_r(ec, vp, SALVSYNC_ERROR, VOL_SALVAGE_INVALIDATE_HEADER);
            Log("VAttachVolume: Error attaching volume %s; volume needs salvage; error=%u\n", path, *ec);

        /* volume operation in progress */
#else /* AFS_DEMAND_ATTACH_FS */
        Log("VAttachVolume: Error attaching volume %s; volume needs salvage; error=%u\n", path, *ec);
#endif /* AFS_DEMAND_ATTACH_FS */

    if (V_needsSalvaged(vp)) {
        if (vp->specialStatus)
            vp->specialStatus = 0;
#if defined(AFS_DEMAND_ATTACH_FS)
        if (programType == fileServer) {
            VRequestSalvage_r(ec, vp, SALVSYNC_NEEDED, VOL_SALVAGE_INVALIDATE_HEADER);
            Log("VAttachVolume: volume salvage flag is ON for %s; volume needs salvage\n", path);
2447 #else /* AFS_DEMAND_ATTACH_FS */
2450 #endif /* AFS_DEMAND_ATTACH_FS */
2455 if (programType == fileServer) {
2456 #ifndef FAST_RESTART
2457 if (V_inUse(vp) && VolumeWriteable(vp)) {
2458 if (!V_needsSalvaged(vp)) {
2459 V_needsSalvaged(vp) = 1;
2460 VUpdateVolume_r(ec, vp, 0);
2462 #if defined(AFS_DEMAND_ATTACH_FS)
2463 VRequestSalvage_r(ec, vp, SALVSYNC_NEEDED, VOL_SALVAGE_INVALIDATE_HEADER);
2465 #else /* AFS_DEMAND_ATTACH_FS */
2466 Log("VAttachVolume: volume %s needs to be salvaged; not attached.\n", path);
2469 #endif /* AFS_DEMAND_ATTACH_FS */
2472 #endif /* FAST_RESTART */
2474 if (V_destroyMe(vp) == DESTROY_ME) {
2475 #if defined(AFS_DEMAND_ATTACH_FS)
2476 /* schedule a salvage so the volume goes away on disk */
2477 VRequestSalvage_r(ec, vp, SALVSYNC_ERROR, VOL_SALVAGE_INVALIDATE_HEADER);
2478 VChangeState_r(vp, VOL_STATE_ERROR);
2480 #endif /* AFS_DEMAND_ATTACH_FS */
2482 Log("VAttachVolume: volume %s is junk; it should be destroyed at next salvage\n", path);
2488 vp->nextVnodeUnique = V_uniquifier(vp);
2489 vp->vnodeIndex[vSmall].bitmap = vp->vnodeIndex[vLarge].bitmap = NULL;
2490 #ifndef BITMAP_LATER
2491 if (programType == fileServer && VolumeWriteable(vp)) {
2493 for (i = 0; i < nVNODECLASSES; i++) {
2494 VGetBitmap_r(ec, vp, i);
2496 #ifdef AFS_DEMAND_ATTACH_FS
2497 VRequestSalvage_r(ec, vp, SALVSYNC_ERROR, VOL_SALVAGE_INVALIDATE_HEADER);
2499 #else /* AFS_DEMAND_ATTACH_FS */
2501 #endif /* AFS_DEMAND_ATTACH_FS */
2502 Log("VAttachVolume: error getting bitmap for volume (%s)\n",
2508 #endif /* BITMAP_LATER */
2510 if (programType == fileServer) {
2511 if (vp->specialStatus)
2512 vp->specialStatus = 0;
2513 if (V_blessed(vp) && V_inService(vp) && !V_needsSalvaged(vp)) {
2514 V_inUse(vp) = fileServer;
2515 V_offlineMessage(vp)[0] = '\0';
2518 if ((mode != V_PEEK) && (mode != V_SECRETLY))
2519 V_inUse(vp) = programType;
2520 V_checkoutMode(vp) = mode;
2523 AddVolumeToHashTable(vp, V_id(vp));
2524 #ifdef AFS_DEMAND_ATTACH_FS
2525 if ((programType != fileServer) ||
2526 (V_inUse(vp) == fileServer)) {
2527 AddVolumeToVByPList_r(vp);
2529 VChangeState_r(vp, VOL_STATE_ATTACHED);
2531 VChangeState_r(vp, VOL_STATE_UNATTACHED);
2537 /* Attach an existing volume.
2538 The volume also normally goes online at this time.
2539 An offline volume must be reattached to make it go online.
2543 VAttachVolume(Error * ec, VolumeId volumeId, int mode)
2547 retVal = VAttachVolume_r(ec, volumeId, mode);
2553 VAttachVolume_r(Error * ec, VolumeId volumeId, int mode)
2556 VGetVolumePath(ec, volumeId, &part, &name);
2558 register Volume *vp;
2560 vp = VGetVolume_r(&error, volumeId);
2562 assert(V_inUse(vp) == 0);
2563 VDetachVolume_r(ec, vp);
2567 return VAttachVolumeByName_r(ec, part, name, mode);
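/* Illustrative sketch (not part of the original source): a typical
 * attach/detach sequence as seen from a caller. The ExampleAttachAndDetach
 * wrapper is hypothetical, and we assume VAttachVolume returns NULL and/or
 * sets *ec on failure. */
#if 0
static void
ExampleAttachAndDetach(VolumeId volumeId, int mode)
{
    Error ec;
    Volume *vp;

    vp = VAttachVolume(&ec, volumeId, mode);
    if (!vp || ec) {
        Log("ExampleAttachAndDetach: attach of volume %u failed (error %d)\n",
            volumeId, ec);
        return;
    }
    /* ... operate on the attached volume ... */
    VDetachVolume(&ec, vp);    /* header written back; vp is invalid afterwards */
}
#endif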
2570 /* Increment a reference count to a volume, sans context swaps. This may
2571 * require reading the volume header in from disk, since there is an
2572 * invariant in the volume package that nUsers > 0 ==> vp->header is valid.
2574 * N.B. This call can fail if we can't read in the header!! In that case
2575 * we still guarantee we won't context swap, but the ref count won't be
2576 * incremented (otherwise we'd violate the invariant).
2578 /* NOTE: with the demand attach fileserver extensions, the global lock
2579 * is dropped within VHold */
2580 #ifdef AFS_DEMAND_ATTACH_FS
2582 VHold_r(register Volume * vp)
2586 VCreateReservation_r(vp);
2587 VWaitExclusiveState_r(vp);
2589 LoadVolumeHeader(&error, vp);
2591 VCancelReservation_r(vp);
2595 VCancelReservation_r(vp);
2598 #else /* AFS_DEMAND_ATTACH_FS */
2600 VHold_r(register Volume * vp)
2604 LoadVolumeHeader(&error, vp);
2610 #endif /* AFS_DEMAND_ATTACH_FS */
2614 VHold(register Volume * vp)
2618 retVal = VHold_r(vp);
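/* Illustrative sketch (not part of the original source): the ExampleHold
 * wrapper is hypothetical, and we assume a nonzero return from VHold means
 * the header could not be read and no reference was taken. */
#if 0
static int
ExampleHold(Volume * vp)
{
    if (VHold(vp)) {
        /* header read failed; the ref count was NOT incremented */
        return -1;
    }
    /* ... nUsers > 0, so vp->header may be used safely ... */
    VPutVolume(vp);    /* drop the reference taken by VHold */
    return 0;
}
#endif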
2625 /***************************************************/
2626 /* get and put volume routines */
2627 /***************************************************/
2630 * put back a heavyweight reference to a volume object.
2632 * @param[in] vp volume object pointer
2634 * @pre VOL_LOCK held
2636 * @post heavyweight volume reference put back.
2637 * depending on state, volume may have been taken offline,
2638 * detached, salvaged, freed, etc.
2640 * @internal volume package internal use only
2643 VPutVolume_r(register Volume * vp)
2645 assert(--vp->nUsers >= 0);
2646 if (vp->nUsers == 0) {
2648 ReleaseVolumeHeader(vp->header);
2649 #ifdef AFS_DEMAND_ATTACH_FS
2650 if (!VCheckDetach(vp)) {
2654 #else /* AFS_DEMAND_ATTACH_FS */
2656 #endif /* AFS_DEMAND_ATTACH_FS */
2661 VPutVolume(register Volume * vp)
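/* Illustrative sketch (not part of the original source): the canonical
 * get/operate/put pattern. ExampleGetPut is hypothetical; we assume a set
 * *ec (e.g. VNOVOL, VOFFLINE, VBUSY, VSALVAGE) means the volume must not
 * be used, and that client_ec carries the code to hand back to clients. */
#if 0
static void
ExampleGetPut(VolId volumeId)
{
    Error ec, client_ec;
    Volume *vp;

    vp = VGetVolume(&ec, &client_ec, volumeId);
    if (!vp || ec)
        return;
    /* ... use vp; nUsers > 0 guarantees vp->header is valid ... */
    VPutVolume(vp);    /* put back the heavyweight reference */
}
#endif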
2669 /* Get a pointer to an attached volume. The pointer is returned regardless
2670 of whether or not the volume is in service or on/off line. An error
2671 code, however, is returned with an indication of the volume's status */
2673 VGetVolume(Error * ec, Error * client_ec, VolId volumeId)
2677 retVal = GetVolume(ec, client_ec, volumeId, NULL, 0);
2683 VGetVolume_r(Error * ec, VolId volumeId)
2685 return GetVolume(ec, NULL, volumeId, NULL, 0);
2688 /* try to get a volume we've previously looked up */
2689 /* for demand attach fs, caller MUST NOT hold a ref count on vp */
2691 VGetVolumeByVp_r(Error * ec, Volume * vp)
2693 return GetVolume(ec, NULL, vp->hashid, vp, 0);
2696 /* private interface for getting a volume handle
2697 * volumeId must be provided.
2698 * hint is an optional parameter to speed up hash lookups
2699 * flags is not used at this time
2701 /* for demand attach fs, caller MUST NOT hold a ref count on hint */
2703 GetVolume(Error * ec, Error * client_ec, VolId volumeId, Volume * hint, int flags)
2706 /* pull this profiling/debugging code out of regular builds */
2708 #define VGET_CTR_INC(x) x++
2709 unsigned short V0 = 0, V1 = 0, V2 = 0, V3 = 0, V5 = 0, V6 = 0,
2710 V7 = 0, V8 = 0, V9 = 0;
2711 unsigned short V10 = 0, V11 = 0, V12 = 0, V13 = 0, V14 = 0, V15 = 0;
2713 #define VGET_CTR_INC(x)
2715 #ifdef AFS_DEMAND_ATTACH_FS
2716 Volume *avp, * rvp = hint;
2720 * if VInit is zero, the volume package dynamic
2721 * data structures have not been initialized yet,
2722 * and we must immediately return an error
2728 *client_ec = VOFFLINE;
2733 #ifdef AFS_DEMAND_ATTACH_FS
2735 VCreateReservation_r(rvp);
2737 #endif /* AFS_DEMAND_ATTACH_FS */
2745 vp = VLookupVolume_r(ec, volumeId, vp);
2751 #ifdef AFS_DEMAND_ATTACH_FS
2752 if (rvp && (rvp != vp)) {
2753 /* break reservation on old vp */
2754 VCancelReservation_r(rvp);
2757 #endif /* AFS_DEMAND_ATTACH_FS */
2763 /* Until we have reached an initialization level of 2
2764 * we don't know whether this volume exists or not.
2765 * We can't sleep and retry later because before a volume
2766 * is attached, the caller tries to get it first. Just
2767 * return VOFFLINE and the caller can choose whether to
2768 * retry the command or not. */
2778 IncUInt64(&VStats.hdr_gets);
2780 #ifdef AFS_DEMAND_ATTACH_FS
2781 /* block if someone else is performing an exclusive op on this volume */
2784 VCreateReservation_r(rvp);
2786 VWaitExclusiveState_r(vp);
2788 /* short circuit with VNOVOL in the following circumstances:
2791 * - VOL_STATE_SHUTTING_DOWN
2793 if ((V_attachState(vp) == VOL_STATE_ERROR) ||
2794 (V_attachState(vp) == VOL_STATE_SHUTTING_DOWN) ||
2795 (V_attachState(vp) == VOL_STATE_GOING_OFFLINE)) {
2802 * short circuit with VOFFLINE in the following circumstances:
2804 * - VOL_STATE_UNATTACHED
2806 if (V_attachState(vp) == VOL_STATE_UNATTACHED) {
2807 if (vp->specialStatus) {
2808 *ec = vp->specialStatus;
2816 /* allowable states:
2822 if (vp->salvage.requested) {
2823 VUpdateSalvagePriority_r(vp);
2826 if (V_attachState(vp) == VOL_STATE_PREATTACHED) {
2827 avp = VAttachVolumeByVp_r(ec, vp, 0);
2830 /* VAttachVolumeByVp_r can return a pointer
2831 * != the vp passed to it under certain
2832 * conditions; make sure we don't leak
2833 * reservations if that happens */
2835 VCancelReservation_r(rvp);
2837 VCreateReservation_r(rvp);
2847 if (!vp->pending_vol_op) {
2862 if ((V_attachState(vp) == VOL_STATE_SALVAGING) ||
2863 (*ec == VSALVAGING)) {
2865 /* see CheckVnode() in afsfileprocs.c for an explanation
2866 * of this error code logic */
2867 afs_uint32 now = FT_ApproxTime();
2868 if ((vp->stats.last_salvage + (10 * 60)) >= now) {
2871 *client_ec = VRESTARTING;
2880 LoadVolumeHeader(ec, vp);
2883 /* Only log the error if it was a totally unexpected error. A missing
2884 * inode, for example, is likely to be caused by the volume having been deleted */
2885 if (errno != ENXIO || LogLevel)
2886 Log("Volume %u: couldn't reread volume header\n",
2888 #ifdef AFS_DEMAND_ATTACH_FS
2889 if (programType == fileServer) {
2890 VRequestSalvage_r(ec, vp, SALVSYNC_ERROR, VOL_SALVAGE_INVALIDATE_HEADER);
2895 #else /* AFS_DEMAND_ATTACH_FS */
2898 #endif /* AFS_DEMAND_ATTACH_FS */
2902 #ifdef AFS_DEMAND_ATTACH_FS
2904 * this test MUST happen after the volume header is loaded
2907 /* only valid before/during demand attachment */
2908 assert(!vp->pending_vol_op || vp->pending_vol_op->vol_op_state != FSSYNC_VolOpRunningUnknown);
2910 /* deny getvolume due to running mutually exclusive vol op */
2911 if (vp->pending_vol_op && vp->pending_vol_op->vol_op_state==FSSYNC_VolOpRunningOffline) {
2913 * volume cannot remain online during this volume operation.
2916 if (vp->specialStatus) {
2918 * special status codes outrank normal VOFFLINE code
2920 *ec = vp->specialStatus;
2922 *client_ec = vp->specialStatus;
2926 /* see CheckVnode() in afsfileprocs.c for an explanation
2927 * of this error code logic */
2928 afs_uint32 now = FT_ApproxTime();
2929 if ((vp->stats.last_vol_op + (10 * 60)) >= now) {
2932 *client_ec = VRESTARTING;
2937 VChangeState_r(vp, VOL_STATE_UNATTACHED);
2938 FreeVolumeHeader(vp);
2942 #endif /* AFS_DEMAND_ATTACH_FS */
2945 if (vp->shuttingDown) {
2952 if (programType == fileServer) {
2954 if (vp->goingOffline) {
2956 #ifdef AFS_DEMAND_ATTACH_FS
2957 /* wait for the volume to go offline */
2958 if (V_attachState(vp) == VOL_STATE_GOING_OFFLINE) {
2959 VWaitStateChange_r(vp);
2961 #elif defined(AFS_PTHREAD_ENV)
2962 VOL_CV_WAIT(&vol_put_volume_cond);
2963 #else /* AFS_PTHREAD_ENV */
2964 LWP_WaitProcess(VPutVolume);
2965 #endif /* AFS_PTHREAD_ENV */
2968 if (vp->specialStatus) {
2970 *ec = vp->specialStatus;
2971 } else if (V_inService(vp) == 0 || V_blessed(vp) == 0) {
2974 } else if (V_inUse(vp) == 0) {
2985 #ifdef AFS_DEMAND_ATTACH_FS
2986 /* if no error, bump nUsers */
2989 VLRU_UpdateAccess_r(vp);
2992 VCancelReservation_r(rvp);
2995 if (client_ec && !*client_ec) {
2998 #else /* AFS_DEMAND_ATTACH_FS */
2999 /* if no error, bump nUsers */
3006 #endif /* AFS_DEMAND_ATTACH_FS */
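/* Illustrative sketch (not part of the original source): a caller that
 * retries on VOFFLINE while the volume package is still initializing, per
 * the convention described above. ExampleGetWithRetry is hypothetical. */
#if 0
static Volume *
ExampleGetWithRetry(VolId volumeId)
{
    Error ec, client_ec;
    Volume *vp;
    int tries;

    for (tries = 0; tries < 10; tries++) {
        vp = VGetVolume(&ec, &client_ec, volumeId);
        if (vp && !ec)
            return vp;
        if (ec != VOFFLINE)
            break;      /* a real error; don't spin */
        sleep(1);       /* the package may still be below VInit level 2 */
    }
    return NULL;
}
#endif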
3014 /***************************************************/
3015 /* Volume offline/detach routines */
3016 /***************************************************/
3018 /* caller MUST hold a heavyweight ref on vp */
3019 #ifdef AFS_DEMAND_ATTACH_FS
3021 VTakeOffline_r(register Volume * vp)
3025 assert(vp->nUsers > 0);
3026 assert(programType == fileServer);
3028 VCreateReservation_r(vp);
3029 VWaitExclusiveState_r(vp);
3031 vp->goingOffline = 1;
3032 V_needsSalvaged(vp) = 1;
3034 VRequestSalvage_r(&error, vp, SALVSYNC_ERROR, 0);
3035 VCancelReservation_r(vp);
3037 #else /* AFS_DEMAND_ATTACH_FS */
3039 VTakeOffline_r(register Volume * vp)
3041 assert(vp->nUsers > 0);
3042 assert(programType == fileServer);
3044 vp->goingOffline = 1;
3045 V_needsSalvaged(vp) = 1;
3047 #endif /* AFS_DEMAND_ATTACH_FS */
3050 VTakeOffline(register Volume * vp)
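/* Illustrative sketch (not part of the original source): ExampleTakeOffline
 * is hypothetical. We assume the usual pattern of holding a heavyweight ref,
 * marking the volume for salvage via VTakeOffline, and letting the offline
 * transition complete when the last reference is put back. */
#if 0
static void
ExampleTakeOffline(Volume * vp)
{
    /* precondition: caller holds a heavyweight ref (nUsers > 0) */
    VTakeOffline(vp);      /* sets goingOffline and needsSalvaged */
    VPutVolume(vp);        /* offline transition completes as refs drain */
}
#endif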
3058 * force a volume offline.
3060 * @param[in] vp volume object pointer
3061 * @param[in] flags flags (see note below)
3063 * @note the flag VOL_FORCEOFF_NOUPDATE is a recursion control flag
3064 * used when VUpdateVolume_r needs to call VForceOffline_r
3065 * (which in turn would normally call VUpdateVolume_r)
3067 * @see VUpdateVolume_r
3069 * @pre VOL_LOCK must be held.
3070 * for DAFS, caller must hold ref.
3072 * @note for DAFS, it _is safe_ to call this function from an
3075 * @post needsSalvaged flag is set.
3076 * for DAFS, salvage is requested.
3077 * no further references to the volume through the volume
3078 * package will be honored.
3079 * all file descriptor and vnode caches are invalidated.
3081 * @warning this is a heavy-handed interface. it results in
3082 * a volume going offline regardless of the current
3083 * reference count state.
3085 * @internal volume package internal use only
3088 VForceOffline_r(Volume * vp, int flags)
3092 #ifdef AFS_DEMAND_ATTACH_FS
3093 VChangeState_r(vp, VOL_STATE_ERROR);
3098 strcpy(V_offlineMessage(vp),
3099 "Forced offline due to internal error: volume needs to be salvaged");
3100 Log("Volume %u forced offline: it needs salvaging!\n", V_id(vp));
3103 vp->goingOffline = 0;
3104 V_needsSalvaged(vp) = 1;
3105 if (!(flags & VOL_FORCEOFF_NOUPDATE)) {
3106 VUpdateVolume_r(&error, vp, VOL_UPDATE_NOFORCEOFF);
3109 #ifdef AFS_DEMAND_ATTACH_FS
3110 VRequestSalvage_r(&error, vp, SALVSYNC_ERROR, VOL_SALVAGE_INVALIDATE_HEADER);
3111 #endif /* AFS_DEMAND_ATTACH_FS */
3113 #ifdef AFS_PTHREAD_ENV
3114 assert(pthread_cond_broadcast(&vol_put_volume_cond) == 0);
3115 #else /* AFS_PTHREAD_ENV */
3116 LWP_NoYieldSignal(VPutVolume);
3117 #endif /* AFS_PTHREAD_ENV */
3119 VReleaseVolumeHandles_r(vp);
3123 * force a volume offline.
3125 * @param[in] vp volume object pointer
3127 * @see VForceOffline_r
3130 VForceOffline(Volume * vp)
3133 VForceOffline_r(vp, 0);
3137 /* The opposite of VAttachVolume. The volume header is written to disk, with
3138 the inUse bit turned off. A copy of the header is maintained in memory,
3139 however (which is why this is VOffline, not VDetach).
3142 VOffline_r(Volume * vp, char *message)
3145 VolumeId vid = V_id(vp);
3147 assert(programType != volumeUtility);
3152 if (V_offlineMessage(vp)[0] == '\0')
3153 strncpy(V_offlineMessage(vp), message, sizeof(V_offlineMessage(vp)));
3154 V_offlineMessage(vp)[sizeof(V_offlineMessage(vp)) - 1] = '\0';
3156 vp->goingOffline = 1;
3157 #ifdef AFS_DEMAND_ATTACH_FS
3158 VChangeState_r(vp, VOL_STATE_GOING_OFFLINE);
3159 VCreateReservation_r(vp);
3162 /* wait for the volume to go offline */
3163 if (V_attachState(vp) == VOL_STATE_GOING_OFFLINE) {
3164 VWaitStateChange_r(vp);
3166 VCancelReservation_r(vp);
3167 #else /* AFS_DEMAND_ATTACH_FS */
3169 vp = VGetVolume_r(&error, vid); /* Wait for it to go offline */
3170 if (vp) /* In case it was reattached... */
3172 #endif /* AFS_DEMAND_ATTACH_FS */
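/* Illustrative sketch (not part of the original source): taking a volume
 * offline with an operator-visible message. The in-memory header copy is
 * retained, which is what distinguishes VOffline from VDetachVolume. We
 * assume the caller's reference is consumed by the offline path, so vp
 * must be re-acquired before any further use. */
#if 0
static void
ExampleOffline(Volume * vp)
{
    VOffline(vp, "Taken offline for scheduled maintenance");
    /* do not touch vp here; re-acquire it with VGetVolume if needed */
}
#endif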
3175 #ifdef AFS_DEMAND_ATTACH_FS
3177 * Take a volume offline in order to perform a volume operation.
3179 * @param[inout] ec address in which to store error code
3180 * @param[in] vp volume object pointer
3181 * @param[in] message volume offline status message
3184 * - VOL_LOCK is held
3185 * - caller MUST hold a heavyweight ref on vp
3188 * - volume is taken offline
3189 * - if possible, volume operation is promoted to running state
3190 * - on failure, *ec is set to nonzero
3192 * @note Although this function does not return any value, it may
3193 * still fail to promote our pending volume operation to
3194 * a running state. Any caller MUST check the value of *ec,
3195 * and MUST NOT blindly assume success.
3197 * @warning if the caller does not hold a lightweight ref on vp,
3198 * then it MUST NOT reference vp after this function
3199 * returns to the caller.
3201 * @internal volume package internal use only
3204 VOfflineForVolOp_r(Error *ec, Volume *vp, char *message)
3206 assert(vp->pending_vol_op);
3212 if (V_offlineMessage(vp)[0] == '\0')
3213 strncpy(V_offlineMessage(vp), message, sizeof(V_offlineMessage(vp)));
3214 V_offlineMessage(vp)[sizeof(V_offlineMessage(vp)) - 1] = '\0';
3216 vp->goingOffline = 1;
3217 VChangeState_r(vp, VOL_STATE_GOING_OFFLINE);
3218 VCreateReservation_r(vp);
3221 /* Wait for the volume to go offline */
3222 while (!VIsOfflineState(V_attachState(vp))) {
3223 /* do not give corrupted volumes to the volserver */
3224 if (vp->salvage.requested && vp->pending_vol_op->com.programType != salvageServer) {
3228 VWaitStateChange_r(vp);
3232 VCancelReservation_r(vp);
3234 #endif /* AFS_DEMAND_ATTACH_FS */
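/* Illustrative sketch (DAFS only; not part of the original source): per the
 * note above, callers of VOfflineForVolOp_r MUST check *ec, since promotion
 * of the pending vol op can fail even though no value is returned.
 * ExampleOfflineForVolOp is hypothetical and assumes VOL_LOCK is held. */
#if 0
static void
ExampleOfflineForVolOp(Volume * vp)
{
    Error ec;

    VOfflineForVolOp_r(&ec, vp, "A volume utility is running.");
    if (ec) {
        /* the pending vol op was not promoted to a running state */
        Log("vol op offline failed for volume %u (error %d)\n",
            vp->hashid, ec);
    }
}
#endif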
3237 VOffline(Volume * vp, char *message)
3240 VOffline_r(vp, message);
3244 /* This gets used for the most part by utility routines that don't want
3245 * to keep all the volume headers around. Generally, the file server won't
3246 * call this routine, because then the offline message in the volume header
3247 * (or other information) won't be available to clients. For NAMEI, also
3248 * close the file handles. However, the fileserver does call this during
3249 * an attach following a volume operation.
3252 VDetachVolume_r(Error * ec, Volume * vp)
3255 struct DiskPartition64 *tpartp;
3256 int notifyServer = 0;
3257 int useDone = FSYNC_VOL_ON;
3259 *ec = 0; /* always "succeeds" */
3260 if (programType == volumeUtility) {
3261 notifyServer = vp->needsPutBack;
3262 if (V_destroyMe(vp) == DESTROY_ME)
3263 useDone = FSYNC_VOL_DONE;
3264 #ifdef AFS_DEMAND_ATTACH_FS
3265 else if (!V_blessed(vp) || !V_inService(vp))
3266 useDone = FSYNC_VOL_LEAVE_OFF;
3269 tpartp = vp->partition;
3271 DeleteVolumeFromHashTable(vp);
3272 vp->shuttingDown = 1;
3273 #ifdef AFS_DEMAND_ATTACH_FS
3274 DeleteVolumeFromVByPList_r(vp);
3276 VChangeState_r(vp, VOL_STATE_SHUTTING_DOWN);
3278 if (programType != fileServer)
3280 #endif /* AFS_DEMAND_ATTACH_FS */
3282 /* Will be detached sometime in the future--this is OK since volume is offline */
3284 /* XXX the following code should really be moved to VCheckDetach() since the volume
3285 * is not technically detached until the refcounts reach zero
3287 #ifdef FSSYNC_BUILD_CLIENT
3288 if (programType == volumeUtility && notifyServer) {
3290 * Note: The server is not notified in the case of a bogus volume
3291 * explicitly to make it possible to create a volume, do a partial
3292 * restore, then abort the operation without ever putting the volume
3293 * online. This is essential in the case of a volume move operation
3294 * between two partitions on the same server. In that case, there
3295 * would be two instances of the same volume, one of them bogus,
2296 * which the file server would attempt to bring online.
3298 FSYNC_VolOp(volume, tpartp->name, useDone, 0, NULL);
3299 /* XXX this code path is only hit by volume utilities, thus
3300 * V_BreakVolumeCallbacks will always be NULL. if we really
3301 * want to break callbacks in this path we need to use FSYNC_VolOp() */
3303 /* Detaching it, so break all callbacks on it */
3304 if (V_BreakVolumeCallbacks) {
3305 Log("volume %u detached; breaking all call backs\n", volume);
3306 (*V_BreakVolumeCallbacks) (volume);
3310 #endif /* FSSYNC_BUILD_CLIENT */
3314 VDetachVolume(Error * ec, Volume * vp)
3317 VDetachVolume_r(ec, vp);
3322 /***************************************************/
3323 /* Volume fd/inode handle closing routines */
3324 /***************************************************/
3326 /* For VDetachVolume, we close all cached file descriptors, but keep
3327 * the Inode handles in case we need to read from a busy volume.
3329 /* for demand attach, caller MUST hold ref count on vp */
3331 VCloseVolumeHandles_r(Volume * vp)
3333 #ifdef AFS_DEMAND_ATTACH_FS
3334 VolState state_save;
3336 state_save = VChangeState_r(vp, VOL_STATE_OFFLINING);
3341 * XXX need to investigate whether we can perform
3342 * DFlushVolume outside of vol_glock_mutex...
3344 * VCloseVnodeFiles_r drops the glock internally */
3345 DFlushVolume(V_id(vp));
3346 VCloseVnodeFiles_r(vp);
3348 #ifdef AFS_DEMAND_ATTACH_FS
3352 /* Too time consuming and unnecessary for the volserver */
3353 if (programType != volumeUtility) {
3354 IH_CONDSYNC(vp->vnodeIndex[vLarge].handle);
3355 IH_CONDSYNC(vp->vnodeIndex[vSmall].handle);
3356 IH_CONDSYNC(vp->diskDataHandle);
3358 IH_CONDSYNC(vp->linkHandle);
3359 #endif /* AFS_NT40_ENV */
3362 IH_REALLYCLOSE(vp->vnodeIndex[vLarge].handle);
3363 IH_REALLYCLOSE(vp->vnodeIndex[vSmall].handle);
3364 IH_REALLYCLOSE(vp->diskDataHandle);
3365 IH_REALLYCLOSE(vp->linkHandle);
3367 #ifdef AFS_DEMAND_ATTACH_FS
3369 VChangeState_r(vp, state_save);
3373 /* For both VForceOffline and VOffline, we close all relevant handles.
3374 * For VOffline, if we re-attach the volume, the files may possibly be
3375 * different than before.
3377 /* for demand attach, caller MUST hold a ref count on vp */
3379 VReleaseVolumeHandles_r(Volume * vp)
3381 #ifdef AFS_DEMAND_ATTACH_FS
3382 VolState state_save;
3384 state_save = VChangeState_r(vp, VOL_STATE_DETACHING);
3387 /* XXX need to investigate whether we can perform
3388 * DFlushVolume outside of vol_glock_mutex... */
3389 DFlushVolume(V_id(vp));
3391 VReleaseVnodeFiles_r(vp); /* releases the glock internally */
3393 #ifdef AFS_DEMAND_ATTACH_FS
3397 /* Too time consuming and unnecessary for the volserver */
3398 if (programType != volumeUtility) {
3399 IH_CONDSYNC(vp->vnodeIndex[vLarge].handle);
3400 IH_CONDSYNC(vp->vnodeIndex[vSmall].handle);
3401 IH_CONDSYNC(vp->diskDataHandle);
3403 IH_CONDSYNC(vp->linkHandle);
3404 #endif /* AFS_NT40_ENV */
3407 IH_RELEASE(vp->vnodeIndex[vLarge].handle);
3408 IH_RELEASE(vp->vnodeIndex[vSmall].handle);
3409 IH_RELEASE(vp->diskDataHandle);
3410 IH_RELEASE(vp->linkHandle);
3412 #ifdef AFS_DEMAND_ATTACH_FS
3414 VChangeState_r(vp, state_save);
3419 /***************************************************/
3420 /* Volume write and fsync routines */
3421 /***************************************************/
3424 VUpdateVolume_r(Error * ec, Volume * vp, int flags)
3426 #ifdef AFS_DEMAND_ATTACH_FS
3427 VolState state_save;
3429 if (flags & VOL_UPDATE_WAIT) {
3430 VCreateReservation_r(vp);
3431 VWaitExclusiveState_r(vp);
3436 if (programType == fileServer)
3438 (V_inUse(vp) ? V_nextVnodeUnique(vp) +
3439 200 : V_nextVnodeUnique(vp));
3441 #ifdef AFS_DEMAND_ATTACH_FS
3442 state_save = VChangeState_r(vp, VOL_STATE_UPDATING);
3446 WriteVolumeHeader_r(ec, vp);
3448 #ifdef AFS_DEMAND_ATTACH_FS
3450 VChangeState_r(vp, state_save);
3451 if (flags & VOL_UPDATE_WAIT) {
3452 VCancelReservation_r(vp);
3457 Log("VUpdateVolume: error updating volume header, volume %u (%s)\n",
3458 V_id(vp), V_name(vp));
3459 /* try to update on-disk header,
3460 * while preventing infinite recursion */
3461 if (!(flags & VOL_UPDATE_NOFORCEOFF)) {
3462 VForceOffline_r(vp, VOL_FORCEOFF_NOUPDATE);
3468 VUpdateVolume(Error * ec, Volume * vp)
3471 VUpdateVolume_r(ec, vp, VOL_UPDATE_WAIT);
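/* Illustrative sketch (not part of the original source): after mutating an
 * in-core header field, the change must be written back or it is lost; on
 * write failure, VUpdateVolume forces the volume offline (see above).
 * ExampleUpdateHeader is hypothetical. */
#if 0
static void
ExampleUpdateHeader(Volume * vp)
{
    Error ec;

    V_dontSalvage(vp) = DONT_SALVAGE;    /* example in-core change */
    VUpdateVolume(&ec, vp);
    if (ec)
        Log("header update failed for volume %u\n", V_id(vp));
}
#endif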
3476 VSyncVolume_r(Error * ec, Volume * vp, int flags)
3480 #ifdef AFS_DEMAND_ATTACH_FS
3481 VolState state_save;
3484 if (flags & VOL_SYNC_WAIT) {
3485 VUpdateVolume_r(ec, vp, VOL_UPDATE_WAIT);
3487 VUpdateVolume_r(ec, vp, 0);
3490 #ifdef AFS_DEMAND_ATTACH_FS
3491 state_save = VChangeState_r(vp, VOL_STATE_UPDATING);
3494 fdP = IH_OPEN(V_diskDataHandle(vp));
3495 assert(fdP != NULL);
3496 code = FDH_SYNC(fdP);
3499 #ifdef AFS_DEMAND_ATTACH_FS
3501 VChangeState_r(vp, state_save);
3507 VSyncVolume(Error * ec, Volume * vp)
3510 VSyncVolume_r(ec, vp, VOL_SYNC_WAIT);
3515 /***************************************************/
3516 /* Volume deallocation routines */
3517 /***************************************************/
3519 #ifdef AFS_DEMAND_ATTACH_FS
3521 FreeVolume(Volume * vp)
3523 /* free the heap space, iff it's safe.
3524 * otherwise, pull it out of the hash table, so it
3525 * will get deallocated when all refs to it go away */
3526 if (!VCheckFree(vp)) {
3527 DeleteVolumeFromHashTable(vp);
3528 DeleteVolumeFromVByPList_r(vp);
3530 /* make sure we invalidate the header cache entry */
3531 FreeVolumeHeader(vp);
3534 #endif /* AFS_DEMAND_ATTACH_FS */
3537 ReallyFreeVolume(Volume * vp)
3542 #ifdef AFS_DEMAND_ATTACH_FS
3544 VChangeState_r(vp, VOL_STATE_FREED);
3545 if (vp->pending_vol_op)
3546 free(vp->pending_vol_op);
3547 #endif /* AFS_DEMAND_ATTACH_FS */
3548 for (i = 0; i < nVNODECLASSES; i++)
3549 if (vp->vnodeIndex[i].bitmap)
3550 free(vp->vnodeIndex[i].bitmap);
3551 FreeVolumeHeader(vp);
3552 #ifndef AFS_DEMAND_ATTACH_FS
3553 DeleteVolumeFromHashTable(vp);
3554 #endif /* AFS_DEMAND_ATTACH_FS */
3558 /* check to see if we should shut down this volume
3559 * returns 1 if volume was freed, 0 otherwise */
3560 #ifdef AFS_DEMAND_ATTACH_FS
3562 VCheckDetach(register Volume * vp)
3567 if (vp->nUsers || vp->nWaiters)
3570 if (vp->shuttingDown) {
3572 if ((programType != fileServer) &&
3573 (V_inUse(vp) == programType) &&
3574 ((V_checkoutMode(vp) == V_VOLUPD) ||
3575 (V_checkoutMode(vp) == V_SECRETLY) ||
3576 ((V_checkoutMode(vp) == V_CLONE) &&
3577 (VolumeWriteable(vp))))) {
3579 VUpdateVolume_r(&ec, vp, VOL_UPDATE_NOFORCEOFF);
3581 Log("VCheckDetach: volume header update for volume %u "
3582 "failed with errno %d\n", vp->hashid, errno);
3585 VReleaseVolumeHandles_r(vp);
3587 ReallyFreeVolume(vp);
3588 if (programType == fileServer) {
3589 assert(pthread_cond_broadcast(&vol_put_volume_cond) == 0);
3594 #else /* AFS_DEMAND_ATTACH_FS */
3596 VCheckDetach(register Volume * vp)
3604 if (vp->shuttingDown) {
3606 if ((programType != fileServer) &&
3607 (V_inUse(vp) == programType) &&
3608 ((V_checkoutMode(vp) == V_VOLUPD) ||
3609 (V_checkoutMode(vp) == V_SECRETLY) ||
3610 ((V_checkoutMode(vp) == V_CLONE) &&
3611 (VolumeWriteable(vp))))) {
3613 VUpdateVolume_r(&ec, vp, VOL_UPDATE_NOFORCEOFF);
3615 Log("VCheckDetach: volume header update for volume %u failed with errno %d\n",
3619 VReleaseVolumeHandles_r(vp);
3620 ReallyFreeVolume(vp);
3621 if (programType == fileServer) {
3622 #if defined(AFS_PTHREAD_ENV)
3623 assert(pthread_cond_broadcast(&vol_put_volume_cond) == 0);
3624 #else /* AFS_PTHREAD_ENV */
3625 LWP_NoYieldSignal(VPutVolume);
3626 #endif /* AFS_PTHREAD_ENV */
3631 #endif /* AFS_DEMAND_ATTACH_FS */
3633 /* check to see if we should offline this volume
3634 * return 1 if volume went offline, 0 otherwise */
3635 #ifdef AFS_DEMAND_ATTACH_FS
3637 VCheckOffline(register Volume * vp)
3641 if (vp->goingOffline && !vp->nUsers) {
3643 assert(programType == fileServer);
3644 assert((V_attachState(vp) != VOL_STATE_ATTACHED) &&
3645 (V_attachState(vp) != VOL_STATE_FREED) &&
3646 (V_attachState(vp) != VOL_STATE_PREATTACHED) &&
3647 (V_attachState(vp) != VOL_STATE_UNATTACHED));
3651 * VOL_STATE_GOING_OFFLINE
3652 * VOL_STATE_SHUTTING_DOWN
3653 * VIsErrorState(V_attachState(vp))
3654 * VIsExclusiveState(V_attachState(vp))
3657 VCreateReservation_r(vp);
3658 VChangeState_r(vp, VOL_STATE_OFFLINING);
3661 /* must clear the goingOffline flag before we drop the glock */
3662 vp->goingOffline = 0;
3667 /* perform async operations */
3668 VUpdateVolume_r(&error, vp, 0);
3669 VCloseVolumeHandles_r(vp);
3672 Log("VOffline: Volume %u (%s) is now offline", V_id(vp),
3674 if (V_offlineMessage(vp)[0])
3675 Log(" (%s)", V_offlineMessage(vp));
3679 /* invalidate the volume header cache entry */
3680 FreeVolumeHeader(vp);
3682 /* if nothing changed state to error or salvaging,
3683 * drop state to unattached */
3684 if (!VIsErrorState(V_attachState(vp))) {
3685 VChangeState_r(vp, VOL_STATE_UNATTACHED);
3687 VCancelReservation_r(vp);
3688 /* no usage of vp is safe beyond this point */
3692 #else /* AFS_DEMAND_ATTACH_FS */
3694 VCheckOffline(register Volume * vp)
3698 if (vp->goingOffline && !vp->nUsers) {
3700 assert(programType == fileServer);
3703 vp->goingOffline = 0;
3705 VUpdateVolume_r(&error, vp, 0);
3706 VCloseVolumeHandles_r(vp);
3708 Log("VOffline: Volume %u (%s) is now offline", V_id(vp),
3710 if (V_offlineMessage(vp)[0])
3711 Log(" (%s)", V_offlineMessage(vp));
3714 FreeVolumeHeader(vp);
3715 #ifdef AFS_PTHREAD_ENV
3716 assert(pthread_cond_broadcast(&vol_put_volume_cond) == 0);
3717 #else /* AFS_PTHREAD_ENV */
3718 LWP_NoYieldSignal(VPutVolume);
3719 #endif /* AFS_PTHREAD_ENV */
3723 #endif /* AFS_DEMAND_ATTACH_FS */
3725 /***************************************************/
3726 /* demand attach fs ref counting routines */
3727 /***************************************************/
3729 #ifdef AFS_DEMAND_ATTACH_FS
3730 /* the following two functions handle reference counting for
3731 * asynchronous operations on volume structs.
3733 * their purpose is to prevent a VDetachVolume or VShutdown
3734 * from free()ing the Volume struct during an async i/o op */
3736 /* register with the async volume op ref counter */
3737 /* VCreateReservation_r moved into inline code header because it
3738 * is now needed in vnode.c -- tkeiser 11/20/2007
3742 * decrement volume-package internal refcount.
3744 * @param vp volume object pointer
3746 * @internal volume package internal use only
3749 * @arg VOL_LOCK is held
3750 * @arg lightweight refcount held
3752 * @post volume waiters refcount is decremented; volume may
3753 * have been deallocated/shutdown/offlined/salvaged/
3754 * whatever during the process
3756 * @warning once you have tossed your last reference (you can acquire
3757 * lightweight refs recursively) it is NOT SAFE to reference
3758 * a volume object pointer ever again
3760 * @see VCreateReservation_r
3762 * @note DEMAND_ATTACH_FS only
3765 VCancelReservation_r(Volume * vp)
3767 assert(--vp->nWaiters >= 0);
3768 if (vp->nWaiters == 0) {
3770 if (!VCheckDetach(vp)) {
3777 /* check to see if we should free this volume now
3778 * return 1 if volume was freed, 0 otherwise */
3780 VCheckFree(Volume * vp)
3783 if ((vp->nUsers == 0) &&
3784 (vp->nWaiters == 0) &&
3785 !(V_attachFlags(vp) & (VOL_IN_HASH |
3789 ReallyFreeVolume(vp);
3794 #endif /* AFS_DEMAND_ATTACH_FS */
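/* Illustrative sketch (DAFS only; not part of the original source): the
 * lightweight reservation pattern used throughout this file to keep a
 * Volume struct alive across a blocking wait. ExampleReservation is
 * hypothetical; VOL_LOCK is assumed to be held on entry. */
#if 0
static void
ExampleReservation(Volume * vp)
{
    VCreateReservation_r(vp);      /* nWaiters++ */
    VWaitExclusiveState_r(vp);     /* may drop and reacquire the glock */
    /* ... inspect or mutate vp while no exclusive op is running ... */
    VCancelReservation_r(vp);      /* nWaiters--; vp may be freed */
    /* vp must not be referenced after the last reservation is cancelled */
}
#endif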
3797 /***************************************************/
3798 /* online volume operations routines */
3799 /***************************************************/
3801 #ifdef AFS_DEMAND_ATTACH_FS
3803 * register a volume operation on a given volume.
3805 * @param[in] vp volume object
3806 * @param[in] vopinfo volume operation info object
3808 * @pre VOL_LOCK is held
3810 * @post volume operation info object attached to volume object.
3811 * volume operation statistics updated.
3813 * @note by "attached" we mean a copy of the passed in object is made
3815 * @internal volume package internal use only
3818 VRegisterVolOp_r(Volume * vp, FSSYNC_VolOp_info * vopinfo)
3820 FSSYNC_VolOp_info * info;
3822 /* attach a vol op info node to the volume struct */
3823 info = (FSSYNC_VolOp_info *) malloc(sizeof(FSSYNC_VolOp_info));
3824 assert(info != NULL);
3825 memcpy(info, vopinfo, sizeof(FSSYNC_VolOp_info));
3826 vp->pending_vol_op = info;
3829 vp->stats.last_vol_op = FT_ApproxTime();
3830 vp->stats.vol_ops++;
3831 IncUInt64(&VStats.vol_ops);
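/* Illustrative sketch (DAFS only; not part of the original source): field
 * names follow the uses of FSSYNC_VolOp_info elsewhere in this file, and
 * ExampleRegisterVolOp is hypothetical. VRegisterVolOp_r copies the info
 * object, so a stack-allocated one is fine. */
#if 0
static void
ExampleRegisterVolOp(Volume * vp)
{
    FSSYNC_VolOp_info info;

    memset(&info, 0, sizeof(info));
    info.com.command = FSYNC_VOL_NEEDVOLUME;
    info.com.reason = V_DUMP;               /* e.g. a dump checkout */
    info.vol_op_state = FSSYNC_VolOpPending;

    VRegisterVolOp_r(vp, &info);    /* attaches a private copy to vp */
    /* ... later, when the operation completes: VDeregisterVolOp_r(vp); */
}
#endif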
3837 * deregister the volume operation attached to this volume.
3839 * @param[in] vp volume object pointer
3841 * @pre VOL_LOCK is held
3843 * @post the volume operation info object is detached from the volume object
3845 * @internal volume package internal use only
3848 VDeregisterVolOp_r(Volume * vp)
3850 if (vp->pending_vol_op) {
3851 free(vp->pending_vol_op);
3852 vp->pending_vol_op = NULL;
3856 #endif /* AFS_DEMAND_ATTACH_FS */
3859 * determine whether it is safe to leave a volume online during
3860 * the volume operation described by the vopinfo object.
3862 * @param[in] vp volume object
3863 * @param[in] vopinfo volume operation info object
3865 * @return whether it is safe to leave volume online
3866 * @retval 0 it is NOT SAFE to leave the volume online
3867 * @retval 1 it is safe to leave the volume online during the operation
3870 * @arg VOL_LOCK is held
3871 * @arg disk header attached to vp (heavyweight ref on vp will guarantee
3872 * this condition is met)
3874 * @internal volume package internal use only
3877 VVolOpLeaveOnline_r(Volume * vp, FSSYNC_VolOp_info * vopinfo)
3879 return (vopinfo->vol_op_state == FSSYNC_VolOpRunningOnline ||
3880 (vopinfo->com.command == FSYNC_VOL_NEEDVOLUME &&
3881 (vopinfo->com.reason == V_READONLY ||
3882 (!VolumeWriteable(vp) &&
3883 (vopinfo->com.reason == V_CLONE ||
3884 vopinfo->com.reason == V_DUMP)))));
3888 * determine whether VBUSY should be set during this volume operation.
3890 * @param[in] vp volume object
3891 * @param[in] vopinfo volume operation info object
3893 * @return whether VBUSY should be set
3894 * @retval 0 VBUSY does NOT need to be set
3895 * @retval 1 VBUSY SHOULD be set
3897 * @pre VOL_LOCK is held
3899 * @internal volume package internal use only
3902 VVolOpSetVBusy_r(Volume * vp, FSSYNC_VolOp_info * vopinfo)
3904 return ((vopinfo->com.command == FSYNC_VOL_OFF &&
3905 vopinfo->com.reason == FSYNC_SALVAGE) ||
3906 (vopinfo->com.command == FSYNC_VOL_NEEDVOLUME &&
3907 (vopinfo->com.reason == V_CLONE ||
3908 vopinfo->com.reason == V_DUMP)));
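/* Illustrative sketch (not part of the original source): how the two
 * predicates above are typically combined when a pending vol op is
 * examined, mirroring the logic in attach2. ExampleApplyVolOpPolicy is
 * hypothetical. */
#if 0
static void
ExampleApplyVolOpPolicy(Volume * vp, FSSYNC_VolOp_info * vopinfo)
{
    if (VVolOpLeaveOnline_r(vp, vopinfo)) {
        /* e.g. a clone or dump of a read-only volume: stay online */
        vopinfo->vol_op_state = FSSYNC_VolOpRunningOnline;
    } else {
        vopinfo->vol_op_state = FSSYNC_VolOpRunningOffline;
        if (VVolOpSetVBusy_r(vp, vopinfo))
            vp->specialStatus = VBUSY;    /* clients see VBUSY, not VOFFLINE */
    }
}
#endif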
3912 /***************************************************/
3913 /* online salvager routines */
3914 /***************************************************/
3915 #if defined(AFS_DEMAND_ATTACH_FS)
3916 #define SALVAGE_PRIO_UPDATE_INTERVAL 3 /**< number of seconds between prio updates */
3917 #define SALVAGE_COUNT_MAX 16 /**< number of online salvages we
3918 * allow before moving the volume
3919 * into a permanent error state
3921 * once this threshold is reached,
3922 * the operator will have to manually
3923 * issue a 'bos salvage' to bring
3924 * the volume back online
3928 * check whether a salvage needs to be performed on this volume.
3930 * @param[in] vp pointer to volume object
3932 * @return status code
3933 * @retval 0 no salvage scheduled
3934 * @retval 1 a salvage has been scheduled with the salvageserver
3936 * @pre VOL_LOCK is held
3938 * @post if salvage request flag is set and nUsers and nWaiters are zero,
3939 * then a salvage will be requested
3941 * @note this is one of the event handlers called by VCancelReservation_r
3943 * @see VCancelReservation_r
3945 * @internal volume package internal use only.
3948 VCheckSalvage(register Volume * vp)
3951 #ifdef SALVSYNC_BUILD_CLIENT
3952 if (vp->nUsers || vp->nWaiters)
3954 if (vp->salvage.requested) {
3955 VScheduleSalvage_r(vp);
3958 #endif /* SALVSYNC_BUILD_CLIENT */
3963 * request volume salvage.
3965 * @param[out] ec computed client error code
3966 * @param[in] vp volume object pointer
3967 * @param[in] reason reason code (passed to salvageserver via SALVSYNC)
3968 * @param[in] flags see flags note below
3971 * VOL_SALVAGE_INVALIDATE_HEADER causes volume header cache entry
3972 * to be invalidated.
3974 * @pre VOL_LOCK is held.
3976 * @post volume state is changed.
3977 * for fileserver, salvage will be requested once refcount reaches zero.
3979 * @return operation status code
3980 * @retval 0 volume salvage will occur
3981 * @retval 1 volume salvage could not be scheduled
3983 * @note DAFS fileserver only
3985 * @note this call does not synchronously schedule a volume salvage. rather,
3986 * it sets volume state so that when volume refcounts reach zero, a
3987 * volume salvage will occur. by "refcounts", we mean both nUsers and
3988 * nWaiters must be zero.
3990 * @internal volume package internal use only.
3993 VRequestSalvage_r(Error * ec, Volume * vp, int reason, int flags)
3997 * for DAFS volume utilities, transition to error state
3998 * (at some point in the future, we should consider
3999 * making volser talk to salsrv)
4001 if (programType != fileServer) {
4002 VChangeState_r(vp, VOL_STATE_ERROR);
4007 if (!vp->salvage.requested) {
4008 vp->salvage.requested = 1;
4009 vp->salvage.reason = reason;
4010 vp->stats.last_salvage = FT_ApproxTime();
4011 if (VIsSalvager(V_inUse(vp))) {
4012 Log("VRequestSalvage: volume %u appears to be salvaging, but we\n", vp->hashid);
4013 Log(" didn't request a salvage. Forcing it offline waiting for the\n");
4014 Log(" salvage to finish; if you are sure no salvage is running,\n");
4015 Log(" run a salvage manually.\n");
4017 /* make sure neither VScheduleSalvage_r nor
4018 * VUpdateSalvagePriority_r try to schedule another salvage */
4019 vp->salvage.requested = vp->salvage.scheduled = 0;
4021 /* these stats aren't correct, but doing this makes them
4022 * slightly closer to being correct */
4023 vp->stats.salvages++;
4024 vp->stats.last_salvage_req = FT_ApproxTime();
4025 IncUInt64(&VStats.salvages);
4027 VChangeState_r(vp, VOL_STATE_ERROR);
4031 } else if (vp->stats.salvages < SALVAGE_COUNT_MAX) {
4032 VChangeState_r(vp, VOL_STATE_SALVAGING);
4035 Log("VRequestSalvage: volume %u online salvaged too many times; forced offline.\n", vp->hashid);
4036 VChangeState_r(vp, VOL_STATE_ERROR);
4040 if (flags & VOL_SALVAGE_INVALIDATE_HEADER) {
4041 /* Instead of ReleaseVolumeHeader, we do FreeVolumeHeader()
4042 so that the next VAttachVolumeByVp_r() invocation
4043 of attach2() will fail to find a cached header
4044 entry, and will instead load a fresh one from disk and attach
4047 FreeVolumeHeader(vp);
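/* Illustrative sketch (DAFS fileserver only; not part of the original
 * source): requesting a deferred salvage from an error path. The salvage is
 * only scheduled once nUsers and nWaiters drain to zero, per the note
 * above. ExampleRequestSalvage is hypothetical; VOL_LOCK is assumed held. */
#if 0
static void
ExampleRequestSalvage(Volume * vp)
{
    Error ec;

    if (VRequestSalvage_r(&ec, vp, SALVSYNC_ERROR,
                          VOL_SALVAGE_INVALIDATE_HEADER) == 0) {
        /* salvage will occur when the volume's refcounts reach zero */
    } else {
        /* salvage could not be scheduled; volume is in an error state */
    }
}
#endif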
4054 * update salvageserver scheduling priority for a volume.
4056 * @param[in] vp pointer to volume object
4058 * @return operation status
4060 * @retval 1 request denied, or SALVSYNC communications failure
4062 * @pre VOL_LOCK is held.
4064 * @post in-core salvage priority counter is incremented. if at least
4065 * SALVAGE_PRIO_UPDATE_INTERVAL seconds have elapsed since the
4066 * last SALVSYNC_RAISEPRIO request, we contact the salvageserver
4067 * to update its priority queue. if no salvage is scheduled,
4068 * this function is a no-op.
4070 * @note DAFS fileserver only
4072 * @note this should be called whenever a VGetVolume fails due to a
4073 * pending salvage request
4075 * @todo should set exclusive state and drop glock around salvsync call
4077 * @internal volume package internal use only.
4080 VUpdateSalvagePriority_r(Volume * vp)
4085 #ifdef SALVSYNC_BUILD_CLIENT
4087 now = FT_ApproxTime();
4089 /* update the salvageserver priority queue occasionally so that
4090 * frequently requested volumes get moved to the head of the queue
4092 if ((vp->salvage.scheduled) &&
4093 (vp->stats.last_salvage_req < (now-SALVAGE_PRIO_UPDATE_INTERVAL))) {
4094 code = SALVSYNC_SalvageVolume(vp->hashid,
4095 VPartitionPath(vp->partition),
4100 vp->stats.last_salvage_req = now;
4101 if (code != SYNC_OK) {
4105 #endif /* SALVSYNC_BUILD_CLIENT */
4111 * schedule a salvage with the salvage server.
4113 * @param[in] vp pointer to volume object
4115 * @return operation status
4116 * @retval 0 salvage scheduled successfully
4117 * @retval 1 salvage not scheduled, or SALVSYNC com error
4120 * @arg VOL_LOCK is held.
4121 * @arg nUsers and nWaiters should be zero.
4123 * @post salvageserver is sent a salvage request
4125 * @note DAFS fileserver only
4127 * @internal volume package internal use only.
4130 VScheduleSalvage_r(Volume * vp)
4133 #ifdef SALVSYNC_BUILD_CLIENT
4134 VolState state_save;
4135 VThreadOptions_t * thread_opts;
4138 if (vp->nWaiters || vp->nUsers) {
4142 /* prevent endless salvage,attach,salvage,attach,... loops */
4143 if (vp->stats.salvages >= SALVAGE_COUNT_MAX)
4147 * don't perform salvsync ops on certain threads
4149 thread_opts = pthread_getspecific(VThread_key);
4150 if (thread_opts == NULL) {
4151 thread_opts = &VThread_defaults;
4153 if (thread_opts->disallow_salvsync) {
4158 * XXX the scheduling process should really be done asynchronously
4159 * to avoid fssync deadlocks
4161 if (!vp->salvage.scheduled) {
4162 /* if we haven't previously scheduled a salvage, do so now
4164 * set the volume to an exclusive state and drop the lock
4165 * around the SALVSYNC call
4167 * note that we do NOT acquire a reservation here -- doing so
4168 * could result in unbounded recursion
4170 strlcpy(partName, VPartitionPath(vp->partition), sizeof(partName));
4171 state_save = VChangeState_r(vp, VOL_STATE_SALVSYNC_REQ);
4174 /* can't use V_id() since there's no guarantee
4175 * we have the disk data header at this point */
4176 code = SALVSYNC_SalvageVolume(vp->hashid,
4183 VChangeState_r(vp, state_save);
4185 if (code == SYNC_OK) {
4186 vp->salvage.scheduled = 1;
4187 vp->stats.salvages++;
4188 vp->stats.last_salvage_req = FT_ApproxTime();
4189 IncUInt64(&VStats.salvages);
4193 case SYNC_BAD_COMMAND:
4194 case SYNC_COM_ERROR:
4197 Log("VScheduleSalvage_r: SALVSYNC request denied\n");
4200 Log("VScheduleSalvage_r: SALVSYNC unknown protocol error\n");
4205 #endif /* SALVSYNC_BUILD_CLIENT */
4209 #ifdef SALVSYNC_BUILD_CLIENT
4211 * connect to the salvageserver SYNC service.
4213 * @return operation status
4217 * @post connection to salvageserver SYNC service established
4219 * @see VConnectSALV_r
4220 * @see VDisconnectSALV
4221 * @see VReconnectSALV
4228 retVal = VConnectSALV_r();
4234 * connect to the salvageserver SYNC service.
4236 * @return operation status
4240 * @pre VOL_LOCK is held.
4242 * @post connection to salvageserver SYNC service established
4245 * @see VDisconnectSALV_r
4246 * @see VReconnectSALV_r
4247 * @see SALVSYNC_clientInit
4249 * @internal volume package internal use only.
4252 VConnectSALV_r(void)
4254 return SALVSYNC_clientInit();
4258 * disconnect from the salvageserver SYNC service.
4260 * @return operation status
4263 * @pre client should have a live connection to the salvageserver
4265 * @post connection to salvageserver SYNC service destroyed
4267 * @see VDisconnectSALV_r
4269 * @see VReconnectSALV
4272 VDisconnectSALV(void)
4276 VDisconnectSALV_r();
4282 * disconnect from the salvageserver SYNC service.
4284 * @return operation status
4288 * @arg VOL_LOCK is held.
4289 * @arg client should have a live connection to the salvageserver.
4291 * @post connection to salvageserver SYNC service destroyed
4293 * @see VDisconnectSALV
4294 * @see VConnectSALV_r
4295 * @see VReconnectSALV_r
4296 * @see SALVSYNC_clientFinis
4298 * @internal volume package internal use only.
4301 VDisconnectSALV_r(void)
4303 return SALVSYNC_clientFinis();
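/* Illustrative sketch (not part of the original source): a salvsync client
 * session bracketed by connect/disconnect. ExampleSalvsyncSession is
 * hypothetical, and we assume a nonzero return from VConnectSALV indicates
 * success. */
#if 0
static void
ExampleSalvsyncSession(void)
{
    if (!VConnectSALV()) {
        Log("could not connect to the salvageserver SYNC service\n");
        return;
    }
    /* ... issue SALVSYNC requests, e.g. via VScheduleSalvage_r ... */
    VDisconnectSALV();
}
#endif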
4307 * disconnect and then re-connect to the salvageserver SYNC service.
4309 * @return operation status
4313 * @pre client should have a live connection to the salvageserver
4315 * @post old connection is dropped, and a new one is established
4318 * @see VDisconnectSALV
4319 * @see VReconnectSALV_r
4322 VReconnectSALV(void)
4326 retVal = VReconnectSALV_r();
4332 * disconnect and then re-connect to the salvageserver SYNC service.
4334 * @return operation status
4339 * @arg VOL_LOCK is held.
4340 * @arg client should have a live connection to the salvageserver.
4342 * @post old connection is dropped, and a new one is established
4344 * @see VConnectSALV_r
4345 * @see VDisconnectSALV
4346 * @see VReconnectSALV
4347 * @see SALVSYNC_clientReconnect
4349 * @internal volume package internal use only.
4352 VReconnectSALV_r(void)
4354 return SALVSYNC_clientReconnect();
4356 #endif /* SALVSYNC_BUILD_CLIENT */
4357 #endif /* AFS_DEMAND_ATTACH_FS */
4360 /***************************************************/
4361 /* FSSYNC routines */
4362 /***************************************************/
4364 /* This must be called by any volume utility which needs to run while the
4365 file server is also running. This is separated from VInitVolumePackage so
4366 that a utility can fork--and each of the children can independently
4367 initialize communication with the file server */
4368 #ifdef FSSYNC_BUILD_CLIENT
4370 * connect to the fileserver SYNC service.
4372 * @return operation status
4377 * @arg VInit must equal 2.
4378 * @arg Program Type must not be fileserver or salvager.
4380 * @post connection to fileserver SYNC service established
4383 * @see VDisconnectFS
4384 * @see VChildProcReconnectFS
4391 retVal = VConnectFS_r();
4397 * connect to the fileserver SYNC service.
4399 * @return operation status
4404 * @arg VInit must equal 2.
4405 * @arg Program Type must not be fileserver or salvager.
4406 * @arg VOL_LOCK is held.
4408 * @post connection to fileserver SYNC service established
4411 * @see VDisconnectFS_r
4412 * @see VChildProcReconnectFS_r
4414 * @internal volume package internal use only.
4420 assert((VInit == 2) &&
4421 (programType != fileServer) &&
4422 (programType != salvager));
4423 rc = FSYNC_clientInit();
4430 * disconnect from the fileserver SYNC service.
4433 * @arg client should have a live connection to the fileserver.
4434 * @arg VOL_LOCK is held.
4435 * @arg Program Type must not be fileserver or salvager.
4437 * @post connection to fileserver SYNC service destroyed
4439 * @see VDisconnectFS
4441 * @see VChildProcReconnectFS_r
4443 * @internal volume package internal use only.
4446 VDisconnectFS_r(void)
4448 assert((programType != fileServer) &&
4449 (programType != salvager));
4450 FSYNC_clientFinis();
4455 * disconnect from the fileserver SYNC service.
4458 * @arg client should have a live connection to the fileserver.
4459 * @arg Program Type must not be fileserver or salvager.
4461 * @post connection to fileserver SYNC service destroyed
4463 * @see VDisconnectFS_r
4465 * @see VChildProcReconnectFS
4476 * connect to the fileserver SYNC service from a child process following a fork.
4478 * @return operation status
4483 * @arg VOL_LOCK is held.
4484 * @arg current FSYNC handle is shared with a parent process
4486 * @post current FSYNC handle is discarded and a new connection to the
4487 * fileserver SYNC service is established
4489 * @see VChildProcReconnectFS
4491 * @see VDisconnectFS_r
4493 * @internal volume package internal use only.
4496 VChildProcReconnectFS_r(void)
4498 return FSYNC_clientChildProcReconnect();
4502 * connect to the fileserver SYNC service from a child process following a fork.
4504 * @return operation status
4508 * @pre current FSYNC handle is shared with a parent process
4510 * @post current FSYNC handle is discarded and a new connection to the
4511 * fileserver SYNC service is established
4513 * @see VChildProcReconnectFS_r
4515 * @see VDisconnectFS
4518 VChildProcReconnectFS(void)
4522 ret = VChildProcReconnectFS_r();
4526 #endif /* FSSYNC_BUILD_CLIENT */
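/* Illustrative sketch (not part of the original source): the fork pattern
 * described above, where each child replaces the inherited FSYNC handle
 * with its own connection. ExampleForkingUtility is hypothetical, and we
 * assume a nonzero return from VConnectFS indicates success. */
#if 0
static void
ExampleForkingUtility(void)
{
    if (!VConnectFS()) {
        Log("could not connect to the fileserver SYNC service\n");
        return;
    }
    if (fork() == 0) {
        /* child: the inherited handle is shared with the parent and
         * must be replaced before use */
        VChildProcReconnectFS();
        /* ... child's volume operations ... */
        VDisconnectFS();
        _exit(0);
    }
    /* ... parent's volume operations ... */
    VDisconnectFS();
}
#endif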
4529 /***************************************************/
4530 /* volume bitmap routines */
4531 /***************************************************/
4534 * For demand attach fs, flags parameter controls
4535 * locking behavior. If (flags & VOL_ALLOC_BITMAP_WAIT)
4536 * is set, then this function will create a reservation
4537 * and block on any other exclusive operations. Otherwise,
4538 * this function assumes the caller already has exclusive
4539 * access to vp, and we just change the volume state.
4542 VAllocBitmapEntry_r(Error * ec, Volume * vp,
4543 struct vnodeIndex *index, int flags)
4546 register byte *bp, *ep;
4547 #ifdef AFS_DEMAND_ATTACH_FS
4548 VolState state_save;
4549 #endif /* AFS_DEMAND_ATTACH_FS */
4553 /* This test is probably redundant */
4554 if (!VolumeWriteable(vp)) {
4555 *ec = (bit32) VREADONLY;
4559 #ifdef AFS_DEMAND_ATTACH_FS
4560 if (flags & VOL_ALLOC_BITMAP_WAIT) {
4561 VCreateReservation_r(vp);
4562 VWaitExclusiveState_r(vp);
4564 state_save = VChangeState_r(vp, VOL_STATE_GET_BITMAP);
4565 #endif /* AFS_DEMAND_ATTACH_FS */
4568 if ((programType == fileServer) && !index->bitmap) {
4570 #ifndef AFS_DEMAND_ATTACH_FS
4571 /* demand attach fs uses the volume state to avoid races.
4572 * specialStatus field is not used at all */
4574 if (vp->specialStatus == VBUSY) {
4575 if (vp->goingOffline) { /* vos dump waiting for the volume to