/*
 * Copyright 2000, International Business Machines Corporation and others.
 * All Rights Reserved.
 *
 * This software has been released under the terms of the IBM Public
 * License.  For details, see the LICENSE file in the top-level source
 * directory or online at http://www.openafs.org/dl/license10.html
 *
 * Portions Copyright (c) 2005-2008 Sine Nomine Associates
 */

/* 1/1/89: NB:  this stuff is all going to be replaced.  Don't take it too seriously */

/*
	Institution:	The Information Technology Center, Carnegie-Mellon University
 */
#include <afsconfig.h>
#include <afs/param.h>

#include <afs/afsint.h>
#ifndef AFS_NT40_ENV
#include <sys/param.h>
#if !defined(AFS_SGI_ENV)
#ifdef AFS_OSF_ENV
#include <ufs/fs.h>
#else /* AFS_OSF_ENV */
#ifdef AFS_VFSINCL_ENV
#define VFS
#ifdef AFS_SUN5_ENV
#include <sys/fs/ufs_fs.h>
#else
#if defined(AFS_DARWIN_ENV) || defined(AFS_XBSD_ENV)
#include <ufs/ufs/dinode.h>
#include <ufs/ffs/fs.h>
#else
#include <ufs/fs.h>
#endif
#endif
#else /* AFS_VFSINCL_ENV */
#if !defined(AFS_AIX_ENV) && !defined(AFS_LINUX20_ENV) && !defined(AFS_XBSD_ENV)
#include <sys/filsys.h>
#endif
#endif /* AFS_VFSINCL_ENV */
#endif /* AFS_OSF_ENV */
#endif /* AFS_SGI_ENV */
#endif /* AFS_NT40_ENV */
#if defined(AFS_SUN_ENV) || defined(AFS_SUN5_ENV)
#ifdef AFS_SUN5_ENV
#include <sys/mnttab.h>
#include <sys/mntent.h>
#else
#include <mntent.h>
#endif
#else
#if defined(AFS_SGI_ENV)
#include <fcntl.h>
#include <mntent.h>
#else
#ifndef AFS_LINUX20_ENV
#include <fstab.h>		/* Need to find in libc 5, present in libc 6 */
#endif
#endif /* AFS_SGI_ENV */
#endif /* AFS_HPUX_ENV */
#ifndef AFS_NT40_ENV
#include <netdb.h>
#include <netinet/in.h>
#ifndef ITIMER_REAL
#include <sys/time.h>
#endif /* ITIMER_REAL */
#endif /* AFS_NT40_ENV */
#if defined(AFS_SUN5_ENV) || defined(AFS_NT40_ENV) || defined(AFS_LINUX20_ENV)
#include <string.h>
#else
#include <strings.h>
#endif

#include <afs/errors.h>
#include <afs/afssyscalls.h>
#include <afs/afsutil.h>

#include "vnode.h"
#include "volume.h"
#include "daemon_com.h"
#include "fssync.h"
#include "salvsync.h"
#include "partition.h"
#include "volume_inline.h"
#ifdef AFS_PTHREAD_ENV
#include <assert.h>
#else /* AFS_PTHREAD_ENV */
#include "afs/assert.h"
#endif /* AFS_PTHREAD_ENV */
#if !defined(offsetof)
#include <stddef.h>
#endif

#ifdef O_LARGEFILE
#define afs_stat	stat64
#define afs_fstat	fstat64
#define afs_open	open64
#else /* !O_LARGEFILE */
#define afs_stat	stat
#define afs_fstat	fstat
#define afs_open	open
#endif /* !O_LARGEFILE */
#ifdef AFS_PTHREAD_ENV
pthread_mutex_t vol_glock_mutex;
pthread_mutex_t vol_trans_mutex;
pthread_cond_t vol_put_volume_cond;
pthread_cond_t vol_sleep_cond;
int vol_attach_threads = 1;
#endif /* AFS_PTHREAD_ENV */

#ifdef AFS_DEMAND_ATTACH_FS
pthread_mutex_t vol_salvsync_mutex;
#endif /* AFS_DEMAND_ATTACH_FS */
extern void *calloc(), *realloc();

/*@printflike@*/ extern void Log(const char *format, ...);
/* Forward declarations */
static Volume *attach2(Error * ec, VolId vid, char *path,
		       register struct VolumeHeader *header,
		       struct DiskPartition *partp, Volume * vp,
		       int isbusy, int mode);
static void ReallyFreeVolume(Volume * vp);
#ifdef AFS_DEMAND_ATTACH_FS
static void FreeVolume(Volume * vp);
#else /* !AFS_DEMAND_ATTACH_FS */
#define FreeVolume(vp) ReallyFreeVolume(vp)
static void VScanUpdateList(void);
#endif /* !AFS_DEMAND_ATTACH_FS */
static void VInitVolumeHeaderCache(afs_uint32 howMany);
static int GetVolumeHeader(register Volume * vp);
static void ReleaseVolumeHeader(register struct volHeader *hd);
static void FreeVolumeHeader(register Volume * vp);
static void AddVolumeToHashTable(register Volume * vp, int hashid);
static void DeleteVolumeFromHashTable(register Volume * vp);
static int VHold(Volume * vp);
static int VHold_r(Volume * vp);
static void VGetBitmap_r(Error * ec, Volume * vp, VnodeClass class);
static void GetVolumePath(Error * ec, VolId volumeId, char **partitionp,
			  char **namep);
static void VReleaseVolumeHandles_r(Volume * vp);
static void VCloseVolumeHandles_r(Volume * vp);
static void LoadVolumeHeader(Error * ec, Volume * vp);
static int VCheckOffline(register Volume * vp);
static int VCheckDetach(register Volume * vp);
static Volume *GetVolume(Error * ec, Error * client_ec, VolId volumeId,
			 Volume * hint, int flags);
static int VolumeExternalName_r(VolumeId volumeId, char *name, size_t len);
int LogLevel;			/* Vice loglevel--not defined as extern so that it will be
				 * defined when not linked with vice, XXXX */
ProgramType programType;	/* The type of program using the package */

/* extended volume package statistics */
VolPkgStats VStats;

#ifdef VOL_LOCK_DEBUG
pthread_t vol_glock_holder = 0;
#endif
#define VOLUME_BITMAP_GROWSIZE	16	/* bytes => 16 * 8 == 128 vnodes */
					/* Must be a multiple of 4 (1 word) !! */

/* this parameter needs to be tunable at runtime.
 * 128 was really inadequate for largish servers -- at 16384 volumes this
 * puts average chain length at 128, thus an average 65 derefs to find a volptr.
 * talk about bad spatial locality...
 *
 * an AVL or splay tree might work a lot better, but we'll just increase
 * the default hash table size for now
 */
#define DEFAULT_VOLUME_HASH_SIZE 256	/* Must be a power of 2!! */
#define DEFAULT_VOLUME_HASH_MASK (DEFAULT_VOLUME_HASH_SIZE-1)
#define VOLUME_HASH(volumeId) (volumeId&(VolumeHashTable.Mask))
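/*
 * Illustrative sketch (hypothetical helper, not in the original source):
 * because the table size is a power of two and Mask == Size-1, the
 * VOLUME_HASH macro above reduces chain selection to a single bitwise
 * AND, equivalent to (volumeId % Size) but cheaper.
 */
static int
ExampleVolumeHashIndex(VolumeId volumeId, afs_uint32 hashSize)
{
    /* valid only when hashSize is a power of two */
    afs_uint32 mask = hashSize - 1;
    return (int)(volumeId & mask);
}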
/*
 * turn volume hash chains into partially ordered lists.
 * when the threshold is exceeded between two adjacent elements,
 * perform a chain rebalancing operation.
 *
 * keep the threshold high in order to keep cache line invalidates
 * low "enough" on SMPs
 */
#define VOLUME_HASH_REORDER_THRESHOLD 200

/*
 * when possible, don't just reorder single elements, but reorder
 * entire chains of elements at once.  a chain of elements that
 * exceeds the element previous to the pivot by at least CHAIN_THRESH
 * accesses is moved in front of the chain whose elements have at
 * least CHAIN_THRESH fewer accesses than the pivot element
 */
#define VOLUME_HASH_REORDER_CHAIN_THRESH (VOLUME_HASH_REORDER_THRESHOLD / 2)
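/*
 * Illustrative sketch (hypothetical helper, not in the original source):
 * the single-element reorder trigger above in isolation.  An element is
 * pulled forward in its hash chain once it has accumulated at least
 * VOLUME_HASH_REORDER_THRESHOLD more lookups than its predecessor.
 */
static int
ExampleHashReorderNeeded(afs_uint32 prev_lookups, afs_uint32 elem_lookups)
{
    return (elem_lookups > prev_lookups) &&
	((elem_lookups - prev_lookups) > VOLUME_HASH_REORDER_THRESHOLD);
}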
#include "rx/rx_queue.h"

VolumeHashTable_t VolumeHashTable = {
    DEFAULT_VOLUME_HASH_SIZE,
    DEFAULT_VOLUME_HASH_MASK,
    NULL
};

static void VInitVolumeHash(void);
/* This function is used where an ffs() call does not exist. Was in util/ffs.c */
#if !defined(AFS_HAVE_FFS)
int
ffs(int x)
{
    afs_int32 ffs_i;
    afs_int32 ffs_tmp = x;
    if (ffs_tmp == 0)
	return (-1);
    for (ffs_i = 1;; ffs_i++) {
	if (ffs_tmp & 1)
	    return (ffs_i);
	ffs_tmp >>= 1;
    }
}
#endif /* !AFS_HAVE_FFS */
#ifdef AFS_PTHREAD_ENV
typedef struct diskpartition_queue_t {
    struct rx_queue queue;
    struct DiskPartition * diskP;
} diskpartition_queue_t;
typedef struct vinitvolumepackage_thread_t {
    struct rx_queue queue;
    pthread_cond_t thread_done_cv;
    int n_threads_complete;
} vinitvolumepackage_thread_t;
static void * VInitVolumePackageThread(void * args);
#endif /* AFS_PTHREAD_ENV */

static int VAttachVolumesByPartition(struct DiskPartition *diskP,
				     int * nAttached, int * nUnattached);
#ifdef AFS_DEMAND_ATTACH_FS
/* demand attach fileserver extensions */

/*
 * in the future we will support serialization of VLRU state into the fs_state
 * dump
 *
 * these structures are the beginning of that effort
 */
struct VLRU_DiskHeader {
    struct versionStamp stamp;	/* magic and structure version number */
    afs_uint32 mtime;		/* time of dump to disk */
    afs_uint32 num_records;	/* number of VLRU_DiskEntry records */
};

struct VLRU_DiskEntry {
    afs_uint32 vid;		/* volume ID */
    afs_uint32 idx;		/* generation */
    afs_uint32 last_get;	/* timestamp of last get */
};

struct VLRU_StartupQueue {
    struct VLRU_DiskEntry * entry;
    int num_entries;
    afs_uint32 next_idx;
};
typedef struct vshutdown_thread_t {
    struct rx_queue q;
    pthread_mutex_t lock;
    pthread_cond_t cv;
    pthread_cond_t master_cv;
    int n_threads;
    int n_threads_complete;
    int vol_remaining;
    int schedule_version;
    int pass;
    byte n_parts;
    byte n_parts_done_pass;
    byte part_thread_target[VOLMAXPARTS+1];
    byte part_done_pass[VOLMAXPARTS+1];
    struct rx_queue * part_pass_head[VOLMAXPARTS+1];
    int stats[4][VOLMAXPARTS+1];
} vshutdown_thread_t;
static void * VShutdownThread(void * args);
static Volume * VAttachVolumeByVp_r(Error * ec, Volume * vp, int mode);
static int VCheckFree(Volume * vp);

/* VByP list maintenance */
static void AddVolumeToVByPList_r(Volume * vp);
static void DeleteVolumeFromVByPList_r(Volume * vp);
static void VVByPListBeginExclusive_r(struct DiskPartition * dp);
static void VVByPListEndExclusive_r(struct DiskPartition * dp);
static void VVByPListWait_r(struct DiskPartition * dp);

/* online salvager */
static int VCheckSalvage(register Volume * vp);
static int VUpdateSalvagePriority_r(Volume * vp);
static int VScheduleSalvage_r(Volume * vp);
static int VCancelSalvage_r(Volume * vp, int reason);

/* Volume hash table */
static void VReorderHash_r(VolumeHashChainHead * head, Volume * pp, Volume * vp);
static void VHashBeginExclusive_r(VolumeHashChainHead * head);
static void VHashEndExclusive_r(VolumeHashChainHead * head);
static void VHashWait_r(VolumeHashChainHead * head);

/* shutdown */
static int ShutdownVByPForPass_r(struct DiskPartition * dp, int pass);
static int ShutdownVolumeWalk_r(struct DiskPartition * dp, int pass,
				struct rx_queue ** idx);
static void ShutdownController(vshutdown_thread_t * params);
static void ShutdownCreateSchedule(vshutdown_thread_t * params);

/* VLRU */
static void VLRU_ComputeConstants(void);
static void VInitVLRU(void);
static void VLRU_Init_Node_r(volatile Volume * vp);
static void VLRU_Add_r(volatile Volume * vp);
static void VLRU_Delete_r(volatile Volume * vp);
static void VLRU_UpdateAccess_r(volatile Volume * vp);
static void * VLRU_ScannerThread(void * args);
static void VLRU_Scan_r(int idx);
static void VLRU_Promote_r(int idx);
static void VLRU_Demote_r(int idx);
static void VLRU_SwitchQueues(volatile Volume * vp, int new_idx, int append);

/* soft detach */
static int VCheckSoftDetach(volatile Volume * vp, afs_uint32 thresh);
static int VCheckSoftDetachCandidate(volatile Volume * vp, afs_uint32 thresh);
static int VSoftDetachVolume_r(volatile Volume * vp, afs_uint32 thresh);
#endif /* AFS_DEMAND_ATTACH_FS */
struct Lock vol_listLock;	/* Lock obtained when listing volumes:
				 * prevents a volume from being missed
				 * if the volume is attached during a
				 * list volumes */

static int TimeZoneCorrection;	/* Number of seconds west of GMT */

/* Common message used when the volume goes off line */
char *VSalvageMessage =
    "Files in this volume are currently unavailable; call operations";

int VInit;			/* 0 - uninitialized,
				 * 1 - initialized but not all volumes have been attached,
				 * 2 - initialized and all volumes have been attached,
				 * 3 - initialized, all volumes have been attached, and
				 * VConnectFS() has completed. */

bit32 VolumeCacheCheck;		/* Incremented every time a volume goes on line--
				 * used to stamp volume headers and in-core
				 * vnodes.  When the volume goes on-line the
				 * vnode will be invalidated
				 * access only with VOL_LOCK held */
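/*
 * Illustrative sketch (hypothetical helper, not in the original source):
 * how the VolumeCacheCheck stamp invalidates in-core vnodes.  A vnode
 * remembers the value of vp->cacheCheck current at load time; once the
 * volume goes on-line again, vp->cacheCheck has advanced and the cached
 * vnode must be treated as stale.  Caller must hold VOL_LOCK.
 */
static int
ExampleVnodeCacheStampValid_r(afs_uint32 vnodeStamp, Volume * vp)
{
    return (vnodeStamp == vp->cacheCheck);
}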
/***************************************************/
/* Startup routines                                */
/***************************************************/

int
VInitVolumePackage(ProgramType pt, afs_uint32 nLargeVnodes, afs_uint32 nSmallVnodes,
		   int connect, afs_uint32 volcache)
{
    int errors = 0;		/* Number of errors while finding vice partitions. */
    struct timeval tv;
    struct timezone tz;

    programType = pt;

    memset(&VStats, 0, sizeof(VStats));
    VStats.hdr_cache_size = 200;

    VInitPartitionPackage();
    VInitVolumeHash();
#ifdef AFS_DEMAND_ATTACH_FS
    if (programType == fileServer) {
	VInitVLRU();
    } else {
	VLRU_SetOptions(VLRU_SET_ENABLED, 0);
    }
#endif

#ifdef AFS_PTHREAD_ENV
    assert(pthread_mutex_init(&vol_glock_mutex, NULL) == 0);
    assert(pthread_mutex_init(&vol_trans_mutex, NULL) == 0);
    assert(pthread_cond_init(&vol_put_volume_cond, NULL) == 0);
    assert(pthread_cond_init(&vol_sleep_cond, NULL) == 0);
#else /* AFS_PTHREAD_ENV */
    IOMGR_Initialize();
#endif /* AFS_PTHREAD_ENV */
    Lock_Init(&vol_listLock);

    srandom(time(0));		/* For VGetVolumeInfo */
    gettimeofday(&tv, &tz);
    TimeZoneCorrection = tz.tz_minuteswest * 60;

#ifdef AFS_DEMAND_ATTACH_FS
    assert(pthread_mutex_init(&vol_salvsync_mutex, NULL) == 0);
#endif /* AFS_DEMAND_ATTACH_FS */
    /* Ok, we have done enough initialization that fileserver can
     * start accepting calls, even though the volumes may not be
     * available just yet.
     */
    VInit = 1;

#if defined(AFS_DEMAND_ATTACH_FS) && defined(SALVSYNC_BUILD_SERVER)
    if (programType == salvageServer) {
	SALVSYNC_salvInit();
    }
#endif /* AFS_DEMAND_ATTACH_FS */
#ifdef FSSYNC_BUILD_SERVER
    if (programType == fileServer) {
	FSYNC_fsInit();
    }
#endif
#if defined(AFS_DEMAND_ATTACH_FS) && defined(SALVSYNC_BUILD_CLIENT)
    if (programType == fileServer) {
	/* establish a connection to the salvager at this point */
	assert(VConnectSALV() != 0);
    }
#endif /* AFS_DEMAND_ATTACH_FS */

    if (volcache > VStats.hdr_cache_size)
	VStats.hdr_cache_size = volcache;
    VInitVolumeHeaderCache(VStats.hdr_cache_size);

    VInitVnodes(vLarge, nLargeVnodes);
    VInitVnodes(vSmall, nSmallVnodes);

    errors = VAttachPartitions();
    if (errors)
	return -1;
    if (programType == fileServer) {
	struct DiskPartition *diskP;
#ifdef AFS_PTHREAD_ENV
	struct vinitvolumepackage_thread_t params;
	struct diskpartition_queue_t * dpq;
	int i, threads, parts;
	pthread_t tid;
	pthread_attr_t attrs;

	assert(pthread_cond_init(&params.thread_done_cv, NULL) == 0);
	queue_Init(&params);
	params.n_threads_complete = 0;

	/* create partition work queue */
	for (parts = 0, diskP = DiskPartitionList; diskP; diskP = diskP->next, parts++) {
	    dpq = (diskpartition_queue_t *) malloc(sizeof(struct diskpartition_queue_t));
	    assert(dpq != NULL);
	    dpq->diskP = diskP;
	    queue_Append(&params, dpq);
	}

	threads = MIN(parts, vol_attach_threads);

	if (threads > 1) {
	    /* spawn off a bunch of initialization threads */
	    assert(pthread_attr_init(&attrs) == 0);
	    assert(pthread_attr_setdetachstate(&attrs, PTHREAD_CREATE_DETACHED) == 0);

	    Log("VInitVolumePackage: beginning parallel fileserver startup\n");
#ifdef AFS_DEMAND_ATTACH_FS
	    Log("VInitVolumePackage: using %d threads to pre-attach volumes on %d partitions\n",
		threads, parts);
#else /* AFS_DEMAND_ATTACH_FS */
	    Log("VInitVolumePackage: using %d threads to attach volumes on %d partitions\n",
		threads, parts);
#endif /* AFS_DEMAND_ATTACH_FS */

	    VOL_LOCK;
	    for (i = 0; i < threads; i++) {
		assert(pthread_create
		       (&tid, &attrs, &VInitVolumePackageThread,
			&params) == 0);
	    }

	    while (params.n_threads_complete < threads) {
		VOL_CV_WAIT(&params.thread_done_cv);
	    }
	    VOL_UNLOCK;

	    assert(pthread_attr_destroy(&attrs) == 0);
	} else {
	    /* if we're only going to run one init thread, don't bother creating
	     * another LWP */
	    Log("VInitVolumePackage: beginning single-threaded fileserver startup\n");
#ifdef AFS_DEMAND_ATTACH_FS
	    Log("VInitVolumePackage: using 1 thread to pre-attach volumes on %d partition(s)\n",
		parts);
#else /* AFS_DEMAND_ATTACH_FS */
	    Log("VInitVolumePackage: using 1 thread to attach volumes on %d partition(s)\n",
		parts);
#endif /* AFS_DEMAND_ATTACH_FS */

	    VInitVolumePackageThread(&params);
	}

	assert(pthread_cond_destroy(&params.thread_done_cv) == 0);
#else /* AFS_PTHREAD_ENV */

	/* Attach all the volumes in this partition */
	for (diskP = DiskPartitionList; diskP; diskP = diskP->next) {
	    int nAttached = 0, nUnattached = 0;
	    assert(VAttachVolumesByPartition(diskP, &nAttached, &nUnattached) == 0);
	}
#endif /* AFS_PTHREAD_ENV */
    }

    VInit = 2;			/* Initialized, and all volumes have been attached */
#ifdef FSSYNC_BUILD_CLIENT
    if (programType == volumeUtility && connect) {
	if (!VConnectFS()) {
	    Log("Unable to connect to file server; aborted\n");
	    exit(1);
	}
    }
#ifdef AFS_DEMAND_ATTACH_FS
    else if (programType == salvageServer) {
	if (!VConnectFS()) {
	    Log("Unable to connect to file server; aborted\n");
	    exit(1);
	}
    }
#endif /* AFS_DEMAND_ATTACH_FS */
#endif /* FSSYNC_BUILD_CLIENT */
    return 0;
}
#ifdef AFS_PTHREAD_ENV
static void *
VInitVolumePackageThread(void * args)
{
    int errors = 0;		/* Number of errors while finding vice partitions. */

    struct DiskPartition *diskP;
    struct vinitvolumepackage_thread_t * params;
    struct diskpartition_queue_t * dpq;

    params = (vinitvolumepackage_thread_t *) args;

    VOL_LOCK;

    /* Attach all the volumes in this partition */
    while (queue_IsNotEmpty(params)) {
	int nAttached = 0, nUnattached = 0;

	dpq = queue_First(params, diskpartition_queue_t);
	queue_Remove(dpq);
	VOL_UNLOCK;

	diskP = dpq->diskP;
	free(dpq);

	assert(VAttachVolumesByPartition(diskP, &nAttached, &nUnattached) == 0);

	VOL_LOCK;
    }

    params->n_threads_complete++;
    pthread_cond_signal(&params->thread_done_cv);
    VOL_UNLOCK;
    return NULL;
}
#endif /* AFS_PTHREAD_ENV */
/*
 * attach all volumes on a given disk partition
 */
static int
VAttachVolumesByPartition(struct DiskPartition *diskP, int * nAttached, int * nUnattached)
{
    DIR *dirp;
    struct dirent *dp;
    int ret = 0;

    Log("Partition %s: attaching volumes\n", diskP->name);
    dirp = opendir(VPartitionPath(diskP));
    if (!dirp) {
	Log("opendir on Partition %s failed!\n", diskP->name);
	return 1;
    }

    while ((dp = readdir(dirp))) {
	char *p;
	p = strrchr(dp->d_name, '.');
	if (p != NULL && strcmp(p, VHDREXT) == 0) {
	    Error error;
	    Volume *vp;
#ifdef AFS_DEMAND_ATTACH_FS
	    vp = VPreAttachVolumeByName(&error, diskP->name, dp->d_name);
#else /* AFS_DEMAND_ATTACH_FS */
	    vp = VAttachVolumeByName(&error, diskP->name, dp->d_name,
				     V_VOLUPD);
#endif /* AFS_DEMAND_ATTACH_FS */
	    (*(vp ? nAttached : nUnattached))++;
	    if (error == VOFFLINE)
		Log("Volume %d stays offline (/vice/offline/%s exists)\n", VolumeNumber(dp->d_name), dp->d_name);
	    else if (LogLevel >= 5) {
		Log("Partition %s: attached volume %d (%s)\n",
		    diskP->name, VolumeNumber(dp->d_name),
		    dp->d_name);
	    }
#if !defined(AFS_DEMAND_ATTACH_FS)
	    if (vp) {
		VPutVolume(vp);
	    }
#endif /* AFS_DEMAND_ATTACH_FS */
	}
    }

    Log("Partition %s: attached %d volumes; %d volumes not attached\n", diskP->name, *nAttached, *nUnattached);
    closedir(dirp);
    return ret;
}
/***************************************************/
/* Shutdown routines                               */
/***************************************************/

/*
 * highly multithreaded volume package shutdown
 *
 * with the demand attach fileserver extensions,
 * VShutdown has been modified to be multithreaded.
 * In order to achieve optimal use of many threads,
 * the shutdown code involves one control thread and
 * n shutdown worker threads.  The control thread
 * periodically examines the number of volumes available
 * for shutdown on each partition, and produces a worker
 * thread allocation schedule.  The idea is to eliminate
 * redundant scheduling computation on the workers by
 * having a single master scheduler.
 *
 * The scheduler's objectives are:
 * (1) fairness
 *   each partition with volumes remaining gets allocated
 *   at least 1 thread (assuming sufficient threads)
 * (2) performance
 *   threads are allocated proportional to the number of
 *   volumes remaining to be offlined.  This ensures that
 *   the OS I/O scheduler has many requests to elevator
 *   seek on partitions that will (presumably) take the
 *   longest amount of time (from now) to finish shutdown
 * (3) keep threads busy
 *   when there are extra threads, they are assigned to
 *   partitions using a simple round-robin algorithm
 *
 * In the future, we may wish to add the ability to adapt
 * to the relative performance patterns of each disk
 * partition.
 */
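/*
 * Illustrative sketch (hypothetical helper, not in the original source):
 * objectives (1) and (2) above in isolation.  With n_threads workers and
 * a partition holding part_len of total_len remaining volumes, the
 * proportional share is n_threads * part_len / total_len, floored at one
 * thread while the partition still has work.  ShutdownCreateSchedule()
 * below implements a refined version of this, including round-robin
 * distribution of leftover threads.
 */
static int
ExampleProportionalThreadTarget(int n_threads, int part_len, int total_len)
{
    int target;
    if (!part_len || !total_len)
	return 0;			/* nothing left to offline here */
    target = (n_threads * part_len) / total_len;
    return (target > 0) ? target : 1;	/* objective (1): at least one */
}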
/*
 * multi-step shutdown process
 *
 * demand attach shutdown is a four-step process.  Each
 * shutdown "pass" shuts down increasingly more difficult
 * volumes.  The main purpose is to achieve better cache
 * utilization during shutdown.
 *
 * pass 0
 *   shutdown volumes in the unattached, pre-attached
 *   and error states
 * pass 1
 *   shutdown attached volumes with cached volume headers
 * pass 2
 *   shutdown all volumes in non-exclusive states
 * pass 3
 *   shutdown all remaining volumes
 */
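#ifdef AFS_DEMAND_ATTACH_FS
/*
 * Illustrative sketch (hypothetical helper, not in the original source):
 * the pass semantics above restated as a predicate.  A volume may be
 * shut down during a given pass once the pass number is high enough to
 * cover the volume's current state; ShutdownVolumeWalk_r() below encodes
 * the same policy as a fall-through switch.
 */
static int
ExamplePassCoversState(int pass, VolState state, int header_loaded)
{
    if (state == VOL_STATE_UNATTACHED || state == VOL_STATE_PREATTACHED ||
	VIsErrorState(state))
	return 1;			/* covered from pass 0 onwards */
    if (VIsExclusiveState(state))
	return (pass >= 3);		/* hardest volumes go last */
    if (state == VOL_STATE_ATTACHED && header_loaded)
	return (pass >= 1);		/* cached volume header: pass 1 */
    return (pass >= 2);			/* remaining non-exclusive states */
}
#endif /* AFS_DEMAND_ATTACH_FS */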
void
VShutdown_r(void)
{
    int i;
    register Volume *vp, *np;
    register afs_int32 code;
#ifdef AFS_DEMAND_ATTACH_FS
    struct DiskPartition * diskP;
    struct diskpartition_queue_t * dpq;
    vshutdown_thread_t params;
    pthread_t tid;
    pthread_attr_t attrs;

    memset(&params, 0, sizeof(vshutdown_thread_t));

    for (params.n_parts = 0, diskP = DiskPartitionList;
	 diskP; diskP = diskP->next, params.n_parts++);

    Log("VShutdown: shutting down on-line volumes on %d partition%s...\n",
	params.n_parts, params.n_parts > 1 ? "s" : "");
    if (vol_attach_threads > 1) {
	/* prepare for parallel shutdown */
	params.n_threads = vol_attach_threads;
	assert(pthread_mutex_init(&params.lock, NULL) == 0);
	assert(pthread_cond_init(&params.cv, NULL) == 0);
	assert(pthread_cond_init(&params.master_cv, NULL) == 0);
	assert(pthread_attr_init(&attrs) == 0);
	assert(pthread_attr_setdetachstate(&attrs, PTHREAD_CREATE_DETACHED) == 0);
	queue_Init(&params);

	/* setup the basic partition information structures for
	 * parallel shutdown */
	for (diskP = DiskPartitionList; diskP; diskP = diskP->next) {
	    int count = 0;
	    struct rx_queue * qp, * nqp;
	    Volume * vp;

	    VVByPListWait_r(diskP);
	    VVByPListBeginExclusive_r(diskP);

	    /* count the number of volumes on this partition */
	    for (queue_Scan(&diskP->vol_list, qp, nqp, rx_queue)) {
		vp = (Volume *)((char *)qp - offsetof(Volume, vol_list));
		count++;
	    }
	    Log("VShutdown: partition %s has %d volumes with attached headers\n",
		VPartitionPath(diskP), count);

	    /* build up the pass 0 shutdown work queue */
	    dpq = (struct diskpartition_queue_t *) malloc(sizeof(struct diskpartition_queue_t));
	    assert(dpq != NULL);
	    dpq->diskP = diskP;
	    queue_Prepend(&params, dpq);

	    params.part_pass_head[diskP->device] = queue_First(&diskP->vol_list, rx_queue);
	}

	Log("VShutdown: beginning parallel fileserver shutdown\n");
	Log("VShutdown: using %d threads to offline volumes on %d partition%s\n",
	    vol_attach_threads, params.n_parts, params.n_parts > 1 ? "s" : "" );

	/* do pass 0 shutdown */
	assert(pthread_mutex_lock(&params.lock) == 0);
	for (i = 0; i < params.n_threads; i++) {
	    assert(pthread_create
		   (&tid, &attrs, &VShutdownThread,
		    &params) == 0);
	}

	/* wait for all the pass 0 shutdowns to complete */
	while (params.n_threads_complete < params.n_threads) {
	    assert(pthread_cond_wait(&params.master_cv, &params.lock) == 0);
	}
	params.n_threads_complete = 0;
	params.pass = 1;
	assert(pthread_cond_broadcast(&params.cv) == 0);
	assert(pthread_mutex_unlock(&params.lock) == 0);

	Log("VShutdown: pass 0 completed using the 1 thread per partition algorithm\n");
	Log("VShutdown: starting passes 1 through 3 using finely-granular mp-fast algorithm\n");

	/* run the parallel shutdown scheduler. it will drop the glock internally */
	ShutdownController(&params);

	/* wait for all the workers to finish pass 3 and terminate */
	while (params.pass < 4) {
	    VOL_CV_WAIT(&params.cv);
	}

	assert(pthread_attr_destroy(&attrs) == 0);
	assert(pthread_cond_destroy(&params.cv) == 0);
	assert(pthread_cond_destroy(&params.master_cv) == 0);
	assert(pthread_mutex_destroy(&params.lock) == 0);

	/* drop the VByPList exclusive reservations */
	for (diskP = DiskPartitionList; diskP; diskP = diskP->next) {
	    VVByPListEndExclusive_r(diskP);
	    Log("VShutdown: %s stats : (pass[0]=%d, pass[1]=%d, pass[2]=%d, pass[3]=%d)\n",
		VPartitionPath(diskP),
		params.stats[0][diskP->device],
		params.stats[1][diskP->device],
		params.stats[2][diskP->device],
		params.stats[3][diskP->device]);
	}

	Log("VShutdown: shutdown finished using %d threads\n", params.n_threads);
    } else {
	/* if we're only going to run one shutdown thread, don't bother creating
	 * another LWP */
	Log("VShutdown: beginning single-threaded fileserver shutdown\n");

	for (diskP = DiskPartitionList; diskP; diskP = diskP->next) {
	    VShutdownByPartition_r(diskP);
	}
    }

    Log("VShutdown: complete.\n");
#else /* AFS_DEMAND_ATTACH_FS */
    Log("VShutdown: shutting down on-line volumes...\n");
    for (i = 0; i < VolumeHashTable.Size; i++) {
	/* try to hold first volume in the hash table */
	for (queue_Scan(&VolumeHashTable.Table[i], vp, np, Volume)) {
	    code = VHold_r(vp);
	    if (code == 0) {
		if (LogLevel >= 5)
		    Log("VShutdown: Attempting to take volume %u offline.\n",
			vp->hashid);

		/* next, take the volume offline (drops reference count) */
		VOffline_r(vp, "File server was shut down");
	    }
	}
    }
    Log("VShutdown: complete.\n");
#endif /* AFS_DEMAND_ATTACH_FS */
}

void
VShutdown(void)
{
    VOL_LOCK;
    VShutdown_r();
    VOL_UNLOCK;
}
#ifdef AFS_DEMAND_ATTACH_FS
/*
 * shutdown control thread
 */
static void
ShutdownController(vshutdown_thread_t * params)
{
    struct DiskPartition * diskP;
    Device id;
    vshutdown_thread_t shadow;

    ShutdownCreateSchedule(params);

    while ((params->pass < 4) &&
	   (params->n_threads_complete < params->n_threads)) {
	/* recompute schedule once per second */

	memcpy(&shadow, params, sizeof(vshutdown_thread_t));

	VOL_UNLOCK;
	Log("ShutdownController: schedule version=%d, vol_remaining=%d, pass=%d\n",
	    shadow.schedule_version, shadow.vol_remaining, shadow.pass);
	Log("ShutdownController: n_threads_complete=%d, n_parts_done_pass=%d\n",
	    shadow.n_threads_complete, shadow.n_parts_done_pass);
	for (diskP = DiskPartitionList; diskP; diskP = diskP->next) {
	    id = diskP->device;
	    Log("ShutdownController: part[%d] : (len=%d, thread_target=%d, done_pass=%d, pass_head=%p)\n",
		id,
		diskP->vol_list.len,
		shadow.part_thread_target[id],
		shadow.part_done_pass[id],
		shadow.part_pass_head[id]);
	}

	sleep(1);
	VOL_LOCK;

	ShutdownCreateSchedule(params);
    }
}
/* create the shutdown thread work schedule.
 * this scheduler tries to implement fairness
 * by allocating at least 1 thread to each
 * partition with volumes to be shutdown,
 * and then it attempts to allocate remaining
 * threads based upon the amount of work left
 */
static void
ShutdownCreateSchedule(vshutdown_thread_t * params)
{
    struct DiskPartition * diskP;
    int sum, thr_workload, thr_left;
    int part_residue[VOLMAXPARTS+1];
    Device id;

    /* compute the total number of outstanding volumes */
    sum = 0;
    for (diskP = DiskPartitionList; diskP; diskP = diskP->next) {
	sum += diskP->vol_list.len;
    }

    params->schedule_version++;
    params->vol_remaining = sum;

    if (!sum)
	return;

    /* compute average per-thread workload */
    thr_workload = sum / params->n_threads;
    if (sum % params->n_threads)
	thr_workload++;

    thr_left = params->n_threads;
    memset(&part_residue, 0, sizeof(part_residue));

    /* for fairness, give every partition with volumes remaining
     * at least one thread */
    for (diskP = DiskPartitionList; diskP && thr_left; diskP = diskP->next) {
	id = diskP->device;
	if (diskP->vol_list.len) {
	    params->part_thread_target[id] = 1;
	    thr_left--;
	} else {
	    params->part_thread_target[id] = 0;
	}
    }

    if (thr_left && thr_workload) {
	/* compute length-weighted workloads */
	int delta;

	for (diskP = DiskPartitionList; diskP && thr_left; diskP = diskP->next) {
	    id = diskP->device;
	    delta = (diskP->vol_list.len / thr_workload) -
		params->part_thread_target[id];
	    if (delta < 0) {
		continue;
	    }
	    if (delta < thr_left) {
		params->part_thread_target[id] += delta;
		thr_left -= delta;
	    } else {
		params->part_thread_target[id] += thr_left;
		thr_left = 0;
		break;
	    }
	}
    }

    if (thr_left) {
	/* try to assign any leftover threads to partitions that
	 * had volume lengths closer to needing thread_target+1 */
	int max_residue, max_id;

	/* compute the residues */
	for (diskP = DiskPartitionList; diskP; diskP = diskP->next) {
	    id = diskP->device;
	    part_residue[id] = diskP->vol_list.len -
		(params->part_thread_target[id] * thr_workload);
	}

	/* now try to allocate remaining threads to partitions with the
	 * highest residues */
	while (thr_left) {
	    max_residue = 0;
	    for (diskP = DiskPartitionList; diskP; diskP = diskP->next) {
		id = diskP->device;
		if (part_residue[id] > max_residue) {
		    max_residue = part_residue[id];
		    max_id = id;
		}
	    }

	    if (!max_residue) {
		break;
	    }

	    params->part_thread_target[max_id]++;
	    thr_left--;
	    part_residue[max_id] = 0;
	}
    }

    if (thr_left) {
	/* punt and give any remaining threads equally to each partition */
	int alloc;
	if (thr_left >= params->n_parts) {
	    alloc = thr_left / params->n_parts;
	    for (diskP = DiskPartitionList; diskP; diskP = diskP->next) {
		id = diskP->device;
		params->part_thread_target[id] += alloc;
	    }
	    thr_left -= alloc * params->n_parts;
	}

	/* finish off the last of the threads */
	for (diskP = DiskPartitionList; thr_left && diskP; diskP = diskP->next) {
	    id = diskP->device;
	    params->part_thread_target[id]++;
	    thr_left--;
	}
    }
}
/* worker thread for parallel shutdown */
static void *
VShutdownThread(void * args)
{
    struct rx_queue *qp;
    Volume * vp;
    vshutdown_thread_t * params;
    int part, code, found, pass, schedule_version_save, count;
    struct DiskPartition *diskP;
    struct diskpartition_queue_t * dpq;
    Device id;

    params = (vshutdown_thread_t *) args;

    /* acquire the shutdown pass 0 lock */
    assert(pthread_mutex_lock(&params->lock) == 0);

    /* if there's still pass 0 work to be done,
     * get a work entry, and do a pass 0 shutdown */
    if (queue_IsNotEmpty(params)) {
	dpq = queue_First(params, diskpartition_queue_t);
	queue_Remove(dpq);
	assert(pthread_mutex_unlock(&params->lock) == 0);
	diskP = dpq->diskP;
	free(dpq);
	id = diskP->device;

	count = 0;
	while (ShutdownVolumeWalk_r(diskP, 0, &params->part_pass_head[id]))
	    count++;
	params->stats[0][diskP->device] = count;
	assert(pthread_mutex_lock(&params->lock) == 0);
    }

    params->n_threads_complete++;
    if (params->n_threads_complete == params->n_threads) {
	/* notify control thread that all workers have completed pass 0 */
	assert(pthread_cond_signal(&params->master_cv) == 0);
    }
    while (params->pass == 0) {
	assert(pthread_cond_wait(&params->cv, &params->lock) == 0);
    }

    /* switch locks */
    assert(pthread_mutex_unlock(&params->lock) == 0);
    VOL_LOCK;

    pass = params->pass;
    assert(pass > 0);

    /* now escalate through the more complicated shutdowns */
    while (pass <= 3) {
	schedule_version_save = params->schedule_version;
	found = 0;
	/* find a disk partition to work on */
	for (diskP = DiskPartitionList; diskP; diskP = diskP->next) {
	    id = diskP->device;
	    if (params->part_thread_target[id] && !params->part_done_pass[id]) {
		params->part_thread_target[id]--;
		found = 1;
		break;
	    }
	}

	if (!found) {
	    /* hmm. for some reason the controller thread couldn't find anything for
	     * us to do. let's see if there's anything we can do */
	    for (diskP = DiskPartitionList; diskP; diskP = diskP->next) {
		id = diskP->device;
		if (diskP->vol_list.len && !params->part_done_pass[id]) {
		    found = 1;
		    break;
		} else if (!params->part_done_pass[id]) {
		    params->part_done_pass[id] = 1;
		    params->n_parts_done_pass++;
		    if (pass == 3) {
			Log("VShutdown: done shutting down volumes on partition %s.\n",
			    VPartitionPath(diskP));
		    }
		}
	    }
	}

	/* do work on this partition until either the controller
	 * creates a new schedule, or we run out of things to do
	 * on this partition */
	if (found) {
	    count = 0;
	    while (!params->part_done_pass[id] &&
		   (schedule_version_save == params->schedule_version)) {
		/* ShutdownVolumeWalk_r will drop the glock internally */
		if (!ShutdownVolumeWalk_r(diskP, pass, &params->part_pass_head[id])) {
		    if (!params->part_done_pass[id]) {
			params->part_done_pass[id] = 1;
			params->n_parts_done_pass++;
			if (pass == 3) {
			    Log("VShutdown: done shutting down volumes on partition %s.\n",
				VPartitionPath(diskP));
			}
		    }
		    break;
		}
		count++;
	    }

	    params->stats[pass][id] += count;
	} else {
	    /* ok, everyone is done this pass, proceed */

	    /* barrier lock */
	    params->n_threads_complete++;
	    while (params->pass == pass) {
		if (params->n_threads_complete == params->n_threads) {
		    /* we are the last thread to complete, so we will
		     * reinitialize worker pool state for the next pass */
		    params->n_threads_complete = 0;
		    params->n_parts_done_pass = 0;
		    params->pass++;
		    for (diskP = DiskPartitionList; diskP; diskP = diskP->next) {
			id = diskP->device;
			params->part_done_pass[id] = 0;
			params->part_pass_head[id] = queue_First(&diskP->vol_list, rx_queue);
		    }

		    /* compute a new thread schedule before releasing all the workers */
		    ShutdownCreateSchedule(params);

		    /* wake up all the workers */
		    assert(pthread_cond_broadcast(&params->cv) == 0);

		    VOL_UNLOCK;
		    Log("VShutdown: pass %d completed using %d threads on %d partitions\n",
			pass, params->n_threads, params->n_parts);
		    VOL_LOCK;
		} else {
		    VOL_CV_WAIT(&params->cv);
		}
	    }
	    pass = params->pass;
	}
    }

    VOL_UNLOCK;

    return NULL;
}
/* shut down all volumes on a given disk partition
 *
 * note that this function will not allow mp-fast
 * shutdown of a partition */
int
VShutdownByPartition_r(struct DiskPartition * dp)
{
    int pass, total;
    int pass_stats[4];

    /* wait for other exclusive ops to finish */
    VVByPListWait_r(dp);

    /* begin exclusive access */
    VVByPListBeginExclusive_r(dp);

    /* pick the low-hanging fruit first,
     * then do the complicated ones last
     * (has the advantage of keeping
     *  in-use volumes up until the bitter end) */
    for (pass = 0, total = 0; pass < 4; pass++) {
	pass_stats[pass] = ShutdownVByPForPass_r(dp, pass);
	total += pass_stats[pass];
    }

    /* end exclusive access */
    VVByPListEndExclusive_r(dp);

    Log("VShutdownByPartition: shut down %d volumes on %s (pass[0]=%d, pass[1]=%d, pass[2]=%d, pass[3]=%d)\n",
	total, VPartitionPath(dp), pass_stats[0], pass_stats[1], pass_stats[2], pass_stats[3]);

    return 0;
}
/* internal shutdown functionality
 *
 * for multi-pass shutdown:
 * 0 to only "shutdown" {pre,un}attached and error state volumes
 * 1 to also shutdown attached volumes w/ volume header loaded
 * 2 to also shutdown attached volumes w/o volume header loaded
 * 3 to also shutdown exclusive state volumes
 *
 * caller MUST hold exclusive access on the hash chain
 * because we drop vol_glock_mutex internally
 *
 * this function is reentrant for passes 1--3
 * (e.g. multiple threads can cooperate to
 *  shutdown a partition mp-fast)
 *
 * pass 0 is not scalable because the volume state data is
 * synchronized by vol_glock mutex, and the locking overhead
 * is too high to drop the lock long enough to do linked list
 * traversal
 */
static int
ShutdownVByPForPass_r(struct DiskPartition * dp, int pass)
{
    struct rx_queue * q = queue_First(&dp->vol_list, rx_queue);
    register int i = 0;

    while (ShutdownVolumeWalk_r(dp, pass, &q))
	i++;

    return i;
}
/* conditionally shutdown one volume on partition dp
 * returns 1 if a volume was shutdown in this pass,
 * 0 otherwise */
static int
ShutdownVolumeWalk_r(struct DiskPartition * dp, int pass,
		     struct rx_queue ** idx)
{
    struct rx_queue *qp, *nqp;
    Volume * vp;

    qp = *idx;

    for (queue_ScanFrom(&dp->vol_list, qp, qp, nqp, rx_queue)) {
	vp = (Volume *) (((char *)qp) - offsetof(Volume, vol_list));

	switch (pass) {
	case 0:
	    if ((V_attachState(vp) != VOL_STATE_UNATTACHED) &&
		(V_attachState(vp) != VOL_STATE_ERROR) &&
		(V_attachState(vp) != VOL_STATE_PREATTACHED)) {
		break;
	    }
	case 1:
	    if ((V_attachState(vp) == VOL_STATE_ATTACHED) &&
		(vp->header == NULL)) {
		break;
	    }
	case 2:
	    if (VIsExclusiveState(V_attachState(vp))) {
		break;
	    }
	case 3:
	    *idx = nqp;
	    DeleteVolumeFromVByPList_r(vp);
	    VShutdownVolume_r(vp);
	    vp = NULL;
	    return 1;
	}
    }

    return 0;
}
/*
 * shutdown a specific volume
 */
/* caller MUST NOT hold a heavyweight ref on vp */
int
VShutdownVolume_r(Volume * vp)
{
    int code;

    VCreateReservation_r(vp);

    if (LogLevel >= 5) {
	Log("VShutdownVolume_r: vid=%u, device=%d, state=%hu\n",
	    vp->hashid, vp->partition->device, V_attachState(vp));
    }

    /* wait for other blocking ops to finish */
    VWaitExclusiveState_r(vp);

    assert(VIsValidState(V_attachState(vp)));

    switch(V_attachState(vp)) {
    case VOL_STATE_SALVAGING:
	/* make sure salvager knows we don't want
	 * the volume back */
	VCancelSalvage_r(vp, SALVSYNC_SHUTDOWN);
    case VOL_STATE_PREATTACHED:
    case VOL_STATE_ERROR:
	VChangeState_r(vp, VOL_STATE_UNATTACHED);
    case VOL_STATE_UNATTACHED:
	break;
    case VOL_STATE_GOING_OFFLINE:
    case VOL_STATE_SHUTTING_DOWN:
    case VOL_STATE_ATTACHED:
	code = VHold_r(vp);
	if (!code) {
	    if (LogLevel >= 5)
		Log("VShutdown: Attempting to take volume %u offline.\n",
		    vp->hashid);

	    /* take the volume offline (drops reference count) */
	    VOffline_r(vp, "File server was shut down");
	}
	break;
    }

    VCancelReservation_r(vp);
    vp = NULL;
    return 0;
}
#endif /* AFS_DEMAND_ATTACH_FS */
/***************************************************/
/* Header I/O routines                             */
/***************************************************/

/* open a descriptor for the inode (h),
 * read in an on-disk structure into buffer (to) of size (size),
 * verify versionstamp in structure has magic (magic) and
 * optionally verify version (version) if (version) is nonzero
 */
static void
ReadHeader(Error * ec, IHandle_t * h, char *to, int size, bit32 magic,
	   bit32 version)
{
    struct versionStamp *vsn;
    FdHandle_t *fdP;

    *ec = 0;
    if (h == NULL) {
	*ec = VSALVAGE;
	return;
    }

    fdP = IH_OPEN(h);
    if (fdP == NULL) {
	*ec = VSALVAGE;
	return;
    }

    if (FDH_SEEK(fdP, 0, SEEK_SET) < 0) {
	*ec = VSALVAGE;
	FDH_REALLYCLOSE(fdP);
	return;
    }
    vsn = (struct versionStamp *)to;
    if (FDH_READ(fdP, to, size) != size || vsn->magic != magic) {
	*ec = VSALVAGE;
	FDH_REALLYCLOSE(fdP);
	return;
    }
    FDH_CLOSE(fdP);

    /* Check is conditional, in case caller wants to inspect version himself */
    if (version && vsn->version != version) {
	*ec = VSALVAGE;
    }
}
void
WriteVolumeHeader_r(Error * ec, Volume * vp)
{
    IHandle_t *h = V_diskDataHandle(vp);
    FdHandle_t *fdP;

    *ec = 0;

    fdP = IH_OPEN(h);
    if (fdP == NULL) {
	*ec = VSALVAGE;
	return;
    }
    if (FDH_SEEK(fdP, 0, SEEK_SET) < 0) {
	*ec = VSALVAGE;
	FDH_REALLYCLOSE(fdP);
	return;
    }
    if (FDH_WRITE(fdP, (char *)&V_disk(vp), sizeof(V_disk(vp)))
	!= sizeof(V_disk(vp))) {
	*ec = VSALVAGE;
	FDH_REALLYCLOSE(fdP);
	return;
    }
    FDH_CLOSE(fdP);
}
/* VolumeHeaderToDisk
 * Allows for storing 64 bit inode numbers in on-disk volume header
 * file.
 */
/* convert in-memory representation of a volume header to the
 * on-disk representation of a volume header */
void
VolumeHeaderToDisk(VolumeDiskHeader_t * dh, VolumeHeader_t * h)
{
    memset((char *)dh, 0, sizeof(VolumeDiskHeader_t));
    dh->stamp = h->stamp;
    dh->id = h->id;
    dh->parent = h->parent;

#ifdef AFS_64BIT_IOPS_ENV
    dh->volumeInfo_lo = (afs_int32) h->volumeInfo & 0xffffffff;
    dh->volumeInfo_hi = (afs_int32) (h->volumeInfo >> 32) & 0xffffffff;
    dh->smallVnodeIndex_lo = (afs_int32) h->smallVnodeIndex & 0xffffffff;
    dh->smallVnodeIndex_hi =
	(afs_int32) (h->smallVnodeIndex >> 32) & 0xffffffff;
    dh->largeVnodeIndex_lo = (afs_int32) h->largeVnodeIndex & 0xffffffff;
    dh->largeVnodeIndex_hi =
	(afs_int32) (h->largeVnodeIndex >> 32) & 0xffffffff;
    dh->linkTable_lo = (afs_int32) h->linkTable & 0xffffffff;
    dh->linkTable_hi = (afs_int32) (h->linkTable >> 32) & 0xffffffff;
#else
    dh->volumeInfo_lo = h->volumeInfo;
    dh->smallVnodeIndex_lo = h->smallVnodeIndex;
    dh->largeVnodeIndex_lo = h->largeVnodeIndex;
    dh->linkTable_lo = h->linkTable;
#endif
}
/* DiskToVolumeHeader
 * Converts an on-disk representation of a volume header to
 * the in-memory representation of a volume header.
 *
 * Makes the assumption that AFS has *always*
 * zero'd the volume header file so that high parts of inode
 * numbers are 0 in older (SGI EFS) volume header files.
 */
void
DiskToVolumeHeader(VolumeHeader_t * h, VolumeDiskHeader_t * dh)
{
    memset((char *)h, 0, sizeof(VolumeHeader_t));
    h->stamp = dh->stamp;
    h->id = dh->id;
    h->parent = dh->parent;

#ifdef AFS_64BIT_IOPS_ENV
    h->volumeInfo =
	(Inode) dh->volumeInfo_lo | ((Inode) dh->volumeInfo_hi << 32);

    h->smallVnodeIndex =
	(Inode) dh->smallVnodeIndex_lo | ((Inode) dh->
					  smallVnodeIndex_hi << 32);

    h->largeVnodeIndex =
	(Inode) dh->largeVnodeIndex_lo | ((Inode) dh->
					  largeVnodeIndex_hi << 32);
    h->linkTable =
	(Inode) dh->linkTable_lo | ((Inode) dh->linkTable_hi << 32);
#else
    h->volumeInfo = dh->volumeInfo_lo;
    h->smallVnodeIndex = dh->smallVnodeIndex_lo;
    h->largeVnodeIndex = dh->largeVnodeIndex_lo;
    h->linkTable = dh->linkTable_lo;
#endif
}
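/*
 * Illustrative sketch (hypothetical helpers, not in the original source):
 * the 64-bit inode handling above, in isolation.  On disk an Inode is
 * stored as two 32-bit halves; joining the halves inverts the split
 * exactly.  The casts through afs_uint32 in the join guard against sign
 * extension when the low half has its top bit set.
 */
#ifdef AFS_64BIT_IOPS_ENV
static void
ExampleSplitInode(Inode ino, afs_int32 * lo, afs_int32 * hi)
{
    *lo = (afs_int32) (ino & 0xffffffff);
    *hi = (afs_int32) ((ino >> 32) & 0xffffffff);
}

static Inode
ExampleJoinInode(afs_int32 lo, afs_int32 hi)
{
    return (Inode) (afs_uint32) lo | ((Inode) (afs_uint32) hi << 32);
}
#endif /* AFS_64BIT_IOPS_ENV */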
/***************************************************/
/* Volume Attachment routines                      */
/***************************************************/

#ifdef AFS_DEMAND_ATTACH_FS
/**
 * pre-attach a volume given its path.
 *
 * @param[out] ec         outbound error code
 * @param[in]  partition  partition path string
 * @param[in]  name       volume id string
 *
 * @return volume object pointer
 *
 * @note A pre-attached volume will only have its partition
 *       and hashid fields initialized.  At first call to
 *       VGetVolume, the volume will be fully attached.
 */
Volume *
VPreAttachVolumeByName(Error * ec, char *partition, char *name)
{
    Volume * vp;
    VOL_LOCK;
    vp = VPreAttachVolumeByName_r(ec, partition, name);
    VOL_UNLOCK;
    return vp;
}

/**
 * pre-attach a volume given its path.
 *
 * @param[out] ec         outbound error code
 * @param[in]  partition  path to vice partition
 * @param[in]  name       volume id string
 *
 * @return volume object pointer
 *
 * @pre VOL_LOCK held
 *
 * @internal volume package internal use only.
 */
Volume *
VPreAttachVolumeByName_r(Error * ec, char *partition, char *name)
{
    return VPreAttachVolumeById_r(ec,
				  partition,
				  VolumeNumber(name));
}

/**
 * pre-attach a volume given its path and numeric volume id.
 *
 * @param[out] ec          error code return
 * @param[in]  partition   path to vice partition
 * @param[in]  volumeId    numeric volume id
 *
 * @return volume object pointer
 *
 * @pre VOL_LOCK held
 *
 * @internal volume package internal use only.
 */
Volume *
VPreAttachVolumeById_r(Error * ec,
		       char * partition,
		       VolId volumeId)
{
    Volume *vp;
    struct DiskPartition *partp;

    *ec = 0;

    assert(programType == fileServer);

    if (!(partp = VGetPartition_r(partition, 0))) {
	*ec = VNOVOL;
	Log("VPreAttachVolumeById_r: Error getting partition (%s)\n", partition);
	return NULL;
    }

    vp = VLookupVolume_r(ec, volumeId, NULL);
    if (*ec) {
	return NULL;
    }

    return VPreAttachVolumeByVp_r(ec, partp, vp, volumeId);
}
/**
 * preattach a volume.
 *
 * @param[out] ec      outbound error code
 * @param[in]  partp   pointer to partition object
 * @param[in]  vp      pointer to volume object
 * @param[in]  vid     volume id
 *
 * @return volume object pointer
 *
 * @pre VOL_LOCK is held.
 *
 * @warning Returned volume object pointer does not have to
 *          equal the pointer passed in as argument vp.  There
 *          are potential race conditions which can result in
 *          the pointers having different values.  It is up to
 *          the caller to make sure that references are handled
 *          properly in this case.
 *
 * @note If there is already a volume object registered with
 *       the same volume id, its pointer MUST be passed as
 *       argument vp.  Failure to do so will result in a silent
 *       failure to preattach.
 *
 * @internal volume package internal use only.
 */
Volume *
VPreAttachVolumeByVp_r(Error * ec,
		       struct DiskPartition * partp,
		       Volume * vp,
		       VolId vid)
{
    Volume *nvp = NULL;

    *ec = 0;

    /* check to see if pre-attach already happened */
    if (vp &&
	(V_attachState(vp) != VOL_STATE_UNATTACHED) &&
	!VIsErrorState(V_attachState(vp)) &&
	((V_attachState(vp) != VOL_STATE_PREATTACHED) ||
	 vp->pending_vol_op == NULL)) {
	/*
	 * pre-attach is a no-op in all but the following cases:
	 *
	 *   - volume is unattached
	 *   - volume is in an error state
	 *   - volume is pre-attached with a pending volume operation
	 *     (e.g. vos move between two partitions on same server)
	 */
	goto done;
    } else if (vp) {
	/* we're re-attaching a volume; clear out some old state */
	memset(&vp->salvage, 0, sizeof(struct VolumeOnlineSalvage));

	if (V_partition(vp) != partp) {
	    /* XXX potential race */
	    DeleteVolumeFromVByPList_r(vp);
	}
    } else {
	/* if we need to allocate a new Volume struct,
	 * go ahead and drop the vol glock, otherwise
	 * do the basic setup synchronised, as it's
	 * probably not worth dropping the lock */
	VOL_UNLOCK;

	/* allocate the volume structure */
	vp = nvp = (Volume *) malloc(sizeof(Volume));
	assert(vp != NULL);
	memset(vp, 0, sizeof(Volume));
	queue_Init(&vp->vnode_list);
	assert(pthread_cond_init(&V_attachCV(vp), NULL) == 0);
    }

    /* link the volume with its associated vice partition */
    vp->device = partp->device;
    vp->partition = partp;
    vp->hashid = vid;

    /* if we dropped the lock, reacquire the lock,
     * check for pre-attach races, and then add
     * the volume to the hash table */
    if (nvp) {
	VOL_LOCK;
	nvp = VLookupVolume_r(ec, vid, NULL);
	if (*ec) {
	    free(vp);
	    vp = NULL;
	    goto done;
	} else if (nvp) { /* race detected */
	    free(vp);
	    vp = nvp;
	    goto done;
	} else {
	    /* hack to make up for VChangeState_r() decrementing
	     * the old state counter */
	    VStats.state_levels[0]++;
	}
    }

    /* put pre-attached volume onto the hash table
     * and bring it up to the pre-attached state */
    AddVolumeToHashTable(vp, vp->hashid);
    AddVolumeToVByPList_r(vp);
    VLRU_Init_Node_r(vp);
    VChangeState_r(vp, VOL_STATE_PREATTACHED);

    if (LogLevel >= 5) {
	Log("VPreAttachVolumeByVp_r: volume %u pre-attached\n", vp->hashid);
    }

  done:
    if (*ec)
	return NULL;
    else
	return vp;
}
#endif /* AFS_DEMAND_ATTACH_FS */
/* Attach an existing volume, given its pathname, and return a
   pointer to the volume header information.  The volume also
   normally goes online at this time.  An offline volume
   must be reattached to make it go online */
Volume *
VAttachVolumeByName(Error * ec, char *partition, char *name, int mode)
{
    Volume *retVal;
    VOL_LOCK;
    retVal = VAttachVolumeByName_r(ec, partition, name, mode);
    VOL_UNLOCK;
    return retVal;
}

Volume *
VAttachVolumeByName_r(Error * ec, char *partition, char *name, int mode)
{
    register Volume *vp = NULL, *svp = NULL;
    int fd, n;
    struct afs_stat status;
    struct VolumeDiskHeader diskHeader;
    struct VolumeHeader iheader;
    struct DiskPartition *partp;
    char path[64];
    int isbusy = 0;
    VolId volumeId;
#ifdef AFS_DEMAND_ATTACH_FS
    VolumeStats stats_save;
#endif /* AFS_DEMAND_ATTACH_FS */

    *ec = 0;

    volumeId = VolumeNumber(name);

    if (!(partp = VGetPartition_r(partition, 0))) {
	*ec = VNOVOL;
	Log("VAttachVolume: Error getting partition (%s)\n", partition);
	goto done;
    }

    if (programType == volumeUtility) {
	assert(VInit == 3);
	VLockPartition_r(partition);
    } else if (programType == fileServer) {
#ifdef AFS_DEMAND_ATTACH_FS
	/* lookup the volume in the hash table */
	vp = VLookupVolume_r(ec, volumeId, NULL);
	if (*ec) {
	    return NULL;
	}

	if (vp) {
	    /* save any counters that are supposed to
	     * be monotonically increasing over the
	     * lifetime of the fileserver */
	    memcpy(&stats_save, &vp->stats, sizeof(VolumeStats));
	} else {
	    memset(&stats_save, 0, sizeof(VolumeStats));
	}

	/* if there's something in the hash table, and it's not
	 * in the pre-attach state, then we may need to detach
	 * it before proceeding */
	if (vp && (V_attachState(vp) != VOL_STATE_PREATTACHED)) {
	    VCreateReservation_r(vp);
	    VWaitExclusiveState_r(vp);

	    /* at this point state must be one of:
	     *   - UNATTACHED
	     *   - ATTACHED
	     *   - SHUTTING_DOWN
	     *   - GOING_OFFLINE
	     *   - SALVAGING
	     *   - ERROR
	     */

	    if (vp->specialStatus == VBUSY)
		isbusy = 1;

	    /* if it's already attached, see if we can return it */
	    if (V_attachState(vp) == VOL_STATE_ATTACHED) {
		VGetVolumeByVp_r(ec, vp);
		if (V_inUse(vp)) {
		    VCancelReservation_r(vp);
		    return vp;
		}

		/* otherwise, we need to detach, and attempt to re-attach */
		VDetachVolume_r(ec, vp);
		if (*ec) {
		    Log("VAttachVolume: Error detaching old volume instance (%s)\n", name);
		}
	    } else {
		/* if it isn't fully attached, delete from the hash tables,
		   and let the refcounter handle the rest */
		DeleteVolumeFromHashTable(vp);
		DeleteVolumeFromVByPList_r(vp);
	    }

	    VCancelReservation_r(vp);
	    vp = NULL;
	}

	/* pre-attach volume if it hasn't been done yet */
	if (!vp ||
	    (V_attachState(vp) == VOL_STATE_UNATTACHED) ||
	    (V_attachState(vp) == VOL_STATE_ERROR)) {
	    svp = vp;
	    vp = VPreAttachVolumeByVp_r(ec, partp, vp, volumeId);
	    if (*ec) {
		return NULL;
	    }
	}

	/* handle pre-attach races
	 *
	 * multiple threads can race to pre-attach a volume,
	 * but we can't let them race beyond that
	 *
	 * our solution is to let the first thread to bring
	 * the volume into an exclusive state win; the other
	 * threads just wait until it finishes bringing the
	 * volume online, and then they do a vgetvolumebyvp
	 */
	if (svp && (svp != vp)) {
	    /* wait for other exclusive ops to finish */
	    VCreateReservation_r(vp);
	    VWaitExclusiveState_r(vp);

	    /* get a heavyweight ref, kill the lightweight ref, and return */
	    VGetVolumeByVp_r(ec, vp);
	    VCancelReservation_r(vp);
	    return vp;
	}

	/* at this point, we are chosen as the thread to do
	 * demand attachment for this volume. all other threads
	 * doing a getvolume on vp->hashid will block until we finish */

	/* make sure any old header cache entries are invalidated
	 * before proceeding */
	FreeVolumeHeader(vp);

	VChangeState_r(vp, VOL_STATE_ATTACHING);

	/* restore any saved counters */
	memcpy(&vp->stats, &stats_save, sizeof(VolumeStats));
#else /* AFS_DEMAND_ATTACH_FS */
	vp = VGetVolume_r(ec, volumeId);
	if (vp) {
	    if (V_inUse(vp))
		return vp;
	    if (vp->specialStatus == VBUSY)
		isbusy = 1;
	    VDetachVolume_r(ec, vp);
	    if (*ec) {
		Log("VAttachVolume: Error detaching volume (%s)\n", name);
	    }
	    vp = NULL;
	}
#endif /* AFS_DEMAND_ATTACH_FS */
    }

    *ec = 0;
    strcpy(path, VPartitionPath(partp));
    VOL_UNLOCK;

    strcat(path, "/");
    strcat(path, name);
    if ((fd = afs_open(path, O_RDONLY)) == -1 || afs_fstat(fd, &status) == -1) {
	Log("VAttachVolume: Failed to open %s (errno %d)\n", path, errno);
	if (fd > -1)
	    close(fd);
	*ec = VNOVOL;
	VOL_LOCK;
	goto done;
    }
    n = read(fd, &diskHeader, sizeof(diskHeader));
    close(fd);
    if (n != sizeof(diskHeader)
	|| diskHeader.stamp.magic != VOLUMEHEADERMAGIC) {
	Log("VAttachVolume: Error reading volume header %s\n", path);
	*ec = VSALVAGE;
	VOL_LOCK;
	goto done;
    }
    if (diskHeader.stamp.version != VOLUMEHEADERVERSION) {
	Log("VAttachVolume: Volume %s, version number is incorrect; volume needs to be salvaged\n", path);
	*ec = VSALVAGE;
	VOL_LOCK;
	goto done;
    }

    DiskToVolumeHeader(&iheader, &diskHeader);
#ifdef FSSYNC_BUILD_CLIENT
    if (programType == volumeUtility && mode != V_SECRETLY && mode != V_PEEK) {
	VOL_LOCK;
	if (FSYNC_VolOp(iheader.id, partition, FSYNC_VOL_NEEDVOLUME, mode, NULL)
	    != SYNC_OK) {
	    Log("VAttachVolume: attach of volume %u apparently denied by file server\n", iheader.id);
	    *ec = VNOVOL;	/* XXXX */
	    goto done;
	}
	VOL_UNLOCK;
    }
#endif

    if (!vp) {
	vp = (Volume *) calloc(1, sizeof(Volume));
	assert(vp != NULL);
	vp->device = partp->device;
	vp->partition = partp;
	queue_Init(&vp->vnode_list);
#ifdef AFS_DEMAND_ATTACH_FS
	assert(pthread_cond_init(&V_attachCV(vp), NULL) == 0);
#endif /* AFS_DEMAND_ATTACH_FS */
    }

    /* attach2 is entered without any locks, and returns
     * with vol_glock_mutex held */
    vp = attach2(ec, volumeId, path, &iheader, partp, vp, isbusy, mode);
1949 /* attach2 is entered without any locks, and returns
1950 * with vol_glock_mutex held */
1951 vp = attach2(ec, volumeId, path, &iheader, partp, vp, isbusy, mode);
1953 if (programType == volumeUtility && vp) {
1954 #ifdef AFS_DEMAND_ATTACH_FS
1955 /* for dafs, we should tell the fileserver, except for V_PEEK
1956 * where we know it is not necessary */
1957 if (mode == V_PEEK) {
1958 vp->needsPutBack = 0;
1960 vp->needsPutBack = 1;
1962 #else /* !AFS_DEMAND_ATTACH_FS */
1963 /* duplicate computation in fssync.c about whether the server
1964 * takes the volume offline or not. If the volume isn't
1965 * offline, we must not return it when we detach the volume,
1966 * or the server will abort */
1967 if (mode == V_READONLY || mode == V_PEEK
1968 || (!VolumeWriteable(vp) && (mode == V_CLONE || mode == V_DUMP)))
1969 vp->needsPutBack = 0;
1971 vp->needsPutBack = 1;
1972 #endif /* !AFS_DEMAND_ATTACH_FS */
1974 /* OK, there's a problem here, but one that I don't know how to
1975 * fix right now, and that I don't think should arise often.
1976 * Basically, we should only put back this volume to the server if
1977 * it was given to us by the server, but since we don't have a vp,
1978 * we can't run the VolumeWriteable function to find out as we do
1979 * above when computing vp->needsPutBack. So we send it back, but
1980 * there's a path in VAttachVolume on the server which may abort
1981 * if this volume doesn't have a header. Should be pretty rare
1982 * for all of that to happen, but if it does, probably the right
1983 * fix is for the server to allow the return of readonly volumes
1984 * that it doesn't think are really checked out. */
1985 #ifdef FSSYNC_BUILD_CLIENT
1986 if (programType == volumeUtility && vp == NULL &&
1987 mode != V_SECRETLY && mode != V_PEEK) {
1988 FSYNC_VolOp(iheader.id, partition, FSYNC_VOL_ON, 0, NULL);
    if (programType == fileServer && vp) {
#ifdef AFS_DEMAND_ATTACH_FS
	/*
	 * we can get here in cases where we don't "own"
	 * the volume (e.g. volume owned by a utility).
	 * short circuit around potential disk header races.
	 */
	if (V_attachState(vp) != VOL_STATE_ATTACHED) {
	    goto done;
	}
#endif
	V_needsCallback(vp) = 0;

	if (VInit >= 2 && V_BreakVolumeCallbacks) {
	    Log("VAttachVolume: Volume %u was changed externally; breaking callbacks\n", V_id(vp));
	    (*V_BreakVolumeCallbacks) (V_id(vp));
	}

	VUpdateVolume_r(ec, vp, 0);
	if (*ec) {
	    Log("VAttachVolume: Error updating volume\n");
	    if (vp)
		VPutVolume_r(vp);
	    goto done;
	}
	if (VolumeWriteable(vp) && V_dontSalvage(vp) == 0) {
#ifndef AFS_DEMAND_ATTACH_FS
	    /* This is a hack: by temporarily setting the incore
	     * dontSalvage flag ON, the volume will be put back on the
	     * Update list (with dontSalvage OFF again).  It will then
	     * come back in N minutes with DONT_SALVAGE eventually
	     * set.  This is the way that volumes that have never had
	     * it set get it set; or that volumes that have been
	     * offline without DONT SALVAGE having been set also
	     * eventually get it set */
	    V_dontSalvage(vp) = DONT_SALVAGE;
#endif /* !AFS_DEMAND_ATTACH_FS */
	    VAddToVolumeUpdateList_r(ec, vp);
	    if (*ec) {
		Log("VAttachVolume: Error adding volume to update list\n");
		if (vp)
		    VPutVolume_r(vp);
		goto done;
	    }
	}
	if (LogLevel)
	    Log("VOnline: volume %u (%s) attached and online\n", V_id(vp),
		V_name(vp));
    }

  done:
    if (programType == volumeUtility) {
	VUnlockPartition_r(partition);
    }
    if (*ec) {
#ifdef AFS_DEMAND_ATTACH_FS
	/* attach failed; make sure we're in error state */
	if (vp && !VIsErrorState(V_attachState(vp))) {
	    VChangeState_r(vp, VOL_STATE_ERROR);
	}
#endif /* AFS_DEMAND_ATTACH_FS */
	return NULL;
    } else {
	return vp;
    }
}
#ifdef AFS_DEMAND_ATTACH_FS
/* VAttachVolumeByVp_r
 *
 * finish attaching a volume that is
 * in a less than fully attached state
 */
/* caller MUST hold a ref count on vp */
static Volume *
VAttachVolumeByVp_r(Error * ec, Volume * vp, int mode)
{
    char name[VMAXPATHLEN];
    int fd, n, reserve = 0;
    struct afs_stat status;
    struct VolumeDiskHeader diskHeader;
    struct VolumeHeader iheader;
    struct DiskPartition *partp;
    char path[64];
    int isbusy = 0;
    VolId volumeId;
    Volume * nvp = NULL;
    VolumeStats stats_save;
    *ec = 0;

    /* volume utility should never call AttachByVp */
    assert(programType == fileServer);

    volumeId = vp->hashid;
    partp = vp->partition;
    VolumeExternalName_r(volumeId, name, sizeof(name));

    /* if another thread is performing a blocking op, wait */
    VWaitExclusiveState_r(vp);

    memcpy(&stats_save, &vp->stats, sizeof(VolumeStats));

    /* if it's already attached, see if we can return it */
    if (V_attachState(vp) == VOL_STATE_ATTACHED) {
	VGetVolumeByVp_r(ec, vp);
	if (V_inUse(vp)) {
	    return vp;
	} else {
	    if (vp->specialStatus == VBUSY)
		isbusy = 1;
	    VDetachVolume_r(ec, vp);
	    if (*ec) {
		Log("VAttachVolume: Error detaching volume (%s)\n", name);
	    }
	    vp = NULL;
	}
    }

    /* pre-attach volume if it hasn't been done yet */
    if (!vp ||
	(V_attachState(vp) == VOL_STATE_UNATTACHED) ||
	(V_attachState(vp) == VOL_STATE_ERROR)) {
	nvp = VPreAttachVolumeByVp_r(ec, partp, vp, volumeId);
	if (*ec) {
	    return NULL;
	}
	if (nvp != vp) {
	    reserve = 1;
	    VCreateReservation_r(nvp);
	    vp = nvp;
	}
    }

    VChangeState_r(vp, VOL_STATE_ATTACHING);

    /* restore monotonically increasing stats */
    memcpy(&vp->stats, &stats_save, sizeof(VolumeStats));

    *ec = 0;

    /* compute path to disk header,
     * read in header,
     * and verify magic and version stamps */
    strcpy(path, VPartitionPath(partp));
    VOL_UNLOCK;

    strcat(path, "/");
    strcat(path, name);
    if ((fd = afs_open(path, O_RDONLY)) == -1 || afs_fstat(fd, &status) == -1) {
	Log("VAttachVolume: Failed to open %s (errno %d)\n", path, errno);
	if (fd > -1)
	    close(fd);
	*ec = VNOVOL;
	VOL_LOCK;
	goto done;
    }
    n = read(fd, &diskHeader, sizeof(diskHeader));
    close(fd);
    if (n != sizeof(diskHeader)
	|| diskHeader.stamp.magic != VOLUMEHEADERMAGIC) {
	Log("VAttachVolume: Error reading volume header %s\n", path);
	*ec = VSALVAGE;
	VOL_LOCK;
	goto done;
    }
    if (diskHeader.stamp.version != VOLUMEHEADERVERSION) {
	Log("VAttachVolume: Volume %s, version number is incorrect; volume needs to be salvaged\n", path);
	*ec = VSALVAGE;
	VOL_LOCK;
	goto done;
    }

    /* convert on-disk header format to in-memory header format */
    DiskToVolumeHeader(&iheader, &diskHeader);

    /* do volume attach
     *
     * NOTE: attach2 is entered without any locks, and returns
     * with vol_glock_mutex held */
    vp = attach2(ec, volumeId, path, &iheader, partp, vp, isbusy, mode);

    /* in the event that an error was encountered, or
     * the volume was not brought to an attached state
     * for any reason, skip to the end.  We cannot
     * safely call VUpdateVolume unless we "own" it.
     */
    if (*ec ||
	(vp == NULL) ||
	(V_attachState(vp) != VOL_STATE_ATTACHED)) {
	goto done;
    }

    V_needsCallback(vp) = 0;
    VUpdateVolume_r(ec, vp, 0);
    if (*ec) {
	Log("VAttachVolume: Error updating volume %u\n", vp->hashid);
	VPutVolume_r(vp);
	goto done;
    }
    if (VolumeWriteable(vp) && V_dontSalvage(vp) == 0) {
#ifndef AFS_DEMAND_ATTACH_FS
	/* This is a hack: by temporarily setting the incore
	 * dontSalvage flag ON, the volume will be put back on the
	 * Update list (with dontSalvage OFF again).  It will then
	 * come back in N minutes with DONT_SALVAGE eventually
	 * set.  This is the way that volumes that have never had
	 * it set get it set; or that volumes that have been
	 * offline without DONT SALVAGE having been set also
	 * eventually get it set */
	V_dontSalvage(vp) = DONT_SALVAGE;
#endif /* !AFS_DEMAND_ATTACH_FS */
	VAddToVolumeUpdateList_r(ec, vp);
	if (*ec) {
	    Log("VAttachVolume: Error adding volume %u to update list\n", vp->hashid);
	    if (vp)
		VPutVolume_r(vp);
	    goto done;
	}
    }
    if (LogLevel)
	Log("VOnline: volume %u (%s) attached and online\n", V_id(vp),
	    V_name(vp));
  done:
    if (reserve) {
	VCancelReservation_r(nvp);
	reserve = 0;
    }
    if (*ec && (*ec != VOFFLINE) && (*ec != VSALVAGE)) {
	if (vp && !VIsErrorState(V_attachState(vp))) {
	    VChangeState_r(vp, VOL_STATE_ERROR);
	}
	return NULL;
    } else {
	return vp;
    }
}
#endif /* AFS_DEMAND_ATTACH_FS */
/*
 * called without any locks held
 * returns with vol_glock_mutex held
 */
static Volume *
attach2(Error * ec, VolId volumeId, char *path, register struct VolumeHeader * header,
	struct DiskPartition * partp, register Volume * vp, int isbusy, int mode)
{
    vp->specialStatus = (byte) (isbusy ? VBUSY : 0);
    IH_INIT(vp->vnodeIndex[vLarge].handle, partp->device, header->parent,
	    header->largeVnodeIndex);
    IH_INIT(vp->vnodeIndex[vSmall].handle, partp->device, header->parent,
	    header->smallVnodeIndex);
    IH_INIT(vp->diskDataHandle, partp->device, header->parent,
	    header->volumeInfo);
    IH_INIT(vp->linkHandle, partp->device, header->parent, header->linkTable);
    vp->shuttingDown = 0;
    vp->goingOffline = 0;
    vp->nUsers = 1;
#ifdef AFS_DEMAND_ATTACH_FS
    vp->stats.last_attach = FT_ApproxTime();
    vp->stats.attaches++;
#endif

    VOL_LOCK;
    IncUInt64(&VStats.attaches);
    vp->cacheCheck = ++VolumeCacheCheck;
    /* just in case this ever rolls over */
    if (!vp->cacheCheck)
	vp->cacheCheck = ++VolumeCacheCheck;
    GetVolumeHeader(vp);
    VOL_UNLOCK;
#if defined(AFS_DEMAND_ATTACH_FS) && defined(FSSYNC_BUILD_CLIENT)
    /* demand attach changes the V_PEEK mechanism
     *
     * we can now suck the current disk data structure over
     * the fssync interface without going to disk
     *
     * (technically, we don't need to restrict this feature
     *  to demand attach fileservers.  However, I'm trying
     *  to limit the number of common code changes)
     */
    if (programType != fileServer && mode == V_PEEK) {
	SYNC_response res;
	res.payload.len = sizeof(VolumeDiskData);
	res.payload.buf = &vp->header->diskstuff;

	if (FSYNC_VolOp(volumeId,
			VPartitionPath(partp),
			FSYNC_VOL_QUERY_HDR,
			0,
			&res) == SYNC_OK) {
	    goto disk_header_loaded;
	}
    }
#endif /* AFS_DEMAND_ATTACH_FS && FSSYNC_BUILD_CLIENT */
    (void)ReadHeader(ec, V_diskDataHandle(vp), (char *)&V_disk(vp),
		     sizeof(V_disk(vp)), VOLUMEINFOMAGIC, VOLUMEINFOVERSION);

#ifdef AFS_DEMAND_ATTACH_FS
    /* update stats */
    VOL_LOCK;
    IncUInt64(&VStats.hdr_loads);
    IncUInt64(&vp->stats.hdr_loads);
    VOL_UNLOCK;
#endif /* AFS_DEMAND_ATTACH_FS */

    if (*ec) {
	Log("VAttachVolume: Error reading diskDataHandle vol header %s; error=%u\n", path, *ec);
    }

#ifdef AFS_DEMAND_ATTACH_FS
 disk_header_loaded:

    /* check for pending volume operations */
    if (vp->pending_vol_op) {
	/* see if the pending volume op requires exclusive access */
	if (!VVolOpLeaveOnline_r(vp, vp->pending_vol_op)) {
	    /* mark the volume down */
	    *ec = VOFFLINE;
	    VChangeState_r(vp, VOL_STATE_UNATTACHED);
	    if (V_offlineMessage(vp)[0] == '\0')
		strlcpy(V_offlineMessage(vp),
			"A volume utility is running.",
			sizeof(V_offlineMessage(vp)));
	    V_offlineMessage(vp)[sizeof(V_offlineMessage(vp)) - 1] = '\0';

	    /* check to see if we should set the specialStatus flag */
	    if (VVolOpSetVBusy_r(vp, vp->pending_vol_op)) {
		vp->specialStatus = VBUSY;
	    }
	}
    }

    if (!*ec) {
	V_attachFlags(vp) |= VOL_HDR_LOADED;
	vp->stats.last_hdr_load = vp->stats.last_attach;
    }
#endif /* AFS_DEMAND_ATTACH_FS */
2337 struct IndexFileHeader iHead;
2339 #if OPENAFS_VOL_STATS
2341 * We just read in the diskstuff part of the header. If the detailed
2342 * volume stats area has not yet been initialized, we should bzero the
2343 * area and mark it as initialized.
2345 if (!(V_stat_initialized(vp))) {
2346 memset((char *)(V_stat_area(vp)), 0, VOL_STATS_BYTES);
2347 V_stat_initialized(vp) = 1;
2349 #endif /* OPENAFS_VOL_STATS */
2351 (void)ReadHeader(ec, vp->vnodeIndex[vSmall].handle,
2352 (char *)&iHead, sizeof(iHead),
2353 SMALLINDEXMAGIC, SMALLINDEXVERSION);
2356 Log("VAttachVolume: Error reading smallVnode vol header %s; error=%u\n", path, *ec);
2361 struct IndexFileHeader iHead;
2363 (void)ReadHeader(ec, vp->vnodeIndex[vLarge].handle,
2364 (char *)&iHead, sizeof(iHead),
2365 LARGEINDEXMAGIC, LARGEINDEXVERSION);
2368 Log("VAttachVolume: Error reading largeVnode vol header %s; error=%u\n", path, *ec);
2372 #ifdef AFS_NAMEI_ENV
2374 struct versionStamp stamp;
2376 (void)ReadHeader(ec, V_linkHandle(vp), (char *)&stamp,
2377 sizeof(stamp), LINKTABLEMAGIC, LINKTABLEVERSION);
2380 Log("VAttachVolume: Error reading namei vol header %s; error=%u\n", path, *ec);
2383 #endif /* AFS_NAMEI_ENV */
2385 #if defined(AFS_DEMAND_ATTACH_FS)
2386 if (*ec && ((*ec != VOFFLINE) || (V_attachState(vp) != VOL_STATE_UNATTACHED))) {
2388 if (programType == fileServer) {
2389 VRequestSalvage_r(ec, vp, SALVSYNC_ERROR, VOL_SALVAGE_INVALIDATE_HEADER);
2392 Log("VAttachVolume: Error attaching volume %s; volume needs salvage; error=%u\n", path, *ec);
2398 /* volume operation in progress */
2402 #else /* AFS_DEMAND_ATTACH_FS */
2404 Log("VAttachVolume: Error attaching volume %s; volume needs salvage; error=%u\n", path, *ec);
2409 #endif /* AFS_DEMAND_ATTACH_FS */
2411 if (V_needsSalvaged(vp)) {
2412 if (vp->specialStatus)
2413 vp->specialStatus = 0;
2415 #if defined(AFS_DEMAND_ATTACH_FS)
2416 if (programType == fileServer) {
2417 VRequestSalvage_r(ec, vp, SALVSYNC_NEEDED, VOL_SALVAGE_INVALIDATE_HEADER);
2420 Log("VAttachVolume: volume salvage flag is ON for %s; volume needs salvage\n", path);
2424 #else /* AFS_DEMAND_ATTACH_FS */
2427 #endif /* AFS_DEMAND_ATTACH_FS */
2432 if (programType == fileServer) {
2433 #ifndef FAST_RESTART
2434 if (V_inUse(vp) && VolumeWriteable(vp)) {
2435 if (!V_needsSalvaged(vp)) {
2436 V_needsSalvaged(vp) = 1;
2437 VUpdateVolume_r(ec, vp, 0);
2439 #if defined(AFS_DEMAND_ATTACH_FS)
2440 VRequestSalvage_r(ec, vp, SALVSYNC_NEEDED, VOL_SALVAGE_INVALIDATE_HEADER);
2442 #else /* AFS_DEMAND_ATTACH_FS */
2443 Log("VAttachVolume: volume %s needs to be salvaged; not attached.\n", path);
2446 #endif /* AFS_DEMAND_ATTACH_FS */
2449 #endif /* FAST_RESTART */
2451 if (V_destroyMe(vp) == DESTROY_ME) {
2452 #if defined(AFS_DEMAND_ATTACH_FS)
2453 /* schedule a salvage so the volume goes away on disk */
2454 VRequestSalvage_r(ec, vp, SALVSYNC_ERROR, VOL_SALVAGE_INVALIDATE_HEADER);
2455 VChangeState_r(vp, VOL_STATE_ERROR);
2457 #endif /* AFS_DEMAND_ATTACH_FS */
2459 Log("VAttachVolume: volume %s is junk; it should be destroyed at next salvage\n", path);
2465 vp->nextVnodeUnique = V_uniquifier(vp);
2466 vp->vnodeIndex[vSmall].bitmap = vp->vnodeIndex[vLarge].bitmap = NULL;
2467 #ifndef BITMAP_LATER
2468 if (programType == fileServer && VolumeWriteable(vp)) {
2470 for (i = 0; i < nVNODECLASSES; i++) {
2471 VGetBitmap_r(ec, vp, i);
2473 #ifdef AFS_DEMAND_ATTACH_FS
2474 VRequestSalvage_r(ec, vp, SALVSYNC_ERROR, VOL_SALVAGE_INVALIDATE_HEADER);
2476 #else /* AFS_DEMAND_ATTACH_FS */
2478 #endif /* AFS_DEMAND_ATTACH_FS */
2479 Log("VAttachVolume: error getting bitmap for volume (%s)\n",
2485 #endif /* BITMAP_LATER */
2487 if (programType == fileServer) {
2488 if (vp->specialStatus)
2489 vp->specialStatus = 0;
2490 if (V_blessed(vp) && V_inService(vp) && !V_needsSalvaged(vp)) {
2492 V_offlineMessage(vp)[0] = '\0';
2496 AddVolumeToHashTable(vp, V_id(vp));
2497 #ifdef AFS_DEMAND_ATTACH_FS
2498 AddVolumeToVByPList_r(vp);
2500 if ((programType != fileServer) ||
2502 VChangeState_r(vp, VOL_STATE_ATTACHED);
2504 VChangeState_r(vp, VOL_STATE_UNATTACHED);
2510 /* Attach an existing volume.
2511 The volume also normally goes online at this time.
2512 An offline volume must be reattached to make it go online.
2516 VAttachVolume(Error * ec, VolumeId volumeId, int mode)
2520 retVal = VAttachVolume_r(ec, volumeId, mode);
2526 VAttachVolume_r(Error * ec, VolumeId volumeId, int mode)
2529 GetVolumePath(ec, volumeId, &part, &name);
2531 register Volume *vp;
2533 vp = VGetVolume_r(&error, volumeId);
2535 assert(V_inUse(vp) == 0);
2536 VDetachVolume_r(ec, vp);
2540 return VAttachVolumeByName_r(ec, part, name, mode);
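/*
 * Illustrative usage sketch for the attach interface above.  The mode
 * constant V_VOLUPD and the error handling below are assumptions for
 * illustration only, guarded out so they are never compiled:
 */
#if 0
{
    Error error;
    Volume *vp = VAttachVolume(&error, volumeId, V_VOLUPD);
    if (!vp || error) {
	Log("VAttachVolume example: attach of volume %u failed (error %d)\n",
	    volumeId, error);
    } else {
	/* ... use the volume ... */
	VDetachVolume(&error, vp);
    }
}
#endif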
2543 /* Increment the reference count on a volume without context swaps.
2544 * This may require reading the volume header in from disk, since the
2545 * volume package maintains the invariant nUsers>0 ==> vp->header is valid.
2547 * N.B. This call can fail if the header cannot be read! In that case
2548 * we still guarantee that we won't context swap, but the ref count is
2549 * not incremented (otherwise we'd violate the invariant).
2551 /* NOTE: with the demand attach fileserver extensions, the global lock
2552 * is dropped within VHold */
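/*
 * Illustrative sketch of the intended calling pattern (guarded out,
 * never compiled).  Since VHold_r can fail when the header cannot be
 * read, the assumption here is that a zero return means the reference
 * was taken:
 */
#if 0
VOL_LOCK;
if (VHold_r(vp) == 0) {
    /* nUsers was bumped; vp->header is valid until the ref is put back */
    /* ... non-blocking work on vp ... */
    VPutVolume_r(vp);
}
VOL_UNLOCK;
#endif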
2553 #ifdef AFS_DEMAND_ATTACH_FS
2555 VHold_r(register Volume * vp)
2559 VCreateReservation_r(vp);
2560 VWaitExclusiveState_r(vp);
2562 LoadVolumeHeader(&error, vp);
2564 VCancelReservation_r(vp);
2568 VCancelReservation_r(vp);
2571 #else /* AFS_DEMAND_ATTACH_FS */
2573 VHold_r(register Volume * vp)
2577 LoadVolumeHeader(&error, vp);
2583 #endif /* AFS_DEMAND_ATTACH_FS */
2586 VHold(register Volume * vp)
2590 retVal = VHold_r(vp);
2596 /***************************************************/
2597 /* get and put volume routines */
2598 /***************************************************/
2601 * put back a heavyweight reference to a volume object.
2603 * @param[in] vp volume object pointer
2605 * @pre VOL_LOCK held
2607 * @post heavyweight volume reference put back.
2608 * depending on state, volume may have been taken offline,
2609 * detached, salvaged, freed, etc.
2611 * @internal volume package internal use only
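/*
 * Illustrative sketch (guarded out, never compiled): after the last
 * heavyweight reference is put back, the volume may be offlined,
 * detached, or freed, so the pointer must not be used again:
 */
#if 0
VOL_LOCK;
VPutVolume_r(vp);
vp = NULL;	/* vp may already be gone */
VOL_UNLOCK;
#endif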
2614 VPutVolume_r(register Volume * vp)
2616 assert(--vp->nUsers >= 0);
2617 if (vp->nUsers == 0) {
2619 ReleaseVolumeHeader(vp->header);
2620 #ifdef AFS_DEMAND_ATTACH_FS
2621 if (!VCheckDetach(vp)) {
2625 #else /* AFS_DEMAND_ATTACH_FS */
2627 #endif /* AFS_DEMAND_ATTACH_FS */
2632 VPutVolume(register Volume * vp)
2640 /* Get a pointer to an attached volume. The pointer is returned whether
2641 or not the volume is in service or online. The error code, however,
2642 indicates the volume's status */
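/*
 * Illustrative caller sketch (guarded out, never compiled).  Both the
 * returned pointer and the error codes need to be examined; the error
 * values named here are examples:
 */
#if 0
{
    Error ec, client_ec;
    Volume *vp = VGetVolume(&ec, &client_ec, volumeId);
    if (ec) {
	/* e.g. VNOVOL, VOFFLINE, VSALVAGE; client_ec is the value
	 * suitable for reporting back to a client */
    }
    if (vp && !ec) {
	/* ... use vp ... */
	VPutVolume(vp);
    }
}
#endif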
2644 VGetVolume(Error * ec, Error * client_ec, VolId volumeId)
2648 retVal = GetVolume(ec, client_ec, volumeId, NULL, 0);
2654 VGetVolume_r(Error * ec, VolId volumeId)
2656 return GetVolume(ec, NULL, volumeId, NULL, 0);
2659 /* try to get a volume we've previously looked up */
2660 /* for demand attach fs, caller MUST NOT hold a ref count on vp */
2662 VGetVolumeByVp_r(Error * ec, Volume * vp)
2664 return GetVolume(ec, NULL, vp->hashid, vp, 0);
2667 /* private interface for getting a volume handle
2668 * volumeId must be provided.
2669 * hint is an optional parameter to speed up hash lookups
2670 * flags is not used at this time
2672 /* for demand attach fs, caller MUST NOT hold a ref count on hint */
2674 GetVolume(Error * ec, Error * client_ec, VolId volumeId, Volume * hint, int flags)
2677 /* pull this profiling/debugging code out of regular builds */
2679 #define VGET_CTR_INC(x) x++
2680 unsigned short V0 = 0, V1 = 0, V2 = 0, V3 = 0, V5 = 0, V6 =
2681 0, V7 = 0, V8 = 0, V9 = 0;
2682 unsigned short V10 = 0, V11 = 0, V12 = 0, V13 = 0, V14 = 0, V15 = 0;
2684 #define VGET_CTR_INC(x)
2686 #ifdef AFS_DEMAND_ATTACH_FS
2687 Volume *avp, * rvp = hint;
2690 #ifdef AFS_DEMAND_ATTACH_FS
2692 VCreateReservation_r(rvp);
2694 #endif /* AFS_DEMAND_ATTACH_FS */
2702 vp = VLookupVolume_r(ec, volumeId, vp);
2708 #ifdef AFS_DEMAND_ATTACH_FS
2709 if (rvp && (rvp != vp)) {
2710 /* break reservation on old vp */
2711 VCancelReservation_r(rvp);
2714 #endif /* AFS_DEMAND_ATTACH_FS */
2720 /* Until we have reached an initialization level of 2
2721 * we don't know whether this volume exists or not.
2722 * We can't sleep and retry later, because before a volume
2723 * is attached the caller tries to get it first. Just
2724 * return VOFFLINE and let the caller decide whether to
2725 * retry the command or not. */
2735 IncUInt64(&VStats.hdr_gets);
2737 #ifdef AFS_DEMAND_ATTACH_FS
2738 /* block if someone else is performing an exclusive op on this volume */
2741 VCreateReservation_r(rvp);
2743 VWaitExclusiveState_r(vp);
2745 /* short circuit with VNOVOL in the following circumstances:
2748 * VOL_STATE_SHUTTING_DOWN
2750 if ((V_attachState(vp) == VOL_STATE_ERROR) ||
2751 (V_attachState(vp) == VOL_STATE_SHUTTING_DOWN)) {
2758 * short circuit with VOFFLINE in the following circumstances:
2760 * VOL_STATE_UNATTACHED
2762 if (V_attachState(vp) == VOL_STATE_UNATTACHED) {
2768 /* allowable states:
2776 if (vp->salvage.requested) {
2777 VUpdateSalvagePriority_r(vp);
2780 if (V_attachState(vp) == VOL_STATE_PREATTACHED) {
2781 avp = VAttachVolumeByVp_r(ec, vp, 0);
2784 /* VAttachVolumeByVp_r can return a pointer
2785 * != the vp passed to it under certain
2786 * conditions; make sure we don't leak
2787 * reservations if that happens */
2789 VCancelReservation_r(rvp);
2791 VCreateReservation_r(rvp);
2801 if (!vp->pending_vol_op) {
2816 if ((V_attachState(vp) == VOL_STATE_SALVAGING) ||
2817 (*ec == VSALVAGING)) {
2819 /* see CheckVnode() in afsfileprocs.c for an explanation
2820 * of this error code logic */
2821 afs_uint32 now = FT_ApproxTime();
2822 if ((vp->stats.last_salvage + (10 * 60)) >= now) {
2825 *client_ec = VRESTARTING;
2834 LoadVolumeHeader(ec, vp);
2837 /* Only log the error if it was totally unexpected. A missing
2838 * inode is likely just a consequence of the volume being deleted */
2839 if (errno != ENXIO || LogLevel)
2840 Log("Volume %u: couldn't reread volume header\n",
2842 #ifdef AFS_DEMAND_ATTACH_FS
2843 if (programType == fileServer) {
2844 VRequestSalvage_r(ec, vp, SALVSYNC_ERROR, VOL_SALVAGE_INVALIDATE_HEADER);
2849 #else /* AFS_DEMAND_ATTACH_FS */
2852 #endif /* AFS_DEMAND_ATTACH_FS */
2856 #ifdef AFS_DEMAND_ATTACH_FS
2858 * this test MUST happen after the volume header is loaded
2860 if (vp->pending_vol_op && !VVolOpLeaveOnline_r(vp, vp->pending_vol_op)) {
2862 /* see CheckVnode() in afsfileprocs.c for an explanation
2863 * of this error code logic */
2864 afs_uint32 now = FT_ApproxTime();
2865 if ((vp->stats.last_vol_op + (10 * 60)) >= now) {
2868 *client_ec = VRESTARTING;
2872 ReleaseVolumeHeader(vp->header);
2876 #endif /* AFS_DEMAND_ATTACH_FS */
2879 if (vp->shuttingDown) {
2886 if (programType == fileServer) {
2888 if (vp->goingOffline) {
2890 #ifdef AFS_DEMAND_ATTACH_FS
2891 /* wait for the volume to go offline */
2892 if (V_attachState(vp) == VOL_STATE_GOING_OFFLINE) {
2893 VWaitStateChange_r(vp);
2895 #elif defined(AFS_PTHREAD_ENV)
2896 VOL_CV_WAIT(&vol_put_volume_cond);
2897 #else /* AFS_PTHREAD_ENV */
2898 LWP_WaitProcess(VPutVolume);
2899 #endif /* AFS_PTHREAD_ENV */
2902 if (vp->specialStatus) {
2904 *ec = vp->specialStatus;
2905 } else if (V_inService(vp) == 0 || V_blessed(vp) == 0) {
2908 } else if (V_inUse(vp) == 0) {
2919 #ifdef AFS_DEMAND_ATTACH_FS
2920 /* if no error, bump nUsers */
2923 VLRU_UpdateAccess_r(vp);
2926 VCancelReservation_r(rvp);
2929 if (client_ec && !*client_ec) {
2932 #else /* AFS_DEMAND_ATTACH_FS */
2933 /* if no error, bump nUsers */
2940 #endif /* AFS_DEMAND_ATTACH_FS */
2947 /***************************************************/
2948 /* Volume offline/detach routines */
2949 /***************************************************/
2951 /* caller MUST hold a heavyweight ref on vp */
2952 #ifdef AFS_DEMAND_ATTACH_FS
2954 VTakeOffline_r(register Volume * vp)
2958 assert(vp->nUsers > 0);
2959 assert(programType == fileServer);
2961 VCreateReservation_r(vp);
2962 VWaitExclusiveState_r(vp);
2964 vp->goingOffline = 1;
2965 V_needsSalvaged(vp) = 1;
2967 VRequestSalvage_r(&error, vp, SALVSYNC_ERROR, 0);
2968 VCancelReservation_r(vp);
2970 #else /* AFS_DEMAND_ATTACH_FS */
2972 VTakeOffline_r(register Volume * vp)
2974 assert(vp->nUsers > 0);
2975 assert(programType == fileServer);
2977 vp->goingOffline = 1;
2978 V_needsSalvaged(vp) = 1;
2980 #endif /* AFS_DEMAND_ATTACH_FS */
2983 VTakeOffline(register Volume * vp)
2991 * force a volume offline.
2993 * @param[in] vp volume object pointer
2994 * @param[in] flags flags (see note below)
2996 * @note the flag VOL_FORCEOFF_NOUPDATE is a recursion control flag
2997 * used when VUpdateVolume_r needs to call VForceOffline_r
2998 * (which in turn would normally call VUpdateVolume_r)
3000 * @see VUpdateVolume_r
3002 * @pre VOL_LOCK must be held.
3003 * for DAFS, caller must hold ref.
3005 * @note for DAFS, it _is safe_ to call this function from an
3008 * @post needsSalvaged flag is set.
3009 * for DAFS, salvage is requested.
3010 * no further references to the volume through the volume
3011 * package will be honored.
3012 * all file descriptor and vnode caches are invalidated.
3014 * @warning this is a heavy-handed interface. it results in
3015 * a volume going offline regardless of the current
3016 * reference count state.
3018 * @internal volume package internal use only
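/*
 * Illustrative sketch (guarded out, never compiled) of the two call
 * forms seen in this file:
 */
#if 0
/* ordinary forced offline */
VForceOffline_r(vp, 0);
/* from within VUpdateVolume_r, suppress the recursive header update */
VForceOffline_r(vp, VOL_FORCEOFF_NOUPDATE);
#endif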
3021 VForceOffline_r(Volume * vp, int flags)
3025 #ifdef AFS_DEMAND_ATTACH_FS
3026 VChangeState_r(vp, VOL_STATE_ERROR);
3031 strcpy(V_offlineMessage(vp),
3032 "Forced offline due to internal error: volume needs to be salvaged");
3033 Log("Volume %u forced offline: it needs salvaging!\n", V_id(vp));
3036 vp->goingOffline = 0;
3037 V_needsSalvaged(vp) = 1;
3038 if (!(flags & VOL_FORCEOFF_NOUPDATE)) {
3039 VUpdateVolume_r(&error, vp, VOL_UPDATE_NOFORCEOFF);
3042 #ifdef AFS_DEMAND_ATTACH_FS
3043 VRequestSalvage_r(&error, vp, SALVSYNC_ERROR, VOL_SALVAGE_INVALIDATE_HEADER);
3044 #endif /* AFS_DEMAND_ATTACH_FS */
3046 #ifdef AFS_PTHREAD_ENV
3047 assert(pthread_cond_broadcast(&vol_put_volume_cond) == 0);
3048 #else /* AFS_PTHREAD_ENV */
3049 LWP_NoYieldSignal(VPutVolume);
3050 #endif /* AFS_PTHREAD_ENV */
3052 VReleaseVolumeHandles_r(vp);
3056 * force a volume offline.
3058 * @param[in] vp volume object pointer
3060 * @see VForceOffline_r
3063 VForceOffline(Volume * vp)
3066 VForceOffline_r(vp, 0);
3070 /* The opposite of VAttachVolume. The volume header is written to disk, with
3071 the inUse bit turned off. A copy of the header is maintained in memory,
3072 however (which is why this is VOffline, not VDetach).
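/*
 * Illustrative sketch (guarded out, never compiled); the message text
 * is an example.  The caller's heavyweight reference is consumed:
 */
#if 0
VOffline(vp, "volume offline for maintenance");
/* vp must not be referenced after this point */
#endif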
3075 VOffline_r(Volume * vp, char *message)
3078 VolumeId vid = V_id(vp);
3080 assert(programType != volumeUtility);
3085 if (V_offlineMessage(vp)[0] == '\0')
3086 strncpy(V_offlineMessage(vp), message, sizeof(V_offlineMessage(vp)));
3087 V_offlineMessage(vp)[sizeof(V_offlineMessage(vp)) - 1] = '\0';
3089 vp->goingOffline = 1;
3090 #ifdef AFS_DEMAND_ATTACH_FS
3091 VChangeState_r(vp, VOL_STATE_GOING_OFFLINE);
3092 VCreateReservation_r(vp);
3095 /* wait for the volume to go offline */
3096 if (V_attachState(vp) == VOL_STATE_GOING_OFFLINE) {
3097 VWaitStateChange_r(vp);
3099 VCancelReservation_r(vp);
3100 #else /* AFS_DEMAND_ATTACH_FS */
3102 vp = VGetVolume_r(&error, vid); /* Wait for it to go offline */
3103 if (vp) /* In case it was reattached... */
3105 #endif /* AFS_DEMAND_ATTACH_FS */
3109 VOffline(Volume * vp, char *message)
3112 VOffline_r(vp, message);
3116 /* This is used mostly by utility routines that don't want
3117 * to keep all the volume headers around. Generally, the file server won't
3118 * call this routine, because then the offline message in the volume header
3119 * (or other information) won't be available to clients. For NAMEI, this also
3120 * closes the file handles. However, the fileserver does call this during
3121 * an attach following a volume operation.
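/*
 * Illustrative sketch for a volume utility (guarded out, never
 * compiled); VDetachVolume always "succeeds" from the caller's
 * perspective:
 */
#if 0
{
    Error error;
    VDetachVolume(&error, vp);	/* *ec is always set to 0 */
    vp = NULL;
}
#endif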
3124 VDetachVolume_r(Error * ec, Volume * vp)
3127 struct DiskPartition *tpartp;
3128 int notifyServer, useDone = FSYNC_VOL_ON;
3130 *ec = 0; /* always "succeeds" */
3131 if (programType == volumeUtility) {
3132 notifyServer = vp->needsPutBack;
3133 if (V_destroyMe(vp) == DESTROY_ME)
3134 useDone = FSYNC_VOL_DONE;
3135 #ifdef AFS_DEMAND_ATTACH_FS
3136 else if (!V_blessed(vp) || !V_inService(vp))
3137 useDone = FSYNC_VOL_LEAVE_OFF;
3140 tpartp = vp->partition;
3142 DeleteVolumeFromHashTable(vp);
3143 vp->shuttingDown = 1;
3144 #ifdef AFS_DEMAND_ATTACH_FS
3145 DeleteVolumeFromVByPList_r(vp);
3147 VChangeState_r(vp, VOL_STATE_SHUTTING_DOWN);
3148 #endif /* AFS_DEMAND_ATTACH_FS */
3150 /* Will be detached sometime in the future--this is OK since volume is offline */
3152 /* XXX the following code should really be moved to VCheckDetach() since the volume
3153 * is not technically detached until the refcounts reach zero
3155 #ifdef FSSYNC_BUILD_CLIENT
3156 if (programType == volumeUtility && notifyServer) {
3158 * Note: The server is deliberately not notified in the case of a
3159 * bogus volume, to make it possible to create a volume, do a partial
3160 * restore, then abort the operation without ever putting the volume
3161 * online. This is essential in the case of a volume move operation
3162 * between two partitions on the same server. In that case, there
3163 * would be two instances of the same volume, one of them bogus,
3164 * which the file server would attempt to put online
3166 FSYNC_VolOp(volume, tpartp->name, useDone, 0, NULL);
3167 /* XXX this code path is only hit by volume utilities, thus
3168 * V_BreakVolumeCallbacks will always be NULL. if we really
3169 * want to break callbacks in this path we need to use FSYNC_VolOp() */
3171 /* Detaching it, so break all callbacks on it */
3172 if (V_BreakVolumeCallbacks) {
3173 Log("volume %u detached; breaking all call backs\n", volume);
3174 (*V_BreakVolumeCallbacks) (volume);
3178 #endif /* FSSYNC_BUILD_CLIENT */
3182 VDetachVolume(Error * ec, Volume * vp)
3185 VDetachVolume_r(ec, vp);
3190 /***************************************************/
3191 /* Volume fd/inode handle closing routines */
3192 /***************************************************/
3194 /* For VDetachVolume, we close all cached file descriptors, but keep
3195 * the Inode handles in case we need to read from a busy volume.
3197 /* for demand attach, caller MUST hold ref count on vp */
3199 VCloseVolumeHandles_r(Volume * vp)
3201 #ifdef AFS_DEMAND_ATTACH_FS
3202 VolState state_save;
3204 state_save = VChangeState_r(vp, VOL_STATE_OFFLINING);
3209 * XXX need to investigate whether we can perform
3210 * DFlushVolume outside of vol_glock_mutex...
3212 * VCloseVnodeFiles_r drops the glock internally */
3213 DFlushVolume(V_id(vp));
3214 VCloseVnodeFiles_r(vp);
3216 #ifdef AFS_DEMAND_ATTACH_FS
3220 /* Too time consuming and unnecessary for the volserver */
3221 if (programType != volumeUtility) {
3222 IH_CONDSYNC(vp->vnodeIndex[vLarge].handle);
3223 IH_CONDSYNC(vp->vnodeIndex[vSmall].handle);
3224 IH_CONDSYNC(vp->diskDataHandle);
3226 IH_CONDSYNC(vp->linkHandle);
3227 #endif /* AFS_NT40_ENV */
3230 IH_REALLYCLOSE(vp->vnodeIndex[vLarge].handle);
3231 IH_REALLYCLOSE(vp->vnodeIndex[vSmall].handle);
3232 IH_REALLYCLOSE(vp->diskDataHandle);
3233 IH_REALLYCLOSE(vp->linkHandle);
3235 #ifdef AFS_DEMAND_ATTACH_FS
3237 VChangeState_r(vp, state_save);
3241 /* For both VForceOffline and VOffline, we close all relevant handles.
3242 * For VOffline, if we re-attach the volume, the files may possibly be
3243 * different from before.
3245 /* for demand attach, caller MUST hold a ref count on vp */
3247 VReleaseVolumeHandles_r(Volume * vp)
3249 #ifdef AFS_DEMAND_ATTACH_FS
3250 VolState state_save;
3252 state_save = VChangeState_r(vp, VOL_STATE_DETACHING);
3255 /* XXX need to investigate whether we can perform
3256 * DFlushVolume outside of vol_glock_mutex... */
3257 DFlushVolume(V_id(vp));
3259 VReleaseVnodeFiles_r(vp); /* releases the glock internally */
3261 #ifdef AFS_DEMAND_ATTACH_FS
3265 /* Too time consuming and unnecessary for the volserver */
3266 if (programType != volumeUtility) {
3267 IH_CONDSYNC(vp->vnodeIndex[vLarge].handle);
3268 IH_CONDSYNC(vp->vnodeIndex[vSmall].handle);
3269 IH_CONDSYNC(vp->diskDataHandle);
3271 IH_CONDSYNC(vp->linkHandle);
3272 #endif /* AFS_NT40_ENV */
3275 IH_RELEASE(vp->vnodeIndex[vLarge].handle);
3276 IH_RELEASE(vp->vnodeIndex[vSmall].handle);
3277 IH_RELEASE(vp->diskDataHandle);
3278 IH_RELEASE(vp->linkHandle);
3280 #ifdef AFS_DEMAND_ATTACH_FS
3282 VChangeState_r(vp, state_save);
3287 /***************************************************/
3288 /* Volume write and fsync routines */
3289 /***************************************************/
3292 VUpdateVolume_r(Error * ec, Volume * vp, int flags)
3294 #ifdef AFS_DEMAND_ATTACH_FS
3295 VolState state_save;
3297 if (flags & VOL_UPDATE_WAIT) {
3298 VCreateReservation_r(vp);
3299 VWaitExclusiveState_r(vp);
3304 if (programType == fileServer)
3306 (V_inUse(vp) ? V_nextVnodeUnique(vp) +
3307 200 : V_nextVnodeUnique(vp));
3309 #ifdef AFS_DEMAND_ATTACH_FS
3310 state_save = VChangeState_r(vp, VOL_STATE_UPDATING);
3314 WriteVolumeHeader_r(ec, vp);
3316 #ifdef AFS_DEMAND_ATTACH_FS
3318 VChangeState_r(vp, state_save);
3319 if (flags & VOL_UPDATE_WAIT) {
3320 VCancelReservation_r(vp);
3325 Log("VUpdateVolume: error updating volume header, volume %u (%s)\n",
3326 V_id(vp), V_name(vp));
3327 /* try to update on-disk header,
3328 * while preventing infinite recursion */
3329 if (!(flags & VOL_UPDATE_NOFORCEOFF)) {
3330 VForceOffline_r(vp, VOL_FORCEOFF_NOUPDATE);
3336 VUpdateVolume(Error * ec, Volume * vp)
3339 VUpdateVolume_r(ec, vp, VOL_UPDATE_WAIT);
3344 VSyncVolume_r(Error * ec, Volume * vp, int flags)
3348 #ifdef AFS_DEMAND_ATTACH_FS
3349 VolState state_save;
3352 if (flags & VOL_SYNC_WAIT) {
3353 VUpdateVolume_r(ec, vp, VOL_UPDATE_WAIT);
3355 VUpdateVolume_r(ec, vp, 0);
3358 #ifdef AFS_DEMAND_ATTACH_FS
3359 state_save = VChangeState_r(vp, VOL_STATE_UPDATING);
3362 fdP = IH_OPEN(V_diskDataHandle(vp));
3363 assert(fdP != NULL);
3364 code = FDH_SYNC(fdP);
3367 #ifdef AFS_DEMAND_ATTACH_FS
3369 VChangeState_r(vp, state_save);
3375 VSyncVolume(Error * ec, Volume * vp)
3378 VSyncVolume_r(ec, vp, VOL_SYNC_WAIT);
3383 /***************************************************/
3384 /* Volume deallocation routines */
3385 /***************************************************/
3387 #ifdef AFS_DEMAND_ATTACH_FS
3389 FreeVolume(Volume * vp)
3391 /* free the heap space, iff it's safe.
3392 * otherwise, pull it out of the hash table, so it
3393 * will get deallocated when all refs to it go away */
3394 if (!VCheckFree(vp)) {
3395 DeleteVolumeFromHashTable(vp);
3396 DeleteVolumeFromVByPList_r(vp);
3398 /* make sure we invalidate the header cache entry */
3399 FreeVolumeHeader(vp);
3402 #endif /* AFS_DEMAND_ATTACH_FS */
3405 ReallyFreeVolume(Volume * vp)
3410 #ifdef AFS_DEMAND_ATTACH_FS
3412 VChangeState_r(vp, VOL_STATE_FREED);
3413 if (vp->pending_vol_op)
3414 free(vp->pending_vol_op);
3415 #endif /* AFS_DEMAND_ATTACH_FS */
3416 for (i = 0; i < nVNODECLASSES; i++)
3417 if (vp->vnodeIndex[i].bitmap)
3418 free(vp->vnodeIndex[i].bitmap);
3419 FreeVolumeHeader(vp);
3420 #ifndef AFS_DEMAND_ATTACH_FS
3421 DeleteVolumeFromHashTable(vp);
3422 #endif /* AFS_DEMAND_ATTACH_FS */
3426 /* check to see if we should shut down this volume
3427 * returns 1 if the volume was freed, 0 otherwise */
3428 #ifdef AFS_DEMAND_ATTACH_FS
3430 VCheckDetach(register Volume * vp)
3434 if (vp->nUsers || vp->nWaiters)
3437 if (vp->shuttingDown) {
3439 VReleaseVolumeHandles_r(vp);
3441 ReallyFreeVolume(vp);
3442 if (programType == fileServer) {
3443 assert(pthread_cond_broadcast(&vol_put_volume_cond) == 0);
3448 #else /* AFS_DEMAND_ATTACH_FS */
3450 VCheckDetach(register Volume * vp)
3457 if (vp->shuttingDown) {
3459 VReleaseVolumeHandles_r(vp);
3460 ReallyFreeVolume(vp);
3461 if (programType == fileServer) {
3462 #if defined(AFS_PTHREAD_ENV)
3463 assert(pthread_cond_broadcast(&vol_put_volume_cond) == 0);
3464 #else /* AFS_PTHREAD_ENV */
3465 LWP_NoYieldSignal(VPutVolume);
3466 #endif /* AFS_PTHREAD_ENV */
3471 #endif /* AFS_DEMAND_ATTACH_FS */
3473 /* check to see if we should offline this volume
3474 * return 1 if volume went offline, 0 otherwise */
3475 #ifdef AFS_DEMAND_ATTACH_FS
3477 VCheckOffline(register Volume * vp)
3479 Volume * rvp = NULL;
3482 if (vp->goingOffline && !vp->nUsers) {
3484 assert(programType == fileServer);
3485 assert((V_attachState(vp) != VOL_STATE_ATTACHED) &&
3486 (V_attachState(vp) != VOL_STATE_FREED) &&
3487 (V_attachState(vp) != VOL_STATE_PREATTACHED) &&
3488 (V_attachState(vp) != VOL_STATE_UNATTACHED));
3492 * VOL_STATE_GOING_OFFLINE
3493 * VOL_STATE_SHUTTING_DOWN
3494 * VIsErrorState(V_attachState(vp))
3495 * VIsExclusiveState(V_attachState(vp))
3498 VCreateReservation_r(vp);
3499 VChangeState_r(vp, VOL_STATE_OFFLINING);
3502 /* must clear the goingOffline flag before we drop the glock */
3503 vp->goingOffline = 0;
3508 /* perform async operations */
3509 VUpdateVolume_r(&error, vp, 0);
3510 VCloseVolumeHandles_r(vp);
3513 Log("VOffline: Volume %u (%s) is now offline", V_id(vp),
3515 if (V_offlineMessage(vp)[0])
3516 Log(" (%s)", V_offlineMessage(vp));
3520 /* invalidate the volume header cache entry */
3521 FreeVolumeHeader(vp);
3523 /* if nothing changed state to error or salvaging,
3524 * drop state to unattached */
3525 if (!VIsErrorState(V_attachState(vp))) {
3526 VChangeState_r(vp, VOL_STATE_UNATTACHED);
3528 VCancelReservation_r(vp);
3529 /* no usage of vp is safe beyond this point */
3533 #else /* AFS_DEMAND_ATTACH_FS */
3535 VCheckOffline(register Volume * vp)
3537 Volume * rvp = NULL;
3540 if (vp->goingOffline && !vp->nUsers) {
3542 assert(programType == fileServer);
3545 vp->goingOffline = 0;
3547 VUpdateVolume_r(&error, vp, 0);
3548 VCloseVolumeHandles_r(vp);
3550 Log("VOffline: Volume %u (%s) is now offline", V_id(vp),
3552 if (V_offlineMessage(vp)[0])
3553 Log(" (%s)", V_offlineMessage(vp));
3556 FreeVolumeHeader(vp);
3557 #ifdef AFS_PTHREAD_ENV
3558 assert(pthread_cond_broadcast(&vol_put_volume_cond) == 0);
3559 #else /* AFS_PTHREAD_ENV */
3560 LWP_NoYieldSignal(VPutVolume);
3561 #endif /* AFS_PTHREAD_ENV */
3565 #endif /* AFS_DEMAND_ATTACH_FS */
3567 /***************************************************/
3568 /* demand attach fs ref counting routines */
3569 /***************************************************/
3571 #ifdef AFS_DEMAND_ATTACH_FS
3572 /* the following two functions handle reference counting for
3573 * asynchronous operations on volume structs.
3575 * their purpose is to prevent a VDetachVolume or VShutdown
3576 * from free()ing the Volume struct during an async i/o op */
3578 /* register with the async volume op ref counter */
3579 /* VCreateReservation_r moved into inline code header because it
3580 * is now needed in vnode.c -- tkeiser 11/20/2007
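/*
 * Illustrative sketch (guarded out, never compiled) of the
 * lightweight-ref pattern these functions support:
 */
#if 0
VCreateReservation_r(vp);	/* pin the Volume struct */
VOL_UNLOCK;
/* ... async i/o that must not race a free() of vp ... */
VOL_LOCK;
VCancelReservation_r(vp);	/* vp may be invalid after this call */
#endif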
3584 * decrement volume-package internal refcount.
3586 * @param vp volume object pointer
3588 * @internal volume package internal use only
3591 * @arg VOL_LOCK is held
3592 * @arg lightweight refcount held
3594 * @post volume waiters refcount is decremented; volume may
3595 * have been deallocated/shutdown/offlined/salvaged/
3596 * whatever during the process
3598 * @warning once you have tossed your last reference (you can acquire
3599 * lightweight refs recursively) it is NOT SAFE to reference
3600 * a volume object pointer ever again
3602 * @see VCreateReservation_r
3604 * @note DEMAND_ATTACH_FS only
3607 VCancelReservation_r(Volume * vp)
3609 assert(--vp->nWaiters >= 0);
3610 if (vp->nWaiters == 0) {
3612 if (!VCheckDetach(vp)) {
3619 /* check to see if we should free this volume now
3620 * return 1 if volume was freed, 0 otherwise */
3622 VCheckFree(Volume * vp)
3625 if ((vp->nUsers == 0) &&
3626 (vp->nWaiters == 0) &&
3627 !(V_attachFlags(vp) & (VOL_IN_HASH |
3631 ReallyFreeVolume(vp);
3636 #endif /* AFS_DEMAND_ATTACH_FS */
3639 /***************************************************/
3640 /* online volume operations routines */
3641 /***************************************************/
3643 #ifdef AFS_DEMAND_ATTACH_FS
3645 * register a volume operation on a given volume.
3647 * @param[in] vp volume object
3648 * @param[in] vopinfo volume operation info object
3650 * @pre VOL_LOCK is held
3652 * @post volume operation info object attached to volume object.
3653 * volume operation statistics updated.
3655 * @note by "attached" we mean a copy of the passed in object is made
3657 * @internal volume package internal use only
3660 VRegisterVolOp_r(Volume * vp, FSSYNC_VolOp_info * vopinfo)
3662 FSSYNC_VolOp_info * info;
3664 /* attach a vol op info node to the volume struct */
3665 info = (FSSYNC_VolOp_info *) malloc(sizeof(FSSYNC_VolOp_info));
3666 assert(info != NULL);
3667 memcpy(info, vopinfo, sizeof(FSSYNC_VolOp_info));
3668 vp->pending_vol_op = info;
3671 vp->stats.last_vol_op = FT_ApproxTime();
3672 vp->stats.vol_ops++;
3673 IncUInt64(&VStats.vol_ops);
3679 * deregister the volume operation attached to this volume.
3681 * @param[in] vp volume object pointer
3683 * @pre VOL_LOCK is held
3685 * @post the volume operation info object is detached from the volume object
3687 * @internal volume package internal use only
3690 VDeregisterVolOp_r(Volume * vp)
3692 if (vp->pending_vol_op) {
3693 free(vp->pending_vol_op);
3694 vp->pending_vol_op = NULL;
3698 #endif /* AFS_DEMAND_ATTACH_FS */
3701 * determine whether it is safe to leave a volume online during
3702 * the volume operation described by the vopinfo object.
3704 * @param[in] vp volume object
3705 * @param[in] vopinfo volume operation info object
3707 * @return whether it is safe to leave volume online
3708 * @retval 0 it is NOT SAFE to leave the volume online
3709 * @retval 1 it is safe to leave the volume online during the operation
3712 * @arg VOL_LOCK is held
3713 * @arg disk header attached to vp (heavyweight ref on vp will guarantee
3714 * this condition is met)
3716 * @internal volume package internal use only
3719 VVolOpLeaveOnline_r(Volume * vp, FSSYNC_VolOp_info * vopinfo)
3721 return (vopinfo->com.command == FSYNC_VOL_NEEDVOLUME &&
3722 (vopinfo->com.reason == V_READONLY ||
3723 (!VolumeWriteable(vp) &&
3724 (vopinfo->com.reason == V_CLONE ||
3725 vopinfo->com.reason == V_DUMP))));
3729 * determine whether VBUSY should be set during this volume operation.
3731 * @param[in] vp volume object
3732 * @param[in] vopinfo volume operation info object
3734 * @return whether VBUSY should be set
3735 * @retval 0 VBUSY does NOT need to be set
3736 * @retval 1 VBUSY SHOULD be set
3738 * @pre VOL_LOCK is held
3740 * @internal volume package internal use only
3743 VVolOpSetVBusy_r(Volume * vp, FSSYNC_VolOp_info * vopinfo)
3745 return (vopinfo->com.command == FSYNC_VOL_NEEDVOLUME &&
3746 (vopinfo->com.reason == V_CLONE ||
3747 vopinfo->com.reason == V_DUMP));
3751 /***************************************************/
3752 /* online salvager routines */
3753 /***************************************************/
3754 #if defined(AFS_DEMAND_ATTACH_FS)
3755 #define SALVAGE_PRIO_UPDATE_INTERVAL 3 /**< number of seconds between prio updates */
3756 #define SALVAGE_COUNT_MAX 16 /**< number of online salvages we
3757 * allow before moving the volume
3758 * into a permanent error state
3760 * once this threshold is reached,
3761 * the operator will have to manually
3762 * issue a 'bos salvage' to bring
3763 * the volume back online
3767 * check whether a salvage needs to be performed on this volume.
3769 * @param[in] vp pointer to volume object
3771 * @return status code
3772 * @retval 0 no salvage scheduled
3773 * @retval 1 a salvage has been scheduled with the salvageserver
3775 * @pre VOL_LOCK is held
3777 * @post if salvage request flag is set and nUsers and nWaiters are zero,
3778 * then a salvage will be requested
3780 * @note this is one of the event handlers called by VCancelReservation_r
3782 * @see VCancelReservation_r
3784 * @internal volume package internal use only.
3787 VCheckSalvage(register Volume * vp)
3790 #ifdef SALVSYNC_BUILD_CLIENT
3791 if (vp->nUsers || vp->nWaiters)
3793 if (vp->salvage.requested) {
3794 VScheduleSalvage_r(vp);
3797 #endif /* SALVSYNC_BUILD_CLIENT */
3802 * request volume salvage.
3804 * @param[out] ec computed client error code
3805 * @param[in] vp volume object pointer
3806 * @param[in] reason reason code (passed to salvageserver via SALVSYNC)
3807 * @param[in] flags see flags note below
3810 * VOL_SALVAGE_INVALIDATE_HEADER causes volume header cache entry
3811 * to be invalidated.
3813 * @pre VOL_LOCK is held.
3815 * @post volume state is changed.
3816 * for fileserver, salvage will be requested once refcount reaches zero.
3818 * @return operation status code
3819 * @retval 0 volume salvage will occur
3820 * @retval 1 volume salvage could not be scheduled
3822 * @note DAFS fileserver only
3824 * @note this call does not synchronously schedule a volume salvage. rather,
3825 * it sets volume state so that when volume refcounts reach zero, a
3826 * volume salvage will occur. by "refcounts", we mean both nUsers and
3827 * nWaiters must be zero.
3829 * @internal volume package internal use only.
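/*
 * Illustrative sketch (guarded out, never compiled), matching the
 * error-path call sites elsewhere in this file:
 */
#if 0
if (*ec) {
    VRequestSalvage_r(ec, vp, SALVSYNC_ERROR, VOL_SALVAGE_INVALIDATE_HEADER);
}
#endif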
3832 VRequestSalvage_r(Error * ec, Volume * vp, int reason, int flags)
3836 * for DAFS volume utilities, transition to error state
3837 * (at some point in the future, we should consider
3838 * making volser talk to salsrv)
3840 if (programType != fileServer) {
3841 VChangeState_r(vp, VOL_STATE_ERROR);
3846 if (!vp->salvage.requested) {
3847 vp->salvage.requested = 1;
3848 vp->salvage.reason = reason;
3849 vp->stats.last_salvage = FT_ApproxTime();
3850 if (flags & VOL_SALVAGE_INVALIDATE_HEADER) {
3851 /* XXX this should likely be changed to FreeVolumeHeader() */
3852 ReleaseVolumeHeader(vp->header);
3854 if (vp->stats.salvages < SALVAGE_COUNT_MAX) {
3855 VChangeState_r(vp, VOL_STATE_SALVAGING);
3858 Log("VRequestSalvage: volume %u online salvaged too many times; forced offline.\n", vp->hashid);
3859 VChangeState_r(vp, VOL_STATE_ERROR);
3868 * update salvageserver scheduling priority for a volume.
3870 * @param[in] vp pointer to volume object
3872 * @return operation status
3874 * @retval 1 request denied, or SALVSYNC communications failure
3876 * @pre VOL_LOCK is held.
3878 * @post in-core salvage priority counter is incremented. if at least
3879 * SALVAGE_PRIO_UPDATE_INTERVAL seconds have elapsed since the
3880 * last SALVSYNC_RAISEPRIO request, we contact the salvageserver
3881 * to update its priority queue. if no salvage is scheduled,
3882 * this function is a no-op.
3884 * @note DAFS fileserver only
3886 * @note this should be called whenever a VGetVolume fails due to a
3887 * pending salvage request
3889 * @todo should set exclusive state and drop glock around salvsync call
3891 * @internal volume package internal use only.
3894 VUpdateSalvagePriority_r(Volume * vp)
3899 #ifdef SALVSYNC_BUILD_CLIENT
3901 now = FT_ApproxTime();
3903 /* update the salvageserver priority queue occasionally so that
3904 * frequently requested volumes get moved to the head of the queue
3906 if ((vp->salvage.scheduled) &&
3907 (vp->stats.last_salvage_req < (now-SALVAGE_PRIO_UPDATE_INTERVAL))) {
3908 code = SALVSYNC_SalvageVolume(vp->hashid,
3909 VPartitionPath(vp->partition),
3914 vp->stats.last_salvage_req = now;
3915 if (code != SYNC_OK) {
3919 #endif /* SALVSYNC_BUILD_CLIENT */
3925 * schedule a salvage with the salvage server.
3927 * @param[in] vp pointer to volume object
3929 * @return operation status
3930 * @retval 0 salvage scheduled successfully
3931 * @retval 1 salvage not scheduled, or SALVSYNC com error
3934 * @arg VOL_LOCK is held.
3935 * @arg nUsers and nWaiters should be zero.
3937 * @post salvageserver is sent a salvage request
3939 * @note DAFS fileserver only
3941 * @internal volume package internal use only.
3944 VScheduleSalvage_r(Volume * vp)
3947 #ifdef SALVSYNC_BUILD_CLIENT
3948 VolState state_save;
3951 if (vp->nWaiters || vp->nUsers) {
3955 /* prevent endless salvage, attach, salvage, attach, ... loops */
3956 if (vp->stats.salvages >= SALVAGE_COUNT_MAX)
3959 if (!vp->salvage.scheduled) {
3960 /* if we haven't previously scheduled a salvage, do so now
3962 * set the volume to an exclusive state and drop the lock
3963 * around the SALVSYNC call
3965 * note that we do NOT acquire a reservation here -- doing so
3966 * could result in unbounded recursion
3968 strlcpy(partName, VPartitionPath(vp->partition), sizeof(partName));
3969 state_save = VChangeState_r(vp, VOL_STATE_SALVSYNC_REQ);
3970 V_attachFlags(vp) |= VOL_IS_BUSY;
3973 /* can't use V_id() since there's no guarantee
3974 * we have the disk data header at this point */
3975 code = SALVSYNC_SalvageVolume(vp->hashid,
3982 VChangeState_r(vp, state_save);
3983 V_attachFlags(vp) &= ~(VOL_IS_BUSY);
3985 if (code == SYNC_OK) {
3986 vp->salvage.scheduled = 1;
3987 vp->stats.salvages++;
3988 vp->stats.last_salvage_req = FT_ApproxTime();
3989 IncUInt64(&VStats.salvages);
3993 case SYNC_BAD_COMMAND:
3994 case SYNC_COM_ERROR:
3997 Log("VScheduleSalvage_r: SALVSYNC request denied\n");
4000 Log("VScheduleSalvage_r: SALVSYNC unknown protocol error\n");
4005 #endif /* SALVSYNC_BUILD_CLIENT */
4010 * ask salvageserver to cancel a scheduled salvage operation.
4012 * @param[in] vp pointer to volume object
4013 * @param[in] reason SALVSYNC protocol reason code
4015 * @return operation status
4017 * @retval 1 request failed
4019 * @pre VOL_LOCK is held.
4021 * @post salvageserver is sent a request to cancel the volume salvage
4023 * @todo should set exclusive state and drop glock around salvsync call
4025 * @internal volume package internal use only.
4028 VCancelSalvage_r(Volume * vp, int reason)
4032 #ifdef SALVSYNC_BUILD_CLIENT
4033 if (vp->salvage.scheduled) {
4034 code = SALVSYNC_SalvageVolume(vp->hashid,
4035 VPartitionPath(vp->partition),
4040 if (code == SYNC_OK) {
4041 vp->salvage.scheduled = 0;
4046 #endif /* SALVSYNC_BUILD_CLIENT */
4051 #ifdef SALVSYNC_BUILD_CLIENT
4053 * connect to the salvageserver SYNC service.
4055 * @return operation status
4059 * @post connection to salvageserver SYNC service established
4061 * @see VConnectSALV_r
4062 * @see VDisconnectSALV
4063 * @see VReconnectSALV
4070 retVal = VConnectSALV_r();
4076 * connect to the salvageserver SYNC service.
4078 * @return operation status
4082 * @pre VOL_LOCK is held.
4084 * @post connection to salvageserver SYNC service established
4087 * @see VDisconnectSALV_r
4088 * @see VReconnectSALV_r
4089 * @see SALVSYNC_clientInit
4091 * @internal volume package internal use only.
4094 VConnectSALV_r(void)
4096 return SALVSYNC_clientInit();
4100 * disconnect from the salvageserver SYNC service.
4102 * @return operation status
4105 * @pre client should have a live connection to the salvageserver
4107 * @post connection to salvageserver SYNC service destroyed
4109 * @see VDisconnectSALV_r
4111 * @see VReconnectSALV
4114 VDisconnectSALV(void)
4118 VDisconnectSALV_r();
4124 * disconnect from the salvageserver SYNC service.
4126 * @return operation status
4130 * @arg VOL_LOCK is held.
4131 * @arg client should have a live connection to the salvageserver.
4133 * @post connection to salvageserver SYNC service destroyed
4135 * @see VDisconnectSALV
4136 * @see VConnectSALV_r
4137 * @see VReconnectSALV_r
4138 * @see SALVSYNC_clientFinis
4140 * @internal volume package internal use only.
4143 VDisconnectSALV_r(void)
4145 return SALVSYNC_clientFinis();
4149 * disconnect and then re-connect to the salvageserver SYNC service.
4151 * @return operation status
4155 * @pre client should have a live connection to the salvageserver
4157 * @post old connection is dropped, and a new one is established
4160 * @see VDisconnectSALV
4161 * @see VReconnectSALV_r
4164 VReconnectSALV(void)
4168 retVal = VReconnectSALV_r();
4174 * disconnect and then re-connect to the salvageserver SYNC service.
4176 * @return operation status
4181 * @arg VOL_LOCK is held.
4182 * @arg client should have a live connection to the salvageserver.
4184 * @post old connection is dropped, and a new one is established
4186 * @see VConnectSALV_r
4187 * @see VDisconnectSALV
4188 * @see VReconnectSALV
4189 * @see SALVSYNC_clientReconnect
4191 * @internal volume package internal use only.
4194 VReconnectSALV_r(void)
4196 return SALVSYNC_clientReconnect();
4198 #endif /* SALVSYNC_BUILD_CLIENT */
4199 #endif /* AFS_DEMAND_ATTACH_FS */
4202 /***************************************************/
4203 /* FSSYNC routines */
4204 /***************************************************/
4206 /* This must be called by any volume utility which needs to run while the
4207 file server is also running. This is separated from VInitVolumePackage so
4208 that a utility can fork--and each of the children can independently
4209 initialize communication with the file server */
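/*
 * Illustrative sketch of the fork pattern described above (guarded
 * out, never compiled):
 */
#if 0
if (fork() == 0) {
    /* child: the inherited FSYNC handle is shared with the parent,
     * so discard it and establish a fresh connection */
    VChildProcReconnectFS();
    /* ... child's volume work ... */
}
#endif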
4210 #ifdef FSSYNC_BUILD_CLIENT
4212 * connect to the fileserver SYNC service.
4214 * @return operation status
4219 * @arg VInit must equal 2.
4220 * @arg Program Type must not be fileserver or salvager.
4222 * @post connection to fileserver SYNC service established
4225 * @see VDisconnectFS
4226 * @see VChildProcReconnectFS
4233 retVal = VConnectFS_r();
4239 * connect to the fileserver SYNC service.
4241 * @return operation status
4246 * @arg VInit must equal 2.
4247 * @arg Program Type must not be fileserver or salvager.
4248 * @arg VOL_LOCK is held.
4250 * @post connection to fileserver SYNC service established
4253 * @see VDisconnectFS_r
4254 * @see VChildProcReconnectFS_r
4256 * @internal volume package internal use only.
4262 assert((VInit == 2) &&
4263 (programType != fileServer) &&
4264 (programType != salvager));
4265 rc = FSYNC_clientInit();
4272 * disconnect from the fileserver SYNC service.
4275 * @arg client should have a live connection to the fileserver.
4276 * @arg VOL_LOCK is held.
4277 * @arg Program Type must not be fileserver or salvager.
4279 * @post connection to fileserver SYNC service destroyed
4281 * @see VDisconnectFS
4283 * @see VChildProcReconnectFS_r
4285 * @internal volume package internal use only.
4288 VDisconnectFS_r(void)
4290 assert((programType != fileServer) &&
4291 (programType != salvager));
4292 FSYNC_clientFinis();
4297 * disconnect from the fileserver SYNC service.
4300 * @arg client should have a live connection to the fileserver.
4301 * @arg Program Type must not be fileserver or salvager.
4303 * @post connection to fileserver SYNC service destroyed
4305 * @see VDisconnectFS_r
4307 * @see VChildProcReconnectFS
4318 * connect to the fileserver SYNC service from a child process following a fork.
4320 * @return operation status
4325 * @arg VOL_LOCK is held.
4326 * @arg current FSYNC handle is shared with a parent process
4328 * @post current FSYNC handle is discarded and a new connection to the
4329 * fileserver SYNC service is established
4331 * @see VChildProcReconnectFS
4333 * @see VDisconnectFS_r
4335 * @internal volume package internal use only.
4338 VChildProcReconnectFS_r(void)
4340 return FSYNC_clientChildProcReconnect();
4344 * connect to the fileserver SYNC service from a child process following a fork.
4346 * @return operation status
4350 * @pre current FSYNC handle is shared with a parent process
4352 * @post current FSYNC handle is discarded and a new connection to the
4353 * fileserver SYNC service is established
4355 * @see VChildProcReconnectFS_r
4357 * @see VDisconnectFS
4360 VChildProcReconnectFS(void)
4364 ret = VChildProcReconnectFS_r();
4368 #endif /* FSSYNC_BUILD_CLIENT */
4371 /***************************************************/
4372 /* volume bitmap routines */
4373 /***************************************************/
4376 * For demand attach fs, flags parameter controls
4377 * locking behavior. If (flags & VOL_ALLOC_BITMAP_WAIT)
4378 * is set, then this function will create a reservation
4379 * and block on any other exclusive operations. Otherwise,
4380 * this function assumes the caller already has exclusive
4381 * access to vp, and we just change the volume state.
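/*
 * Illustrative sketch (guarded out, never compiled).  External callers
 * normally pass VOL_ALLOC_BITMAP_WAIT, as the unlocked wrapper below
 * does; the local declarations are assumptions for illustration:
 */
#if 0
{
    Error ec;
    VnodeId bitNumber;
    VOL_LOCK;
    bitNumber = VAllocBitmapEntry_r(&ec, vp, &vp->vnodeIndex[vSmall],
				    VOL_ALLOC_BITMAP_WAIT);
    VOL_UNLOCK;
}
#endif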
4384 VAllocBitmapEntry_r(Error * ec, Volume * vp,
4385 struct vnodeIndex *index, int flags)
4388 register byte *bp, *ep;
4389 #ifdef AFS_DEMAND_ATTACH_FS
4390 VolState state_save;
4391 #endif /* AFS_DEMAND_ATTACH_FS */
4395 /* This test is probably redundant */
4396 if (!VolumeWriteable(vp)) {
4397 *ec = (bit32) VREADONLY;
4401 #ifdef AFS_DEMAND_ATTACH_FS
4402 if (flags & VOL_ALLOC_BITMAP_WAIT) {
4403 VCreateReservation_r(vp);
4404 VWaitExclusiveState_r(vp);
4406 state_save = VChangeState_r(vp, VOL_STATE_GET_BITMAP);
4407 #endif /* AFS_DEMAND_ATTACH_FS */
4410 if ((programType == fileServer) && !index->bitmap) {
4412 #ifndef AFS_DEMAND_ATTACH_FS
4413 /* demand attach fs uses the volume state to avoid races.
4414 * specialStatus field is not used at all */
4416 if (vp->specialStatus == VBUSY) {
4417 if (vp->goingOffline) { /* vos dump waiting for the volume to
4418 * go offline. We probably come here
4419 * from AddNewReadableResidency */
4422 while (vp->specialStatus == VBUSY) {
4423 #ifdef AFS_PTHREAD_ENV
4427 #else /* !AFS_PTHREAD_ENV */
4429 #endif /* !AFS_PTHREAD_ENV */
4433 #endif /* !AFS_DEMAND_ATTACH_FS */
4435 if (!index->bitmap) {
4436 #ifndef AFS_DEMAND_ATTACH_FS
4437 vp->specialStatus = VBUSY; /* Stop anyone else from using it. */
4438 #endif /* AFS_DEMAND_ATTACH_FS */
4439 for (i = 0; i < nVNODECLASSES; i++) {
4440 VGetBitmap_r(ec, vp, i);
4442 #ifdef AFS_DEMAND_ATTACH_FS
4443 VRequestSalvage_r(ec, vp, SALVSYNC_ERROR, VOL_SALVAGE_INVALIDATE_HEADER);
4444 #else /* AFS_DEMAND_ATTACH_FS */
4445 DeleteVolumeFromHashTable(vp);
4446 vp->shuttingDown = 1; /* Let who has it free it. */
4447 vp->specialStatus = 0;
4448 #endif /* AFS_DEMAND_ATTACH_FS */
4453 #ifndef AFS_DEMAND_ATTACH_FS
4455 vp->specialStatus = 0; /* Allow others to have access. */
4456 #endif /* AFS_DEMAND_ATTACH_FS */
4459 #endif /* BITMAP_LATER */
4461 #ifdef AFS_DEMAND_ATTACH_FS
4463 #endif /* AFS_DEMAND_ATTACH_FS */
4464 bp = index->bitmap + index->bitmapOffset;
4465 ep = index->bitmap + index->bitmapSize;
4467 if ((*(bit32 *) bp) != (bit32) 0xffffffff) {
4469 index->bitmapOffset = (afs_uint32) (bp - index->bitmap);
4472 o = ffs(~*bp) - 1; /* ffs is documented in BSTRING(3) */
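/* worked example (illustrative): if *bp == 0xb7 (binary 10110111),
 * then ~*bp has its lowest set bit at position 4, so ffs(~*bp) == 4
 * and o == 3, i.e. bit 3 is the lowest clear bit in *bp -- the first
 * free slot in this byte */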
4474 ret = (VnodeId) ((bp - index->bitmap) * 8 + o);
4475 #ifdef AFS_DEMAND_ATTACH_FS
4477 #endif /* AFS_DEMAND_ATTACH_FS */
4480 bp += sizeof(bit32) /* i.e. 4 */ ;
4482 /* No free bitmap entry--must grow the bitmap */
4484 realloc(index->bitmap, index->bitmapSize + VOLUME_BITMAP_GROWSIZE);
4487 bp += index->bitmapSize;
4488 memset(bp, 0, VOLUME_BITMAP_GROWSIZE);
4489 index->bitmapOffset = index->bitmapSize;
4490 index->bitmapSize += VOLUME_BITMAP_GROWSIZE;
4492 ret = index->bitmapOffset * 8;
4493 #ifdef AFS_DEMAND_ATTACH_FS
4495 #endif /* AFS_DEMAND_ATTACH_FS */
4498 #ifdef AFS_DEMAND_ATTACH_FS
4499 VChangeState_r(vp, state_save);
4500 if (flags & VOL_ALLOC_BITMAP_WAIT) {
4501 VCancelReservation_r(vp);
4503 #endif /* AFS_DEMAND_ATTACH_FS */
4508 VAllocBitmapEntry(Error * ec, Volume * vp, register struct vnodeIndex * index)
4512 retVal = VAllocBitmapEntry_r(ec, vp, index, VOL_ALLOC_BITMAP_WAIT);
4518 VFreeBitMapEntry_r(Error * ec, register struct vnodeIndex *index,
4521 unsigned int offset;
4527 #endif /* BITMAP_LATER */
4528 offset = bitNumber >> 3;
4529 if (offset >= index->bitmapSize) {
4533 if (offset < index->bitmapOffset)
4534 index->bitmapOffset = offset & ~3; /* Truncate to nearest bit32 */
4535 *(index->bitmap + offset) &= ~(1 << (bitNumber & 0x7));
4539 VFreeBitMapEntry(Error * ec, register struct vnodeIndex *index,
4543 VFreeBitMapEntry_r(ec, index, bitNumber);
4547 /* this function will drop the glock internally.
4548 * for old pthread fileservers, this is safe thanks to vbusy.
4550 * for demand attach fs, caller must have already called
4551 * VCreateReservation_r and VWaitExclusiveState_r */
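/*
 * Illustrative sketch of the demand attach caller obligations stated
 * above (guarded out, never compiled):
 */
#if 0
VCreateReservation_r(vp);
VWaitExclusiveState_r(vp);
VGetBitmap_r(&ec, vp, vSmall);	/* drops and reacquires the glock */
VCancelReservation_r(vp);
#endif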
4553 VGetBitmap_r(Error * ec, Volume * vp, VnodeClass class)
4555 StreamHandle_t *file;
4558 struct VnodeClassInfo *vcp = &VnodeClassInfo[class];
4559 struct vnodeIndex *vip = &vp->vnodeIndex[class];
4560 struct VnodeDiskObject *vnode;
4561 unsigned int unique = 0;
4565 #endif /* BITMAP_LATER */
4566 #ifdef AFS_DEMAND_ATTACH_FS
4567 VolState state_save;
4568 #endif /* AFS_DEMAND_ATTACH_FS */
4572 #ifdef AFS_DEMAND_ATTACH_FS
4573 state_save = VChangeState_r(vp, VOL_STATE_GET_BITMAP);
4574 #endif /* AFS_DEMAND_ATTACH_FS */
4577 fdP = IH_OPEN(vip->handle);
4578 assert(fdP != NULL);
4579 file = FDH_FDOPEN(fdP, "r");
4580 assert(file != NULL);
4581 vnode = (VnodeDiskObject *) malloc(vcp->diskSize);
4582 assert(vnode != NULL);
4583 size = OS_SIZE(fdP->fd_fd);
4585 nVnodes = (size <= vcp->diskSize ? 0 : size - vcp->diskSize)
4587 vip->bitmapSize = ((nVnodes / 8) + 10) / 4 * 4; /* The +10 is a little extra so
4588 * a few files can be created in this volume;
4589 * the whole thing is rounded up to nearest 4