2 * Copyright 2000, International Business Machines Corporation and others.
5 * This software has been released under the terms of the IBM Public
6 * License. For details, see the LICENSE file in the top-level source
7 * directory or online at http://www.openafs.org/dl/license10.html
9 * Portions Copyright (c) 2005-2008 Sine Nomine Associates
12 /* 1/1/89: NB: this stuff is all going to be replaced. Don't take it too seriously */
17 Institution: The Information Technology Center, Carnegie-Mellon University
21 #include <afsconfig.h>
22 #include <afs/param.h>
28 #include <afs/afsint.h>
31 #include <sys/param.h>
32 #if !defined(AFS_SGI_ENV)
35 #else /* AFS_OSF_ENV */
36 #ifdef AFS_VFSINCL_ENV
39 #include <sys/fs/ufs_fs.h>
41 #if defined(AFS_DARWIN_ENV) || defined(AFS_XBSD_ENV)
42 #include <ufs/ufs/dinode.h>
43 #include <ufs/ffs/fs.h>
48 #else /* AFS_VFSINCL_ENV */
49 #if !defined(AFS_AIX_ENV) && !defined(AFS_LINUX20_ENV) && !defined(AFS_XBSD_ENV)
52 #endif /* AFS_VFSINCL_ENV */
53 #endif /* AFS_OSF_ENV */
54 #endif /* AFS_SGI_ENV */
55 #endif /* AFS_NT40_ENV */
73 #if defined(AFS_SUN_ENV) || defined(AFS_SUN5_ENV)
75 #include <sys/mnttab.h>
76 #include <sys/mntent.h>
82 #if defined(AFS_SGI_ENV)
87 #ifndef AFS_LINUX20_ENV
88 #include <fstab.h> /* Need to find in libc 5, present in libc 6 */
91 #endif /* AFS_SGI_ENV */
93 #endif /* AFS_HPUX_ENV */
97 #include <netinet/in.h>
101 #include <sys/time.h>
102 #endif /* ITIMER_REAL */
103 #endif /* AFS_NT40_ENV */
104 #if defined(AFS_SUN5_ENV) || defined(AFS_NT40_ENV) || defined(AFS_LINUX20_ENV)
111 #include <afs/errors.h>
114 #include <afs/afssyscalls.h>
116 #include <afs/afsutil.h>
120 #include "daemon_com.h"
122 #include "salvsync.h"
125 #include "partition.h"
126 #include "volume_inline.h"
127 #ifdef AFS_PTHREAD_ENV
129 #else /* AFS_PTHREAD_ENV */
130 #include "afs/assert.h"
131 #endif /* AFS_PTHREAD_ENV */
138 #if !defined(offsetof)
143 #define afs_stat stat64
144 #define afs_fstat fstat64
145 #define afs_open open64
146 #else /* !O_LARGEFILE */
147 #define afs_stat stat
148 #define afs_fstat fstat
149 #define afs_open open
150 #endif /* !O_LARGEFILE */
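/*
 * With these wrappers in place, the attach paths below can open and stat
 * volume header files with one spelling regardless of whether the platform
 * needs the explicit 64-bit interfaces, e.g. (as in VAttachVolumeByName_r):
 *
 *	if ((fd = afs_open(path, O_RDONLY)) == -1 || afs_fstat(fd, &status) == -1)
 *		... fail ...
 *
 * where status is declared as a struct afs_stat.
 */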
152 #ifdef AFS_PTHREAD_ENV
153 pthread_mutex_t vol_glock_mutex;
154 pthread_mutex_t vol_trans_mutex;
155 pthread_cond_t vol_put_volume_cond;
156 pthread_cond_t vol_sleep_cond;
157 int vol_attach_threads = 1;
158 #endif /* AFS_PTHREAD_ENV */
160 #ifdef AFS_DEMAND_ATTACH_FS
161 pthread_mutex_t vol_salvsync_mutex;
162 #endif /* AFS_DEMAND_ATTACH_FS */
165 extern void *calloc(), *realloc();
168 /*@printflike@*/ extern void Log(const char *format, ...);
170 /* Forward declarations */
171 static Volume *attach2(Error * ec, VolId vid, char *path,
172 register struct VolumeHeader *header,
173 struct DiskPartition64 *partp, Volume * vp,
174 int isbusy, int mode);
175 static void ReallyFreeVolume(Volume * vp);
176 #ifdef AFS_DEMAND_ATTACH_FS
177 static void FreeVolume(Volume * vp);
178 #else /* !AFS_DEMAND_ATTACH_FS */
179 #define FreeVolume(vp) ReallyFreeVolume(vp)
180 static void VScanUpdateList(void);
181 #endif /* !AFS_DEMAND_ATTACH_FS */
182 static void VInitVolumeHeaderCache(afs_uint32 howMany);
183 static int GetVolumeHeader(register Volume * vp);
184 static void ReleaseVolumeHeader(register struct volHeader *hd);
185 static void FreeVolumeHeader(register Volume * vp);
186 static void AddVolumeToHashTable(register Volume * vp, int hashid);
187 static void DeleteVolumeFromHashTable(register Volume * vp);
188 static int VHold(Volume * vp);
189 static int VHold_r(Volume * vp);
190 static void VGetBitmap_r(Error * ec, Volume * vp, VnodeClass class);
191 static void VReleaseVolumeHandles_r(Volume * vp);
192 static void VCloseVolumeHandles_r(Volume * vp);
193 static void LoadVolumeHeader(Error * ec, Volume * vp);
194 static int VCheckOffline(register Volume * vp);
195 static int VCheckDetach(register Volume * vp);
196 static Volume * GetVolume(Error * ec, Error * client_ec, VolId volumeId, Volume * hint, int flags);
197 static int VolumeExternalName_r(VolumeId volumeId, char * name, size_t len);
199 int LogLevel; /* Vice loglevel--not defined as extern so that it will be
200 * defined when not linked with vice, XXXX */
201 ProgramType programType; /* The type of program using the package */
203 /* extended volume package statistics */
206 #ifdef VOL_LOCK_DEBUG
207 pthread_t vol_glock_holder = 0;
211 #define VOLUME_BITMAP_GROWSIZE 16 /* bytes, => 128 vnodes */
212 /* Must be a multiple of 4 (1 word) !! */
214 /* this parameter needs to be tunable at runtime.
215 * 128 was really inadequate for largish servers -- at 16384 volumes this
216 * puts average chain length at 128, thus an average of 65 derefs to find a volptr.
217 * talk about bad spatial locality...
219 * an AVL or splay tree might work a lot better, but we'll just increase
220 * the default hash table size for now
222 #define DEFAULT_VOLUME_HASH_SIZE 256 /* Must be a power of 2!! */
223 #define DEFAULT_VOLUME_HASH_MASK (DEFAULT_VOLUME_HASH_SIZE-1)
224 #define VOLUME_HASH(volumeId) (volumeId&(VolumeHashTable.Mask))
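/*
 * Why the table size must be a power of two: with a power-of-two size the
 * bitwise AND against (size - 1) is equivalent to a modulo by the size, so
 * bucket selection is a single mask instead of a divide.  A minimal,
 * self-contained sketch of the same computation using the compile-time
 * defaults (illustrative only; the real macro above uses the runtime mask
 * in VolumeHashTable):
 */
static int
ExampleHashBucket(afs_uint32 volumeId)
{
    /* identical to (volumeId % DEFAULT_VOLUME_HASH_SIZE) because the
     * size is a power of two */
    return (int)(volumeId & DEFAULT_VOLUME_HASH_MASK);
}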
227 * turn volume hash chains into partially ordered lists.
228 * when the threshold is exceeded between two adjacent elements,
229 * perform a chain rebalancing operation.
231 * keep the threshold high in order to keep cache line invalidates
232 * low "enough" on SMPs
234 #define VOLUME_HASH_REORDER_THRESHOLD 200
237 * when possible, don't just reorder single elements, but reorder
238 * entire chains of elements at once. a chain of elements that
239 * exceeds the element previous to the pivot by at least CHAIN_THRESH
240 * accesses is moved in front of the chain whose elements have at
241 * least CHAIN_THRESH fewer accesses than the pivot element
243 #define VOLUME_HASH_REORDER_CHAIN_THRESH (VOLUME_HASH_REORDER_THRESHOLD / 2)
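/*
 * Minimal sketch of the reordering trigger described above (hypothetical
 * helper and counter names; the real logic lives in VReorderHash_r): an
 * element is only promoted ahead of its predecessor once its access count
 * leads by at least the threshold, which keeps reorders -- and the cache
 * line invalidations they cause -- infrequent.
 */
static int
ExampleShouldReorder(afs_uint32 elem_gets, afs_uint32 prev_gets)
{
    return (elem_gets > prev_gets)
	&& ((elem_gets - prev_gets) >= VOLUME_HASH_REORDER_THRESHOLD);
}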
245 #include "rx/rx_queue.h"
248 VolumeHashTable_t VolumeHashTable = {
249 DEFAULT_VOLUME_HASH_SIZE,
250 DEFAULT_VOLUME_HASH_MASK,
255 static void VInitVolumeHash(void);
259 /* This macro is used where an ffs() call does not exist. Was in util/ffs.c */
263 afs_int32 ffs_tmp = x;
267 for (ffs_i = 1;; ffs_i++) {
274 #endif /* !AFS_HAVE_FFS */
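/*
 * For reference, a self-contained fallback with the usual ffs() contract:
 * return the 1-based index of the least significant set bit, or 0 when no
 * bit is set.  This is a sketch of the same idea as the macro above, not
 * the macro itself (note the unsigned copy, which avoids shifting a signed
 * negative value).
 */
static int
example_ffs(afs_int32 x)
{
    afs_uint32 v = (afs_uint32) x;
    int i;

    if (v == 0)
	return 0;
    for (i = 1; !(v & 1); i++)
	v >>= 1;
    return i;
}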
276 #ifdef AFS_PTHREAD_ENV
277 typedef struct diskpartition_queue_t {
278 struct rx_queue queue;
279 struct DiskPartition64 * diskP;
280 } diskpartition_queue_t;
281 typedef struct vinitvolumepackage_thread_t {
282 struct rx_queue queue;
283 pthread_cond_t thread_done_cv;
284 int n_threads_complete;
285 } vinitvolumepackage_thread_t;
286 static void * VInitVolumePackageThread(void * args);
287 #endif /* AFS_PTHREAD_ENV */
289 static int VAttachVolumesByPartition(struct DiskPartition64 *diskP,
290 int * nAttached, int * nUnattached);
293 #ifdef AFS_DEMAND_ATTACH_FS
294 /* demand attach fileserver extensions */
297 * in the future we will support serialization of VLRU state into the fs_state
300 * these structures are the beginning of that effort
302 struct VLRU_DiskHeader {
303 struct versionStamp stamp; /* magic and structure version number */
304 afs_uint32 mtime; /* time of dump to disk */
305 afs_uint32 num_records; /* number of VLRU_DiskEntry records */
308 struct VLRU_DiskEntry {
309 afs_uint32 vid; /* volume ID */
310 afs_uint32 idx; /* generation */
311 afs_uint32 last_get; /* timestamp of last get */
314 struct VLRU_StartupQueue {
315 struct VLRU_DiskEntry * entry;
320 typedef struct vshutdown_thread_t {
322 pthread_mutex_t lock;
324 pthread_cond_t master_cv;
326 int n_threads_complete;
328 int schedule_version;
331 byte n_parts_done_pass;
332 byte part_thread_target[VOLMAXPARTS+1];
333 byte part_done_pass[VOLMAXPARTS+1];
334 struct rx_queue * part_pass_head[VOLMAXPARTS+1];
335 int stats[4][VOLMAXPARTS+1];
336 } vshutdown_thread_t;
337 static void * VShutdownThread(void * args);
340 static Volume * VAttachVolumeByVp_r(Error * ec, Volume * vp, int mode);
341 static int VCheckFree(Volume * vp);
344 static void AddVolumeToVByPList_r(Volume * vp);
345 static void DeleteVolumeFromVByPList_r(Volume * vp);
346 static void VVByPListBeginExclusive_r(struct DiskPartition64 * dp);
347 static void VVByPListEndExclusive_r(struct DiskPartition64 * dp);
348 static void VVByPListWait_r(struct DiskPartition64 * dp);
350 /* online salvager */
351 static int VCheckSalvage(register Volume * vp);
352 static int VUpdateSalvagePriority_r(Volume * vp);
353 static int VScheduleSalvage_r(Volume * vp);
354 static int VCancelSalvage_r(Volume * vp, int reason);
356 /* Volume hash table */
357 static void VReorderHash_r(VolumeHashChainHead * head, Volume * pp, Volume * vp);
358 static void VHashBeginExclusive_r(VolumeHashChainHead * head);
359 static void VHashEndExclusive_r(VolumeHashChainHead * head);
360 static void VHashWait_r(VolumeHashChainHead * head);
363 static int ShutdownVByPForPass_r(struct DiskPartition64 * dp, int pass);
364 static int ShutdownVolumeWalk_r(struct DiskPartition64 * dp, int pass,
365 struct rx_queue ** idx);
366 static void ShutdownController(vshutdown_thread_t * params);
367 static void ShutdownCreateSchedule(vshutdown_thread_t * params);
370 static void VLRU_ComputeConstants(void);
371 static void VInitVLRU(void);
372 static void VLRU_Init_Node_r(volatile Volume * vp);
373 static void VLRU_Add_r(volatile Volume * vp);
374 static void VLRU_Delete_r(volatile Volume * vp);
375 static void VLRU_UpdateAccess_r(volatile Volume * vp);
376 static void * VLRU_ScannerThread(void * args);
377 static void VLRU_Scan_r(int idx);
378 static void VLRU_Promote_r(int idx);
379 static void VLRU_Demote_r(int idx);
380 static void VLRU_SwitchQueues(volatile Volume * vp, int new_idx, int append);
383 static int VCheckSoftDetach(volatile Volume * vp, afs_uint32 thresh);
384 static int VCheckSoftDetachCandidate(volatile Volume * vp, afs_uint32 thresh);
385 static int VSoftDetachVolume_r(volatile Volume * vp, afs_uint32 thresh);
388 pthread_key_t VThread_key;
389 VThreadOptions_t VThread_defaults = {
390 0 /**< allow salvsync */
392 #endif /* AFS_DEMAND_ATTACH_FS */
395 struct Lock vol_listLock; /* Lock obtained when listing volumes:
396 * prevents a volume from being missed
397 * if the volume is attached during a
401 static int TimeZoneCorrection; /* Number of seconds west of GMT */
403 /* Common message used when the volume goes off line */
404 char *VSalvageMessage =
405 "Files in this volume are currently unavailable; call operations";
407 int VInit; /* 0 - uninitialized,
408 * 1 - initialized but not all volumes have been attached,
409 * 2 - initialized and all volumes have been attached,
410 * 3 - initialized, all volumes have been attached, and
411 * VConnectFS() has completed. */
414 bit32 VolumeCacheCheck; /* Incremented every time a volume goes on line--
415 * used to stamp volume headers and in-core
416 * vnodes. When the volume goes on-line the
417 * vnode will be invalidated
418 * access only with VOL_LOCK held */
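/*
 * Sketch of the intended usage pattern (hypothetical helper name): code
 * that caches per-volume state records the value of VolumeCacheCheck in
 * effect when the cache entry was filled, and treats the entry as stale
 * whenever the volume's current stamp no longer matches.
 */
static int
ExampleCacheEntryIsStale(bit32 saved_stamp, Volume * vp)
{
    /* caller must hold VOL_LOCK, per the comment above */
    return (saved_stamp != vp->cacheCheck);
}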
423 /***************************************************/
424 /* Startup routines */
425 /***************************************************/
428 VInitVolumePackage(ProgramType pt, afs_uint32 nLargeVnodes, afs_uint32 nSmallVnodes,
429 int connect, afs_uint32 volcache)
431 int errors = 0; /* Number of errors while finding vice partitions. */
437 memset(&VStats, 0, sizeof(VStats));
438 VStats.hdr_cache_size = 200;
440 VInitPartitionPackage();
442 #ifdef AFS_DEMAND_ATTACH_FS
443 if (programType == fileServer) {
446 VLRU_SetOptions(VLRU_SET_ENABLED, 0);
448 assert(pthread_key_create(&VThread_key, NULL) == 0);
451 #ifdef AFS_PTHREAD_ENV
452 assert(pthread_mutex_init(&vol_glock_mutex, NULL) == 0);
453 assert(pthread_mutex_init(&vol_trans_mutex, NULL) == 0);
454 assert(pthread_cond_init(&vol_put_volume_cond, NULL) == 0);
455 assert(pthread_cond_init(&vol_sleep_cond, NULL) == 0);
456 #else /* AFS_PTHREAD_ENV */
458 #endif /* AFS_PTHREAD_ENV */
459 Lock_Init(&vol_listLock);
461 srandom(time(0)); /* For VGetVolumeInfo */
462 gettimeofday(&tv, &tz);
463 TimeZoneCorrection = tz.tz_minuteswest * 60;
465 #ifdef AFS_DEMAND_ATTACH_FS
466 assert(pthread_mutex_init(&vol_salvsync_mutex, NULL) == 0);
467 #endif /* AFS_DEMAND_ATTACH_FS */
469 /* Ok, we have done enough initialization that the fileserver can
470 * start accepting calls, even though the volumes may not be
471 * available just yet.
475 #if defined(AFS_DEMAND_ATTACH_FS) && defined(SALVSYNC_BUILD_SERVER)
476 if (programType == salvageServer) {
479 #endif /* AFS_DEMAND_ATTACH_FS */
480 #ifdef FSSYNC_BUILD_SERVER
481 if (programType == fileServer) {
485 #if defined(AFS_DEMAND_ATTACH_FS) && defined(SALVSYNC_BUILD_CLIENT)
486 if (programType == fileServer) {
487 /* establish a connection to the salvager at this point */
488 assert(VConnectSALV() != 0);
490 #endif /* AFS_DEMAND_ATTACH_FS */
492 if (volcache > VStats.hdr_cache_size)
493 VStats.hdr_cache_size = volcache;
494 VInitVolumeHeaderCache(VStats.hdr_cache_size);
496 VInitVnodes(vLarge, nLargeVnodes);
497 VInitVnodes(vSmall, nSmallVnodes);
500 errors = VAttachPartitions();
504 if (programType == fileServer) {
505 struct DiskPartition64 *diskP;
506 #ifdef AFS_PTHREAD_ENV
507 struct vinitvolumepackage_thread_t params;
508 struct diskpartition_queue_t * dpq;
509 int i, threads, parts;
511 pthread_attr_t attrs;
513 assert(pthread_cond_init(¶ms.thread_done_cv,NULL) == 0);
515 params.n_threads_complete = 0;
517 /* create partition work queue */
518 for (parts=0, diskP = DiskPartitionList; diskP; diskP = diskP->next, parts++) {
519 dpq = (diskpartition_queue_t *) malloc(sizeof(struct diskpartition_queue_t));
522 queue_Append(¶ms,dpq);
525 threads = MIN(parts, vol_attach_threads);
528 /* spawn off a bunch of initialization threads */
529 assert(pthread_attr_init(&attrs) == 0);
530 assert(pthread_attr_setdetachstate(&attrs, PTHREAD_CREATE_DETACHED) == 0);
532 Log("VInitVolumePackage: beginning parallel fileserver startup\n");
533 #ifdef AFS_DEMAND_ATTACH_FS
534 Log("VInitVolumePackage: using %d threads to pre-attach volumes on %d partitions\n",
536 #else /* AFS_DEMAND_ATTACH_FS */
537 Log("VInitVolumePackage: using %d threads to attach volumes on %d partitions\n",
539 #endif /* AFS_DEMAND_ATTACH_FS */
542 for (i=0; i < threads; i++) {
543 assert(pthread_create
544 (&tid, &attrs, &VInitVolumePackageThread,
548 while(params.n_threads_complete < threads) {
549 VOL_CV_WAIT(¶ms.thread_done_cv);
553 assert(pthread_attr_destroy(&attrs) == 0);
555 /* if we're only going to run one init thread, don't bother creating
557 Log("VInitVolumePackage: beginning single-threaded fileserver startup\n");
558 #ifdef AFS_DEMAND_ATTACH_FS
559 Log("VInitVolumePackage: using 1 thread to pre-attach volumes on %d partition(s)\n",
561 #else /* AFS_DEMAND_ATTACH_FS */
562 Log("VInitVolumePackage: using 1 thread to attach volumes on %d partition(s)\n",
564 #endif /* AFS_DEMAND_ATTACH_FS */
566 VInitVolumePackageThread(¶ms);
569 assert(pthread_cond_destroy(¶ms.thread_done_cv) == 0);
571 #else /* AFS_PTHREAD_ENV */
575 /* Attach all the volumes in this partition */
576 for (diskP = DiskPartitionList; diskP; diskP = diskP->next) {
577 int nAttached = 0, nUnattached = 0;
578 assert(VAttachVolumesByPartition(diskP, &nAttached, &nUnattached) == 0);
580 #endif /* AFS_PTHREAD_ENV */
583 VInit = 2; /* Initialized, and all volumes have been attached */
584 #ifdef FSSYNC_BUILD_CLIENT
585 if (programType == volumeUtility && connect) {
587 Log("Unable to connect to file server; will retry at need\n");
591 #ifdef AFS_DEMAND_ATTACH_FS
592 else if (programType == salvageServer) {
594 Log("Unable to connect to file server; aborted\n");
598 #endif /* AFS_DEMAND_ATTACH_FS */
599 #endif /* FSSYNC_BUILD_CLIENT */
603 #ifdef AFS_PTHREAD_ENV
605 VInitVolumePackageThread(void * args) {
606 int errors = 0; /* Number of errors while finding vice partitions. */
610 struct DiskPartition64 *diskP;
611 struct vinitvolumepackage_thread_t * params;
612 struct diskpartition_queue_t * dpq;
614 params = (vinitvolumepackage_thread_t *) args;
618 /* Attach all the volumes in this partition */
619 while (queue_IsNotEmpty(params)) {
620 int nAttached = 0, nUnattached = 0;
622 dpq = queue_First(params,diskpartition_queue_t);
628 assert(VAttachVolumesByPartition(diskP, &nAttached, &nUnattached) == 0);
633 params->n_threads_complete++;
634 pthread_cond_signal(¶ms->thread_done_cv);
638 #endif /* AFS_PTHREAD_ENV */
641 * attach all volumes on a given disk partition
644 VAttachVolumesByPartition(struct DiskPartition64 *diskP, int * nAttached, int * nUnattached)
650 Log("Partition %s: attaching volumes\n", diskP->name);
651 dirp = opendir(VPartitionPath(diskP));
653 Log("opendir on Partition %s failed!\n", diskP->name);
657 while ((dp = readdir(dirp))) {
659 p = strrchr(dp->d_name, '.');
660 if (p != NULL && strcmp(p, VHDREXT) == 0) {
663 #ifdef AFS_DEMAND_ATTACH_FS
664 vp = VPreAttachVolumeByName(&error, diskP->name, dp->d_name);
665 #else /* AFS_DEMAND_ATTACH_FS */
666 vp = VAttachVolumeByName(&error, diskP->name, dp->d_name,
668 #endif /* AFS_DEMAND_ATTACH_FS */
669 (*(vp ? nAttached : nUnattached))++;
670 if (error == VOFFLINE)
671 Log("Volume %d stays offline (/vice/offline/%s exists)\n", VolumeNumber(dp->d_name), dp->d_name);
672 else if (LogLevel >= 5) {
673 Log("Partition %s: attached volume %d (%s)\n",
674 diskP->name, VolumeNumber(dp->d_name),
677 #if !defined(AFS_DEMAND_ATTACH_FS)
681 #endif /* AFS_DEMAND_ATTACH_FS */
685 Log("Partition %s: attached %d volumes; %d volumes not attached\n", diskP->name, *nAttached, *nUnattached);
691 /***************************************************/
692 /* Shutdown routines */
693 /***************************************************/
697 * highly multithreaded volume package shutdown
699 * with the demand attach fileserver extensions,
700 * VShutdown has been modified to be multithreaded.
701 * In order to achieve optimal use of many threads,
702 * the shutdown code involves one control thread and
703 * n shutdown worker threads. The control thread
704 * periodically examines the number of volumes available
705 * for shutdown on each partition, and produces a worker
706 * thread allocation schedule. The idea is to eliminate
707 * redundant scheduling computation on the workers by
708 * having a single master scheduler.
710 * The scheduler's objectives are:
712 * each partition with volumes remaining gets allocated
713 * at least 1 thread (assuming sufficient threads)
715 * threads are allocated in proportion to the number of
716 * volumes remaining to be offlined. This ensures that
717 * the OS I/O scheduler has many requests to elevator
718 * seek on partitions that will (presumably) take the
719 * longest amount of time (from now) to finish shutdown
720 * (3) keep threads busy
721 * when there are extra threads, they are assigned to
722 * partitions using a simple round-robin algorithm
724 * In the future, we may wish to add the ability to adapt
725 * to the relative performance patterns of each disk
730 * multi-step shutdown process
732 * demand attach shutdown is a four-step process. Each
733 * shutdown "pass" shuts down increasingly more difficult
734 * volumes. The main purpose is to achieve better cache
735 * utilization during shutdown.
738 * shutdown volumes in the unattached, pre-attached
741 * shutdown attached volumes with cached volume headers
743 * shutdown all volumes in non-exclusive states
745 * shutdown all remaining volumes
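/*
 * Sketch of the proportional-allocation objective above (illustrative
 * helper only; the real work is done in ShutdownCreateSchedule below,
 * which also enforces the one-thread-per-partition floor and round-robins
 * any leftover threads): a partition's share of the worker threads is
 * roughly proportional to its share of the volumes still waiting to be
 * taken offline.
 */
static int
ExampleProportionalShare(int n_threads, int part_vols, int total_vols)
{
    if (total_vols == 0)
	return 0;
    return (n_threads * part_vols) / total_vols;
}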
752 register Volume *vp, *np;
753 register afs_int32 code;
754 #ifdef AFS_DEMAND_ATTACH_FS
755 struct DiskPartition64 * diskP;
756 struct diskpartition_queue_t * dpq;
757 vshutdown_thread_t params;
759 pthread_attr_t attrs;
761 memset(¶ms, 0, sizeof(vshutdown_thread_t));
763 for (params.n_parts=0, diskP = DiskPartitionList;
764 diskP; diskP = diskP->next, params.n_parts++);
766 Log("VShutdown: shutting down on-line volumes on %d partition%s...\n",
767 params.n_parts, params.n_parts > 1 ? "s" : "");
769 if (vol_attach_threads > 1) {
770 /* prepare for parallel shutdown */
771 params.n_threads = vol_attach_threads;
772 assert(pthread_mutex_init(¶ms.lock, NULL) == 0);
773 assert(pthread_cond_init(¶ms.cv, NULL) == 0);
774 assert(pthread_cond_init(¶ms.master_cv, NULL) == 0);
775 assert(pthread_attr_init(&attrs) == 0);
776 assert(pthread_attr_setdetachstate(&attrs, PTHREAD_CREATE_DETACHED) == 0);
779 /* setup the basic partition information structures for
780 * parallel shutdown */
781 for (diskP = DiskPartitionList; diskP; diskP = diskP->next) {
783 struct rx_queue * qp, * nqp;
787 VVByPListWait_r(diskP);
788 VVByPListBeginExclusive_r(diskP);
791 for (queue_Scan(&diskP->vol_list, qp, nqp, rx_queue)) {
792 vp = (Volume *)((char *)qp - offsetof(Volume, vol_list));
796 Log("VShutdown: partition %s has %d volumes with attached headers\n",
797 VPartitionPath(diskP), count);
800 /* build up the pass 0 shutdown work queue */
801 dpq = (struct diskpartition_queue_t *) malloc(sizeof(struct diskpartition_queue_t));
804 queue_Prepend(¶ms, dpq);
806 params.part_pass_head[diskP->index] = queue_First(&diskP->vol_list, rx_queue);
809 Log("VShutdown: beginning parallel fileserver shutdown\n");
810 Log("VShutdown: using %d threads to offline volumes on %d partition%s\n",
811 vol_attach_threads, params.n_parts, params.n_parts > 1 ? "s" : "" );
813 /* do pass 0 shutdown */
814 assert(pthread_mutex_lock(¶ms.lock) == 0);
815 for (i=0; i < params.n_threads; i++) {
816 assert(pthread_create
817 (&tid, &attrs, &VShutdownThread,
821 /* wait for all the pass 0 shutdowns to complete */
822 while (params.n_threads_complete < params.n_threads) {
823 assert(pthread_cond_wait(¶ms.master_cv, ¶ms.lock) == 0);
825 params.n_threads_complete = 0;
827 assert(pthread_cond_broadcast(¶ms.cv) == 0);
828 assert(pthread_mutex_unlock(¶ms.lock) == 0);
830 Log("VShutdown: pass 0 completed using the 1 thread per partition algorithm\n");
831 Log("VShutdown: starting passes 1 through 3 using finely-granular mp-fast algorithm\n");
833 /* run the parallel shutdown scheduler. it will drop the glock internally */
834 ShutdownController(¶ms);
836 /* wait for all the workers to finish pass 3 and terminate */
837 while (params.pass < 4) {
838 VOL_CV_WAIT(¶ms.cv);
841 assert(pthread_attr_destroy(&attrs) == 0);
842 assert(pthread_cond_destroy(¶ms.cv) == 0);
843 assert(pthread_cond_destroy(¶ms.master_cv) == 0);
844 assert(pthread_mutex_destroy(¶ms.lock) == 0);
846 /* drop the VByPList exclusive reservations */
847 for (diskP = DiskPartitionList; diskP; diskP = diskP->next) {
848 VVByPListEndExclusive_r(diskP);
849 Log("VShutdown: %s stats : (pass[0]=%d, pass[1]=%d, pass[2]=%d, pass[3]=%d)\n",
850 VPartitionPath(diskP),
851 params.stats[0][diskP->index],
852 params.stats[1][diskP->index],
853 params.stats[2][diskP->index],
854 params.stats[3][diskP->index]);
857 Log("VShutdown: shutdown finished using %d threads\n", params.n_threads);
859 /* if we're only going to run one shutdown thread, don't bother creating
861 Log("VShutdown: beginning single-threaded fileserver shutdown\n");
863 for (diskP = DiskPartitionList; diskP; diskP = diskP->next) {
864 VShutdownByPartition_r(diskP);
868 Log("VShutdown: complete.\n");
869 #else /* AFS_DEMAND_ATTACH_FS */
870 Log("VShutdown: shutting down on-line volumes...\n");
871 for (i = 0; i < VolumeHashTable.Size; i++) {
872 /* try to hold first volume in the hash table */
873 for (queue_Scan(&VolumeHashTable.Table[i],vp,np,Volume)) {
877 Log("VShutdown: Attempting to take volume %u offline.\n",
880 /* next, take the volume offline (drops reference count) */
881 VOffline_r(vp, "File server was shut down");
885 Log("VShutdown: complete.\n");
886 #endif /* AFS_DEMAND_ATTACH_FS */
897 #ifdef AFS_DEMAND_ATTACH_FS
900 * shutdown control thread
903 ShutdownController(vshutdown_thread_t * params)
906 struct DiskPartition64 * diskP;
908 vshutdown_thread_t shadow;
910 ShutdownCreateSchedule(params);
912 while ((params->pass < 4) &&
913 (params->n_threads_complete < params->n_threads)) {
914 /* recompute schedule once per second */
916 memcpy(&shadow, params, sizeof(vshutdown_thread_t));
920 Log("ShutdownController: schedule version=%d, vol_remaining=%d, pass=%d\n",
921 shadow.schedule_version, shadow.vol_remaining, shadow.pass);
922 Log("ShutdownController: n_threads_complete=%d, n_parts_done_pass=%d\n",
923 shadow.n_threads_complete, shadow.n_parts_done_pass);
924 for (diskP = DiskPartitionList; diskP; diskP=diskP->next) {
926 Log("ShutdownController: part[%d] : (len=%d, thread_target=%d, done_pass=%d, pass_head=%p)\n",
929 shadow.part_thread_target[id],
930 shadow.part_done_pass[id],
931 shadow.part_pass_head[id]);
937 ShutdownCreateSchedule(params);
941 /* create the shutdown thread work schedule.
942 * this scheduler tries to implement fairness
943 * by allocating at least 1 thread to each
944 * partition with volumes to be shut down,
945 * and then it attempts to allocate remaining
946 * threads based upon the amount of work left
949 ShutdownCreateSchedule(vshutdown_thread_t * params)
951 struct DiskPartition64 * diskP;
952 int sum, thr_workload, thr_left;
953 int part_residue[VOLMAXPARTS+1];
956 /* compute the total number of outstanding volumes */
958 for (diskP = DiskPartitionList; diskP; diskP = diskP->next) {
959 sum += diskP->vol_list.len;
962 params->schedule_version++;
963 params->vol_remaining = sum;
968 /* compute average per-thread workload */
969 thr_workload = sum / params->n_threads;
970 if (sum % params->n_threads)
973 thr_left = params->n_threads;
974 memset(&part_residue, 0, sizeof(part_residue));
976 /* for fairness, give every partition with volumes remaining
977 * at least one thread */
978 for (diskP = DiskPartitionList; diskP && thr_left; diskP = diskP->next) {
980 if (diskP->vol_list.len) {
981 params->part_thread_target[id] = 1;
984 params->part_thread_target[id] = 0;
988 if (thr_left && thr_workload) {
989 /* compute length-weighted workloads */
992 for (diskP = DiskPartitionList; diskP && thr_left; diskP = diskP->next) {
994 delta = (diskP->vol_list.len / thr_workload) -
995 params->part_thread_target[id];
999 if (delta < thr_left) {
1000 params->part_thread_target[id] += delta;
1003 params->part_thread_target[id] += thr_left;
1011 /* try to assign any leftover threads to partitions that
1012 * had volume lengths closer to needing thread_target+1 */
1013 int max_residue, max_id;
1015 /* compute the residues */
1016 for (diskP = DiskPartitionList; diskP; diskP = diskP->next) {
1018 part_residue[id] = diskP->vol_list.len -
1019 (params->part_thread_target[id] * thr_workload);
1022 /* now try to allocate remaining threads to partitions with the
1023 * highest residues */
1026 for (diskP = DiskPartitionList; diskP; diskP = diskP->next) {
1028 if (part_residue[id] > max_residue) {
1029 max_residue = part_residue[id];
1038 params->part_thread_target[max_id]++;
1040 part_residue[max_id] = 0;
1045 /* punt and give any remaining threads equally to each partition */
1047 if (thr_left >= params->n_parts) {
1048 alloc = thr_left / params->n_parts;
1049 for (diskP = DiskPartitionList; diskP; diskP = diskP->next) {
1051 params->part_thread_target[id] += alloc;
1056 /* finish off the last of the threads */
1057 for (diskP = DiskPartitionList; thr_left && diskP; diskP = diskP->next) {
1059 params->part_thread_target[id]++;
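/*
 * Illustrative helper (hypothetical; not used by the package): pick the
 * partition whose residue is largest, i.e. the one closest to having
 * earned one more thread than its current target.  This mirrors the
 * leftover-thread selection loop above for a plain array of residues
 * indexed 0..n_parts-1 (n_parts > 0 assumed).
 */
static int
ExampleMaxResiduePart(int *residue, int n_parts)
{
    int i, best = 0;

    for (i = 1; i < n_parts; i++) {
	if (residue[i] > residue[best])
	    best = i;
    }
    return best;
}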
1065 /* worker thread for parallel shutdown */
1067 VShutdownThread(void * args)
1069 struct rx_queue *qp;
1071 vshutdown_thread_t * params;
1072 int part, code, found, pass, schedule_version_save, count;
1073 struct DiskPartition64 *diskP;
1074 struct diskpartition_queue_t * dpq;
1077 params = (vshutdown_thread_t *) args;
1079 /* acquire the shutdown pass 0 lock */
1080 assert(pthread_mutex_lock(¶ms->lock) == 0);
1082 /* if there's still pass 0 work to be done,
1083 * get a work entry, and do a pass 0 shutdown */
1084 if (queue_IsNotEmpty(params)) {
1085 dpq = queue_First(params, diskpartition_queue_t);
1087 assert(pthread_mutex_unlock(¶ms->lock) == 0);
1093 while (ShutdownVolumeWalk_r(diskP, 0, ¶ms->part_pass_head[id]))
1095 params->stats[0][diskP->index] = count;
1096 assert(pthread_mutex_lock(¶ms->lock) == 0);
1099 params->n_threads_complete++;
1100 if (params->n_threads_complete == params->n_threads) {
1101 /* notify control thread that all workers have completed pass 0 */
1102 assert(pthread_cond_signal(¶ms->master_cv) == 0);
1104 while (params->pass == 0) {
1105 assert(pthread_cond_wait(¶ms->cv, ¶ms->lock) == 0);
1109 assert(pthread_mutex_unlock(¶ms->lock) == 0);
1112 pass = params->pass;
1115 /* now escalate through the more complicated shutdowns */
1117 schedule_version_save = params->schedule_version;
1119 /* find a disk partition to work on */
1120 for (diskP = DiskPartitionList; diskP; diskP = diskP->next) {
1122 if (params->part_thread_target[id] && !params->part_done_pass[id]) {
1123 params->part_thread_target[id]--;
1130 /* hmm. for some reason the controller thread couldn't find anything for
1131 * us to do. let's see if there's anything we can do */
1132 for (diskP = DiskPartitionList; diskP; diskP = diskP->next) {
1134 if (diskP->vol_list.len && !params->part_done_pass[id]) {
1137 } else if (!params->part_done_pass[id]) {
1138 params->part_done_pass[id] = 1;
1139 params->n_parts_done_pass++;
1141 Log("VShutdown: done shutting down volumes on partition %s.\n",
1142 VPartitionPath(diskP));
1148 /* do work on this partition until either the controller
1149 * creates a new schedule, or we run out of things to do
1150 * on this partition */
1153 while (!params->part_done_pass[id] &&
1154 (schedule_version_save == params->schedule_version)) {
1155 /* ShutdownVolumeWalk_r will drop the glock internally */
1156 if (!ShutdownVolumeWalk_r(diskP, pass, ¶ms->part_pass_head[id])) {
1157 if (!params->part_done_pass[id]) {
1158 params->part_done_pass[id] = 1;
1159 params->n_parts_done_pass++;
1161 Log("VShutdown: done shutting down volumes on partition %s.\n",
1162 VPartitionPath(diskP));
1170 params->stats[pass][id] += count;
1172 /* ok, everyone is done with this pass, proceed */
1175 params->n_threads_complete++;
1176 while (params->pass == pass) {
1177 if (params->n_threads_complete == params->n_threads) {
1178 /* we are the last thread to complete, so we will
1179 * reinitialize worker pool state for the next pass */
1180 params->n_threads_complete = 0;
1181 params->n_parts_done_pass = 0;
1183 for (diskP = DiskPartitionList; diskP; diskP = diskP->next) {
1185 params->part_done_pass[id] = 0;
1186 params->part_pass_head[id] = queue_First(&diskP->vol_list, rx_queue);
1189 /* compute a new thread schedule before releasing all the workers */
1190 ShutdownCreateSchedule(params);
1192 /* wake up all the workers */
1193 assert(pthread_cond_broadcast(¶ms->cv) == 0);
1196 Log("VShutdown: pass %d completed using %d threads on %d partitions\n",
1197 pass, params->n_threads, params->n_parts);
1200 VOL_CV_WAIT(¶ms->cv);
1203 pass = params->pass;
1217 /* shut down all volumes on a given disk partition
1219 * note that this function will not allow mp-fast
1220 * shutdown of a partition */
1222 VShutdownByPartition_r(struct DiskPartition64 * dp)
1228 /* wait for other exclusive ops to finish */
1229 VVByPListWait_r(dp);
1231 /* begin exclusive access */
1232 VVByPListBeginExclusive_r(dp);
1234 /* pick the low-hanging fruit first,
1235 * then do the complicated ones last
1236 * (has the advantage of keeping
1237 * in-use volumes up until the bitter end) */
1238 for (pass = 0, total=0; pass < 4; pass++) {
1239 pass_stats[pass] = ShutdownVByPForPass_r(dp, pass);
1240 total += pass_stats[pass];
1243 /* end exclusive access */
1244 VVByPListEndExclusive_r(dp);
1246 Log("VShutdownByPartition: shut down %d volumes on %s (pass[0]=%d, pass[1]=%d, pass[2]=%d, pass[3]=%d)\n",
1247 total, VPartitionPath(dp), pass_stats[0], pass_stats[1], pass_stats[2], pass_stats[3]);
1252 /* internal shutdown functionality
1254 * for multi-pass shutdown:
1255 * 0 to only "shutdown" {pre,un}attached and error state volumes
1256 * 1 to also shutdown attached volumes w/ volume header loaded
1257 * 2 to also shutdown attached volumes w/o volume header loaded
1258 * 3 to also shutdown exclusive state volumes
1260 * caller MUST hold exclusive access on the hash chain
1261 * because we drop vol_glock_mutex internally
1263 * this function is reentrant for passes 1--3
1264 * (e.g. multiple threads can cooperate to
1265 * shut down a partition mp-fast)
1267 * pass 0 is not scalable because the volume state data is
1268 * synchronized by the vol_glock mutex, and the locking overhead
1269 * is too high to drop the lock long enough to do linked list
1273 ShutdownVByPForPass_r(struct DiskPartition64 * dp, int pass)
1275 struct rx_queue * q = queue_First(&dp->vol_list, rx_queue);
1278 while (ShutdownVolumeWalk_r(dp, pass, &q))
1284 /* conditionally shut down one volume on partition dp
1285 * returns 1 if a volume was shut down in this pass,
1288 ShutdownVolumeWalk_r(struct DiskPartition64 * dp, int pass,
1289 struct rx_queue ** idx)
1291 struct rx_queue *qp, *nqp;
1296 for (queue_ScanFrom(&dp->vol_list, qp, qp, nqp, rx_queue)) {
1297 vp = (Volume *) (((char *)qp) - offsetof(Volume, vol_list));
1301 if ((V_attachState(vp) != VOL_STATE_UNATTACHED) &&
1302 (V_attachState(vp) != VOL_STATE_ERROR) &&
1303 (V_attachState(vp) != VOL_STATE_PREATTACHED)) {
1307 if ((V_attachState(vp) == VOL_STATE_ATTACHED) &&
1308 (vp->header == NULL)) {
1312 if (VIsExclusiveState(V_attachState(vp))) {
1317 DeleteVolumeFromVByPList_r(vp);
1318 VShutdownVolume_r(vp);
1328 * shutdown a specific volume
1330 /* caller MUST NOT hold a heavyweight ref on vp */
1332 VShutdownVolume_r(Volume * vp)
1336 VCreateReservation_r(vp);
1338 if (LogLevel >= 5) {
1339 Log("VShutdownVolume_r: vid=%u, device=%d, state=%hu\n",
1340 vp->hashid, vp->partition->device, V_attachState(vp));
1343 /* wait for other blocking ops to finish */
1344 VWaitExclusiveState_r(vp);
1346 assert(VIsValidState(V_attachState(vp)));
1348 switch(V_attachState(vp)) {
1349 case VOL_STATE_SALVAGING:
1350 /* make sure salvager knows we don't want
1351 * the volume back */
1352 VCancelSalvage_r(vp, SALVSYNC_SHUTDOWN);
1353 case VOL_STATE_PREATTACHED:
1354 case VOL_STATE_ERROR:
1355 VChangeState_r(vp, VOL_STATE_UNATTACHED);
1356 case VOL_STATE_UNATTACHED:
1358 case VOL_STATE_GOING_OFFLINE:
1359 case VOL_STATE_SHUTTING_DOWN:
1360 case VOL_STATE_ATTACHED:
1364 Log("VShutdown: Attempting to take volume %u offline.\n",
1367 /* take the volume offline (drops reference count) */
1368 VOffline_r(vp, "File server was shut down");
1373 VCancelReservation_r(vp);
1377 #endif /* AFS_DEMAND_ATTACH_FS */
1380 /***************************************************/
1381 /* Header I/O routines */
1382 /***************************************************/
1384 /* open a descriptor for the inode (h),
1385 * read an on-disk structure into buffer (to) of size (size),
1386 * verify that the versionStamp in the structure has magic (magic), and
1387 * optionally verify version (version) if (version) is nonzero
1390 ReadHeader(Error * ec, IHandle_t * h, char *to, int size, bit32 magic,
1393 struct versionStamp *vsn;
1408 if (FDH_SEEK(fdP, 0, SEEK_SET) < 0) {
1410 FDH_REALLYCLOSE(fdP);
1413 vsn = (struct versionStamp *)to;
1414 if (FDH_READ(fdP, to, size) != size || vsn->magic != magic) {
1416 FDH_REALLYCLOSE(fdP);
1421 /* Check is conditional, in case caller wants to inspect version himself */
1422 if (version && vsn->version != version) {
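/*
 * Example use of ReadHeader(): this is exactly how attach2() below loads
 * the volume info header (the vnode index and link table headers are read
 * the same way with their own magic/version constants):
 *
 *	ReadHeader(ec, V_diskDataHandle(vp), (char *)&V_disk(vp),
 *		   sizeof(V_disk(vp)), VOLUMEINFOMAGIC, VOLUMEINFOVERSION);
 *
 * The buffer handed in must begin with a struct versionStamp, since that
 * is what the magic and version checks above are applied to.
 */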
1428 WriteVolumeHeader_r(Error * ec, Volume * vp)
1430 IHandle_t *h = V_diskDataHandle(vp);
1440 if (FDH_SEEK(fdP, 0, SEEK_SET) < 0) {
1442 FDH_REALLYCLOSE(fdP);
1445 if (FDH_WRITE(fdP, (char *)&V_disk(vp), sizeof(V_disk(vp)))
1446 != sizeof(V_disk(vp))) {
1448 FDH_REALLYCLOSE(fdP);
1454 /* VolumeHeaderToDisk
1455 * Allows for storing 64 bit inode numbers in on-disk volume header
1458 /* convert in-memory representation of a volume header to the
1459 * on-disk representation of a volume header */
1461 VolumeHeaderToDisk(VolumeDiskHeader_t * dh, VolumeHeader_t * h)
1464 memset((char *)dh, 0, sizeof(VolumeDiskHeader_t));
1465 dh->stamp = h->stamp;
1467 dh->parent = h->parent;
1469 #ifdef AFS_64BIT_IOPS_ENV
1470 dh->volumeInfo_lo = (afs_int32) h->volumeInfo & 0xffffffff;
1471 dh->volumeInfo_hi = (afs_int32) (h->volumeInfo >> 32) & 0xffffffff;
1472 dh->smallVnodeIndex_lo = (afs_int32) h->smallVnodeIndex & 0xffffffff;
1473 dh->smallVnodeIndex_hi =
1474 (afs_int32) (h->smallVnodeIndex >> 32) & 0xffffffff;
1475 dh->largeVnodeIndex_lo = (afs_int32) h->largeVnodeIndex & 0xffffffff;
1476 dh->largeVnodeIndex_hi =
1477 (afs_int32) (h->largeVnodeIndex >> 32) & 0xffffffff;
1478 dh->linkTable_lo = (afs_int32) h->linkTable & 0xffffffff;
1479 dh->linkTable_hi = (afs_int32) (h->linkTable >> 32) & 0xffffffff;
1481 dh->volumeInfo_lo = h->volumeInfo;
1482 dh->smallVnodeIndex_lo = h->smallVnodeIndex;
1483 dh->largeVnodeIndex_lo = h->largeVnodeIndex;
1484 dh->linkTable_lo = h->linkTable;
1488 /* DiskToVolumeHeader
1489 * Converts an on-disk representation of a volume header to
1490 * the in-memory representation of a volume header.
1492 * Makes the assumption that AFS has *always*
1493 * zeroed the volume header file so that high parts of inode
1494 * numbers are 0 in older (SGI EFS) volume header files.
1497 DiskToVolumeHeader(VolumeHeader_t * h, VolumeDiskHeader_t * dh)
1499 memset((char *)h, 0, sizeof(VolumeHeader_t));
1500 h->stamp = dh->stamp;
1502 h->parent = dh->parent;
1504 #ifdef AFS_64BIT_IOPS_ENV
1506 (Inode) dh->volumeInfo_lo | ((Inode) dh->volumeInfo_hi << 32);
1508 h->smallVnodeIndex =
1509 (Inode) dh->smallVnodeIndex_lo | ((Inode) dh->
1510 smallVnodeIndex_hi << 32);
1512 h->largeVnodeIndex =
1513 (Inode) dh->largeVnodeIndex_lo | ((Inode) dh->
1514 largeVnodeIndex_hi << 32);
1516 (Inode) dh->linkTable_lo | ((Inode) dh->linkTable_hi << 32);
1518 h->volumeInfo = dh->volumeInfo_lo;
1519 h->smallVnodeIndex = dh->smallVnodeIndex_lo;
1520 h->largeVnodeIndex = dh->largeVnodeIndex_lo;
1521 h->linkTable = dh->linkTable_lo;
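/*
 * Standalone illustration of the lo/hi split performed by the two
 * converters above (hypothetical helper names; assumes the
 * AFS_64BIT_IOPS_ENV case, where an Inode is a 64-bit quantity).
 * Unsigned halves are used here so the reassembly does not depend on
 * sign extension; the masks and shifts are otherwise the same as in
 * VolumeHeaderToDisk/DiskToVolumeHeader.
 */
#ifdef AFS_64BIT_IOPS_ENV
static Inode
ExampleJoinInode(afs_uint32 lo, afs_uint32 hi)
{
    return (Inode) lo | ((Inode) hi << 32);
}

static void
ExampleSplitInode(Inode ino, afs_uint32 * lo, afs_uint32 * hi)
{
    *lo = (afs_uint32) (ino & 0xffffffff);
    *hi = (afs_uint32) ((ino >> 32) & 0xffffffff);
}
#endif /* AFS_64BIT_IOPS_ENV */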
1526 /***************************************************/
1527 /* Volume Attachment routines */
1528 /***************************************************/
1530 #ifdef AFS_DEMAND_ATTACH_FS
1532 * pre-attach a volume given its path.
1534 * @param[out] ec outbound error code
1535 * @param[in] partition partition path string
1536 * @param[in] name volume id string
1538 * @return volume object pointer
1540 * @note A pre-attached volume will only have its partition
1541 * and hashid fields initialized. At first call to
1542 * VGetVolume, the volume will be fully attached.
1546 VPreAttachVolumeByName(Error * ec, char *partition, char *name)
1550 vp = VPreAttachVolumeByName_r(ec, partition, name);
1556 * pre-attach a volume given its path.
1558 * @param[out] ec outbound error code
1559 * @param[in] partition path to vice partition
1560 * @param[in] name volume id string
1562 * @return volume object pointer
1564 * @pre VOL_LOCK held
1566 * @internal volume package internal use only.
1569 VPreAttachVolumeByName_r(Error * ec, char *partition, char *name)
1571 return VPreAttachVolumeById_r(ec,
1573 VolumeNumber(name));
1577 * pre-attach a volume given its path and numeric volume id.
1579 * @param[out] ec error code return
1580 * @param[in] partition path to vice partition
1581 * @param[in] volumeId numeric volume id
1583 * @return volume object pointer
1585 * @pre VOL_LOCK held
1587 * @internal volume package internal use only.
1590 VPreAttachVolumeById_r(Error * ec,
1595 struct DiskPartition64 *partp;
1599 assert(programType == fileServer);
1601 if (!(partp = VGetPartition_r(partition, 0))) {
1603 Log("VPreAttachVolumeById_r: Error getting partition (%s)\n", partition);
1607 vp = VLookupVolume_r(ec, volumeId, NULL);
1612 return VPreAttachVolumeByVp_r(ec, partp, vp, volumeId);
1616 * preattach a volume.
1618 * @param[out] ec outbound error code
1619 * @param[in] partp pointer to partition object
1620 * @param[in] vp pointer to volume object
1621 * @param[in] vid volume id
1623 * @return volume object pointer
1625 * @pre VOL_LOCK is held.
1627 * @warning Returned volume object pointer does not have to
1628 * equal the pointer passed in as argument vp. There
1629 * are potential race conditions which can result in
1630 * the pointers having different values. It is up to
1631 * the caller to make sure that references are handled
1632 * properly in this case.
1634 * @note If there is already a volume object registered with
1635 * the same volume id, its pointer MUST be passed as
1636 * argument vp. Failure to do so will result in a silent
1637 * failure to preattach.
1639 * @internal volume package internal use only.
1642 VPreAttachVolumeByVp_r(Error * ec,
1643 struct DiskPartition64 * partp,
1651 /* check to see if pre-attach already happened */
1653 (V_attachState(vp) != VOL_STATE_UNATTACHED) &&
1654 (V_attachState(vp) != VOL_STATE_PREATTACHED) &&
1655 !VIsErrorState(V_attachState(vp))) {
1657 * pre-attach is a no-op in all but the following cases:
1659 * - volume is unattached
1660 * - volume is in an error state
1661 * - volume is pre-attached
1663 Log("VPreattachVolumeByVp_r: volume %u not in quiescent state\n", vid);
1666 /* we're re-attaching a volume; clear out some old state */
1667 memset(&vp->salvage, 0, sizeof(struct VolumeOnlineSalvage));
1669 if (V_partition(vp) != partp) {
1670 /* XXX potential race */
1671 DeleteVolumeFromVByPList_r(vp);
1674 /* if we need to allocate a new Volume struct,
1675 * go ahead and drop the vol glock, otherwise
1676 * do the basic setup synchronised, as it's
1677 * probably not worth dropping the lock */
1680 /* allocate the volume structure */
1681 vp = nvp = (Volume *) malloc(sizeof(Volume));
1683 memset(vp, 0, sizeof(Volume));
1684 queue_Init(&vp->vnode_list);
1685 assert(pthread_cond_init(&V_attachCV(vp), NULL) == 0);
1688 /* link the volume with its associated vice partition */
1689 vp->device = partp->device;
1690 vp->partition = partp;
1693 vp->specialStatus = 0;
1695 /* if we dropped the lock, reacquire the lock,
1696 * check for pre-attach races, and then add
1697 * the volume to the hash table */
1700 nvp = VLookupVolume_r(ec, vid, NULL);
1705 } else if (nvp) { /* race detected */
1710 /* hack to make up for VChangeState_r() decrementing
1711 * the old state counter */
1712 VStats.state_levels[0]++;
1716 /* put pre-attached volume onto the hash table
1717 * and bring it up to the pre-attached state */
1718 AddVolumeToHashTable(vp, vp->hashid);
1719 AddVolumeToVByPList_r(vp);
1720 VLRU_Init_Node_r(vp);
1721 VChangeState_r(vp, VOL_STATE_PREATTACHED);
1724 Log("VPreAttachVolumeByVp_r: volume %u pre-attached\n", vp->hashid);
1732 #endif /* AFS_DEMAND_ATTACH_FS */
1734 /* Attach an existing volume, given its pathname, and return a
1735 pointer to the volume header information. The volume also
1736 normally goes online at this time. An offline volume
1737 must be reattached to make it go online */
1739 VAttachVolumeByName(Error * ec, char *partition, char *name, int mode)
1743 retVal = VAttachVolumeByName_r(ec, partition, name, mode);
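/*
 * Hypothetical caller of VAttachVolumeByName (sketch only; the partition
 * name, header file name, and surrounding logic below are made up for
 * illustration): a volume utility wanting read-only access would typically
 * attach the volume, use it, and then detach it again.
 *
 *	Error error;
 *	Volume *vp;
 *
 *	vp = VAttachVolumeByName(&error, "/vicepa", "V0536870912.vol",
 *				 V_READONLY);
 *	if (vp) {
 *	    ... use the volume ...
 *	    VDetachVolume(&error, vp);
 *	}
 */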
1749 VAttachVolumeByName_r(Error * ec, char *partition, char *name, int mode)
1751 register Volume *vp = NULL, *svp = NULL;
1753 struct afs_stat status;
1754 struct VolumeDiskHeader diskHeader;
1755 struct VolumeHeader iheader;
1756 struct DiskPartition64 *partp;
1760 #ifdef AFS_DEMAND_ATTACH_FS
1761 VolumeStats stats_save;
1762 #endif /* AFS_DEMAND_ATTACH_FS */
1766 volumeId = VolumeNumber(name);
1768 if (!(partp = VGetPartition_r(partition, 0))) {
1770 Log("VAttachVolume: Error getting partition (%s)\n", partition);
1774 if (programType == volumeUtility) {
1776 VLockPartition_r(partition);
1777 } else if (programType == fileServer) {
1778 #ifdef AFS_DEMAND_ATTACH_FS
1779 /* lookup the volume in the hash table */
1780 vp = VLookupVolume_r(ec, volumeId, NULL);
1786 /* save any counters that are supposed to
1787 * be monotonically increasing over the
1788 * lifetime of the fileserver */
1789 memcpy(&stats_save, &vp->stats, sizeof(VolumeStats));
1791 memset(&stats_save, 0, sizeof(VolumeStats));
1794 /* if there's something in the hash table, and it's not
1795 * in the pre-attach state, then we may need to detach
1796 * it before proceeding */
1797 if (vp && (V_attachState(vp) != VOL_STATE_PREATTACHED)) {
1798 VCreateReservation_r(vp);
1799 VWaitExclusiveState_r(vp);
1801 /* at this point state must be one of:
1810 if (vp->specialStatus == VBUSY)
1813 /* if it's already attached, see if we can return it */
1814 if (V_attachState(vp) == VOL_STATE_ATTACHED) {
1815 VGetVolumeByVp_r(ec, vp);
1816 if (V_inUse(vp) == fileServer) {
1817 VCancelReservation_r(vp);
1821 /* otherwise, we need to detach, and attempt to re-attach */
1822 VDetachVolume_r(ec, vp);
1824 Log("VAttachVolume: Error detaching old volume instance (%s)\n", name);
1827 /* if it isn't fully attached, delete from the hash tables,
1828 and let the refcounter handle the rest */
1829 DeleteVolumeFromHashTable(vp);
1830 DeleteVolumeFromVByPList_r(vp);
1833 VCancelReservation_r(vp);
1837 /* pre-attach volume if it hasn't been done yet */
1839 (V_attachState(vp) == VOL_STATE_UNATTACHED) ||
1840 (V_attachState(vp) == VOL_STATE_ERROR)) {
1842 vp = VPreAttachVolumeByVp_r(ec, partp, vp, volumeId);
1850 /* handle pre-attach races
1852 * multiple threads can race to pre-attach a volume,
1853 * but we can't let them race beyond that
1855 * our solution is to let the first thread to bring
1856 * the volume into an exclusive state win; the other
1857 * threads just wait until it finishes bringing the
1858 * volume online, and then they do a vgetvolumebyvp
1860 if (svp && (svp != vp)) {
1861 /* wait for other exclusive ops to finish */
1862 VCreateReservation_r(vp);
1863 VWaitExclusiveState_r(vp);
1865 /* get a heavyweight ref, kill the lightweight ref, and return */
1866 VGetVolumeByVp_r(ec, vp);
1867 VCancelReservation_r(vp);
1871 /* at this point, we are chosen as the thread to do
1872 * demand attachment for this volume. all other threads
1873 * doing a getvolume on vp->hashid will block until we finish */
1875 /* make sure any old header cache entries are invalidated
1876 * before proceeding */
1877 FreeVolumeHeader(vp);
1879 VChangeState_r(vp, VOL_STATE_ATTACHING);
1881 /* restore any saved counters */
1882 memcpy(&vp->stats, &stats_save, sizeof(VolumeStats));
1883 #else /* AFS_DEMAND_ATTACH_FS */
1884 vp = VGetVolume_r(ec, volumeId);
1886 if (V_inUse(vp) == fileServer)
1888 if (vp->specialStatus == VBUSY)
1890 VDetachVolume_r(ec, vp);
1892 Log("VAttachVolume: Error detaching volume (%s)\n", name);
1896 #endif /* AFS_DEMAND_ATTACH_FS */
1900 strcpy(path, VPartitionPath(partp));
1906 if ((fd = afs_open(path, O_RDONLY)) == -1 || afs_fstat(fd, &status) == -1) {
1907 Log("VAttachVolume: Failed to open %s (errno %d)\n", path, errno);
1914 n = read(fd, &diskHeader, sizeof(diskHeader));
1916 if (n != sizeof(diskHeader)
1917 || diskHeader.stamp.magic != VOLUMEHEADERMAGIC) {
1918 Log("VAttachVolume: Error reading volume header %s\n", path);
1923 if (diskHeader.stamp.version != VOLUMEHEADERVERSION) {
1924 Log("VAttachVolume: Volume %s, version number is incorrect; volume needs salvaged\n", path);
1930 DiskToVolumeHeader(&iheader, &diskHeader);
1931 #ifdef FSSYNC_BUILD_CLIENT
1932 if (programType == volumeUtility && mode != V_SECRETLY && mode != V_PEEK) {
1934 if (FSYNC_VolOp(iheader.id, partition, FSYNC_VOL_NEEDVOLUME, mode, NULL)
1936 Log("VAttachVolume: attach of volume %u apparently denied by file server\n", iheader.id);
1937 *ec = VNOVOL; /* XXXX */
1945 vp = (Volume *) calloc(1, sizeof(Volume));
1947 vp->device = partp->device;
1948 vp->partition = partp;
1949 queue_Init(&vp->vnode_list);
1950 #ifdef AFS_DEMAND_ATTACH_FS
1951 assert(pthread_cond_init(&V_attachCV(vp), NULL) == 0);
1952 #endif /* AFS_DEMAND_ATTACH_FS */
1955 /* attach2 is entered without any locks, and returns
1956 * with vol_glock_mutex held */
1957 vp = attach2(ec, volumeId, path, &iheader, partp, vp, isbusy, mode);
1959 if (programType == volumeUtility && vp) {
1960 if ((mode == V_VOLUPD) || (VolumeWriteable(vp) && (mode == V_CLONE))) {
1961 /* mark volume header as in use so that volser crashes lead to a
1962 * salvage attempt */
1963 VUpdateVolume_r(ec, vp, 0);
1965 #ifdef AFS_DEMAND_ATTACH_FS
1966 /* for dafs, we should tell the fileserver, except for V_PEEK
1967 * where we know it is not necessary */
1968 if (mode == V_PEEK) {
1969 vp->needsPutBack = 0;
1971 vp->needsPutBack = 1;
1973 #else /* !AFS_DEMAND_ATTACH_FS */
1974 /* duplicate computation in fssync.c about whether the server
1975 * takes the volume offline or not. If the volume isn't
1976 * offline, we must not return it when we detach the volume,
1977 * or the server will abort */
1978 if (mode == V_READONLY || mode == V_PEEK
1979 || (!VolumeWriteable(vp) && (mode == V_CLONE || mode == V_DUMP)))
1980 vp->needsPutBack = 0;
1982 vp->needsPutBack = 1;
1983 #endif /* !AFS_DEMAND_ATTACH_FS */
1985 /* OK, there's a problem here, but one that I don't know how to
1986 * fix right now, and that I don't think should arise often.
1987 * Basically, we should only put back this volume to the server if
1988 * it was given to us by the server, but since we don't have a vp,
1989 * we can't run the VolumeWriteable function to find out as we do
1990 * above when computing vp->needsPutBack. So we send it back, but
1991 * there's a path in VAttachVolume on the server which may abort
1992 * if this volume doesn't have a header. Should be pretty rare
1993 * for all of that to happen, but if it does, probably the right
1994 * fix is for the server to allow the return of readonly volumes
1995 * that it doesn't think are really checked out. */
1996 #ifdef FSSYNC_BUILD_CLIENT
1997 if (programType == volumeUtility && vp == NULL &&
1998 mode != V_SECRETLY && mode != V_PEEK) {
1999 FSYNC_VolOp(iheader.id, partition, FSYNC_VOL_ON, 0, NULL);
2002 if (programType == fileServer && vp) {
2003 #ifdef AFS_DEMAND_ATTACH_FS
2005 * we can get here in cases where we don't "own"
2006 * the volume (e.g. volume owned by a utility).
2007 * short circuit around potential disk header races.
2009 if (V_attachState(vp) != VOL_STATE_ATTACHED) {
2013 V_needsCallback(vp) = 0;
2015 if (VInit >= 2 && V_BreakVolumeCallbacks) {
2016 Log("VAttachVolume: Volume %u was changed externally; breaking callbacks\n", V_id(vp));
2017 (*V_BreakVolumeCallbacks) (V_id(vp));
2020 VUpdateVolume_r(ec, vp, 0);
2022 Log("VAttachVolume: Error updating volume\n");
2027 if (VolumeWriteable(vp) && V_dontSalvage(vp) == 0) {
2028 #ifndef AFS_DEMAND_ATTACH_FS
2029 /* This is a hack: by temporarily setting the incore
2030 * dontSalvage flag ON, the volume will be put back on the
2031 * Update list (with dontSalvage OFF again). It will then
2032 * come back in N minutes with DONT_SALVAGE eventually
2033 * set. This is how volumes that have never had it set
2034 * get it set, and how volumes that have been
2035 * offline without DONT_SALVAGE having been set also
2036 * eventually get it set */
2037 V_dontSalvage(vp) = DONT_SALVAGE;
2038 #endif /* !AFS_DEMAND_ATTACH_FS */
2039 VAddToVolumeUpdateList_r(ec, vp);
2041 Log("VAttachVolume: Error adding volume to update list\n");
2048 Log("VOnline: volume %u (%s) attached and online\n", V_id(vp),
2053 if (programType == volumeUtility) {
2054 VUnlockPartition_r(partition);
2057 #ifdef AFS_DEMAND_ATTACH_FS
2058 /* attach failed; make sure we're in error state */
2059 if (vp && !VIsErrorState(V_attachState(vp))) {
2060 VChangeState_r(vp, VOL_STATE_ERROR);
2062 #endif /* AFS_DEMAND_ATTACH_FS */
2069 #ifdef AFS_DEMAND_ATTACH_FS
2070 /* VAttachVolumeByVp_r
2072 * finish attaching a volume that is
2073 * in a less than fully attached state
2075 /* caller MUST hold a ref count on vp */
2077 VAttachVolumeByVp_r(Error * ec, Volume * vp, int mode)
2079 char name[VMAXPATHLEN];
2080 int fd, n, reserve = 0;
2081 struct afs_stat status;
2082 struct VolumeDiskHeader diskHeader;
2083 struct VolumeHeader iheader;
2084 struct DiskPartition64 *partp;
2089 VolumeStats stats_save;
2092 /* volume utility should never call AttachByVp */
2093 assert(programType == fileServer);
2095 volumeId = vp->hashid;
2096 partp = vp->partition;
2097 VolumeExternalName_r(volumeId, name, sizeof(name));
2100 /* if another thread is performing a blocking op, wait */
2101 VWaitExclusiveState_r(vp);
2103 memcpy(&stats_save, &vp->stats, sizeof(VolumeStats));
2105 /* if it's already attached, see if we can return it */
2106 if (V_attachState(vp) == VOL_STATE_ATTACHED) {
2107 VGetVolumeByVp_r(ec, vp);
2108 if (V_inUse(vp) == fileServer) {
2111 if (vp->specialStatus == VBUSY)
2113 VDetachVolume_r(ec, vp);
2115 Log("VAttachVolume: Error detaching volume (%s)\n", name);
2121 /* pre-attach volume if it hasn't been done yet */
2123 (V_attachState(vp) == VOL_STATE_UNATTACHED) ||
2124 (V_attachState(vp) == VOL_STATE_ERROR)) {
2125 nvp = VPreAttachVolumeByVp_r(ec, partp, vp, volumeId);
2131 VCreateReservation_r(nvp);
2137 VChangeState_r(vp, VOL_STATE_ATTACHING);
2139 /* restore monotonically increasing stats */
2140 memcpy(&vp->stats, &stats_save, sizeof(VolumeStats));
2145 /* compute path to disk header,
2147 * and verify magic and version stamps */
2148 strcpy(path, VPartitionPath(partp));
2154 if ((fd = afs_open(path, O_RDONLY)) == -1 || afs_fstat(fd, &status) == -1) {
2155 Log("VAttachVolume: Failed to open %s (errno %d)\n", path, errno);
2162 n = read(fd, &diskHeader, sizeof(diskHeader));
2164 if (n != sizeof(diskHeader)
2165 || diskHeader.stamp.magic != VOLUMEHEADERMAGIC) {
2166 Log("VAttachVolume: Error reading volume header %s\n", path);
2171 if (diskHeader.stamp.version != VOLUMEHEADERVERSION) {
2172 Log("VAttachVolume: Volume %s, version number is incorrect; volume needs salvaged\n", path);
2178 /* convert on-disk header format to in-memory header format */
2179 DiskToVolumeHeader(&iheader, &diskHeader);
2183 * NOTE: attach2 is entered without any locks, and returns
2184 * with vol_glock_mutex held */
2185 vp = attach2(ec, volumeId, path, &iheader, partp, vp, isbusy, mode);
2188 * the event that an error was encountered, or
2189 * the volume was not brought to an attached state
2190 * for any reason, skip to the end. We cannot
2191 * safely call VUpdateVolume unless we "own" it.
2195 (V_attachState(vp) != VOL_STATE_ATTACHED)) {
2199 V_needsCallback(vp) = 0;
2200 VUpdateVolume_r(ec, vp, 0);
2202 Log("VAttachVolume: Error updating volume %u\n", vp->hashid);
2206 if (VolumeWriteable(vp) && V_dontSalvage(vp) == 0) {
2207 #ifndef AFS_DEMAND_ATTACH_FS
2208 /* This is a hack: by temporarily setting the incore
2209 * dontSalvage flag ON, the volume will be put back on the
2210 * Update list (with dontSalvage OFF again). It will then
2211 * come back in N minutes with DONT_SALVAGE eventually
2213 * set. This is how volumes that have never had it set
2214 * get it set, and how volumes that have been
2215 * offline without DONT_SALVAGE having been set also
2216 * eventually get it set */
2216 V_dontSalvage(vp) = DONT_SALVAGE;
2217 #endif /* !AFS_DEMAND_ATTACH_FS */
2218 VAddToVolumeUpdateList_r(ec, vp);
2220 Log("VAttachVolume: Error adding volume %u to update list\n", vp->hashid);
2227 Log("VOnline: volume %u (%s) attached and online\n", V_id(vp),
2231 VCancelReservation_r(nvp);
2234 if (*ec && (*ec != VOFFLINE) && (*ec != VSALVAGE)) {
2235 if (vp && !VIsErrorState(V_attachState(vp))) {
2236 VChangeState_r(vp, VOL_STATE_ERROR);
2243 #endif /* AFS_DEMAND_ATTACH_FS */
2246 * called without any locks held
2247 * returns with vol_glock_mutex held
2250 attach2(Error * ec, VolId volumeId, char *path, register struct VolumeHeader * header,
2251 struct DiskPartition64 * partp, register Volume * vp, int isbusy, int mode)
2253 vp->specialStatus = (byte) (isbusy ? VBUSY : 0);
2254 IH_INIT(vp->vnodeIndex[vLarge].handle, partp->device, header->parent,
2255 header->largeVnodeIndex);
2256 IH_INIT(vp->vnodeIndex[vSmall].handle, partp->device, header->parent,
2257 header->smallVnodeIndex);
2258 IH_INIT(vp->diskDataHandle, partp->device, header->parent,
2259 header->volumeInfo);
2260 IH_INIT(vp->linkHandle, partp->device, header->parent, header->linkTable);
2261 vp->shuttingDown = 0;
2262 vp->goingOffline = 0;
2264 #ifdef AFS_DEMAND_ATTACH_FS
2265 vp->stats.last_attach = FT_ApproxTime();
2266 vp->stats.attaches++;
2270 IncUInt64(&VStats.attaches);
2271 vp->cacheCheck = ++VolumeCacheCheck;
2272 /* just in case this ever rolls over */
2273 if (!vp->cacheCheck)
2274 vp->cacheCheck = ++VolumeCacheCheck;
2275 GetVolumeHeader(vp);
2278 #if defined(AFS_DEMAND_ATTACH_FS) && defined(FSSYNC_BUILD_CLIENT)
2279 /* demand attach changes the V_PEEK mechanism
2281 * we can now suck the current disk data structure over
2282 * the fssync interface without going to disk
2284 * (technically, we don't need to restrict this feature
2285 * to demand attach fileservers. However, I'm trying
2286 * to limit the number of common code changes)
2288 if (programType != fileServer && mode == V_PEEK) {
2290 res.payload.len = sizeof(VolumeDiskData);
2291 res.payload.buf = &vp->header->diskstuff;
2293 if (FSYNC_VolOp(volumeId,
2295 FSYNC_VOL_QUERY_HDR,
2298 goto disk_header_loaded;
2301 #endif /* AFS_DEMAND_ATTACH_FS && FSSYNC_BUILD_CLIENT */
2302 (void)ReadHeader(ec, V_diskDataHandle(vp), (char *)&V_disk(vp),
2303 sizeof(V_disk(vp)), VOLUMEINFOMAGIC, VOLUMEINFOVERSION);
2305 #ifdef AFS_DEMAND_ATTACH_FS
2308 IncUInt64(&VStats.hdr_loads);
2309 IncUInt64(&vp->stats.hdr_loads);
2311 #endif /* AFS_DEMAND_ATTACH_FS */
2314 Log("VAttachVolume: Error reading diskDataHandle vol header %s; error=%u\n", path, *ec);
2319 #ifdef AFS_DEMAND_ATTACH_FS
2322 /* check for pending volume operations */
2323 if (vp->pending_vol_op) {
2324 /* see if the pending volume op requires exclusive access */
2325 if (!VVolOpLeaveOnline_r(vp, vp->pending_vol_op)) {
2326 /* mark the volume down */
2328 VChangeState_r(vp, VOL_STATE_UNATTACHED);
2329 if (V_offlineMessage(vp)[0] == '\0')
2330 strlcpy(V_offlineMessage(vp),
2331 "A volume utility is running.",
2332 sizeof(V_offlineMessage(vp)));
2333 V_offlineMessage(vp)[sizeof(V_offlineMessage(vp)) - 1] = '\0';
2335 /* check to see if we should set the specialStatus flag */
2336 if (VVolOpSetVBusy_r(vp, vp->pending_vol_op)) {
2337 vp->specialStatus = VBUSY;
2342 V_attachFlags(vp) |= VOL_HDR_LOADED;
2343 vp->stats.last_hdr_load = vp->stats.last_attach;
2345 #endif /* AFS_DEMAND_ATTACH_FS */
2348 struct IndexFileHeader iHead;
2350 #if OPENAFS_VOL_STATS
2352 * We just read in the diskstuff part of the header. If the detailed
2353 * volume stats area has not yet been initialized, we should bzero the
2354 * area and mark it as initialized.
2356 if (!(V_stat_initialized(vp))) {
2357 memset((char *)(V_stat_area(vp)), 0, VOL_STATS_BYTES);
2358 V_stat_initialized(vp) = 1;
2360 #endif /* OPENAFS_VOL_STATS */
2362 (void)ReadHeader(ec, vp->vnodeIndex[vSmall].handle,
2363 (char *)&iHead, sizeof(iHead),
2364 SMALLINDEXMAGIC, SMALLINDEXVERSION);
2367 Log("VAttachVolume: Error reading smallVnode vol header %s; error=%u\n", path, *ec);
2372 struct IndexFileHeader iHead;
2374 (void)ReadHeader(ec, vp->vnodeIndex[vLarge].handle,
2375 (char *)&iHead, sizeof(iHead),
2376 LARGEINDEXMAGIC, LARGEINDEXVERSION);
2379 Log("VAttachVolume: Error reading largeVnode vol header %s; error=%u\n", path, *ec);
2383 #ifdef AFS_NAMEI_ENV
2385 struct versionStamp stamp;
2387 (void)ReadHeader(ec, V_linkHandle(vp), (char *)&stamp,
2388 sizeof(stamp), LINKTABLEMAGIC, LINKTABLEVERSION);
2391 Log("VAttachVolume: Error reading namei vol header %s; error=%u\n", path, *ec);
2394 #endif /* AFS_NAMEI_ENV */
2396 #if defined(AFS_DEMAND_ATTACH_FS)
2397 if (*ec && ((*ec != VOFFLINE) || (V_attachState(vp) != VOL_STATE_UNATTACHED))) {
2399 if (programType == fileServer) {
2400 VRequestSalvage_r(ec, vp, SALVSYNC_ERROR, VOL_SALVAGE_INVALIDATE_HEADER);
2403 Log("VAttachVolume: Error attaching volume %s; volume needs salvage; error=%u\n", path, *ec);
2409 /* volume operation in progress */
2413 #else /* AFS_DEMAND_ATTACH_FS */
2415 Log("VAttachVolume: Error attaching volume %s; volume needs salvage; error=%u\n", path, *ec);
2420 #endif /* AFS_DEMAND_ATTACH_FS */
2422 if (V_needsSalvaged(vp)) {
2423 if (vp->specialStatus)
2424 vp->specialStatus = 0;
2426 #if defined(AFS_DEMAND_ATTACH_FS)
2427 if (programType == fileServer) {
2428 VRequestSalvage_r(ec, vp, SALVSYNC_NEEDED, VOL_SALVAGE_INVALIDATE_HEADER);
2431 Log("VAttachVolume: volume salvage flag is ON for %s; volume needs salvage\n", path);
2435 #else /* AFS_DEMAND_ATTACH_FS */
2438 #endif /* AFS_DEMAND_ATTACH_FS */
2443 if (programType == fileServer) {
2444 #ifndef FAST_RESTART
2445 if (V_inUse(vp) && VolumeWriteable(vp)) {
2446 if (!V_needsSalvaged(vp)) {
2447 V_needsSalvaged(vp) = 1;
2448 VUpdateVolume_r(ec, vp, 0);
2450 #if defined(AFS_DEMAND_ATTACH_FS)
2451 VRequestSalvage_r(ec, vp, SALVSYNC_NEEDED, VOL_SALVAGE_INVALIDATE_HEADER);
2453 #else /* AFS_DEMAND_ATTACH_FS */
2454 Log("VAttachVolume: volume %s needs to be salvaged; not attached.\n", path);
2457 #endif /* AFS_DEMAND_ATTACH_FS */
2460 #endif /* FAST_RESTART */
2462 if (V_destroyMe(vp) == DESTROY_ME) {
2463 #if defined(AFS_DEMAND_ATTACH_FS)
2464 /* schedule a salvage so the volume goes away on disk */
2465 VRequestSalvage_r(ec, vp, SALVSYNC_ERROR, VOL_SALVAGE_INVALIDATE_HEADER);
2466 VChangeState_r(vp, VOL_STATE_ERROR);
2468 #endif /* AFS_DEMAND_ATTACH_FS */
2470 Log("VAttachVolume: volume %s is junk; it should be destroyed at next salvage\n", path);
2476 vp->nextVnodeUnique = V_uniquifier(vp);
2477 vp->vnodeIndex[vSmall].bitmap = vp->vnodeIndex[vLarge].bitmap = NULL;
2478 #ifndef BITMAP_LATER
2479 if (programType == fileServer && VolumeWriteable(vp)) {
2481 for (i = 0; i < nVNODECLASSES; i++) {
2482 VGetBitmap_r(ec, vp, i);
2484 #ifdef AFS_DEMAND_ATTACH_FS
2485 VRequestSalvage_r(ec, vp, SALVSYNC_ERROR, VOL_SALVAGE_INVALIDATE_HEADER);
2487 #else /* AFS_DEMAND_ATTACH_FS */
2489 #endif /* AFS_DEMAND_ATTACH_FS */
2490 Log("VAttachVolume: error getting bitmap for volume (%s)\n",
2496 #endif /* BITMAP_LATER */
2498 if (programType == fileServer) {
2499 if (vp->specialStatus)
2500 vp->specialStatus = 0;
2501 if (V_blessed(vp) && V_inService(vp) && !V_needsSalvaged(vp)) {
2502 V_inUse(vp) = fileServer;
2503 V_offlineMessage(vp)[0] = '\0';
2506 V_inUse(vp) = programType;
2507 V_checkoutMode(vp) = mode;
2510 AddVolumeToHashTable(vp, V_id(vp));
2511 #ifdef AFS_DEMAND_ATTACH_FS
2512 if ((programType != fileServer) ||
2513 (V_inUse(vp) == fileServer)) {
2514 AddVolumeToVByPList_r(vp);
2516 VChangeState_r(vp, VOL_STATE_ATTACHED);
2518 VChangeState_r(vp, VOL_STATE_UNATTACHED);
2524 /* Attach an existing volume.
2525 The volume also normally goes online at this time.
2526 An offline volume must be reattached to make it go online.
2530 VAttachVolume(Error * ec, VolumeId volumeId, int mode)
2534 retVal = VAttachVolume_r(ec, volumeId, mode);
2540 VAttachVolume_r(Error * ec, VolumeId volumeId, int mode)
2543 VGetVolumePath(ec, volumeId, &part, &name);
2545 register Volume *vp;
2547 vp = VGetVolume_r(&error, volumeId);
2549 assert(V_inUse(vp) == 0);
2550 VDetachVolume_r(ec, vp);
2554 return VAttachVolumeByName_r(ec, part, name, mode);
2557 /* Increment a reference count to a volume, sans context swaps. Requires
2558 * possibly reading the volume header in from the disk, since there's
2559 * an invariant in the volume package that nUsers>0 ==> vp->header is valid.
2561 * N.B. This call can fail if we can't read in the header!! In this case
2562 * we still guarantee we won't context swap, but the ref count won't be
2563 * incremented (otherwise we'd violate the invariant).
2565 /* NOTE: with the demand attach fileserver extensions, the global lock
2566 * is dropped within VHold */
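/* Illustrative sketch, not part of the original source: a typical use of
 * VHold_r by a caller that already holds VOL_LOCK.  It relies on the
 * invariant described above (nUsers > 0 ==> vp->header is valid); a zero
 * return is assumed here to mean the hold succeeded, and VPutVolume_r is
 * used to drop the reference since no separate unhold routine appears in
 * this excerpt.
 *
 *     VOL_LOCK;
 *     if (VHold_r(vp) == 0) {
 *         // header is guaranteed valid while the hold is in effect
 *         // ... examine vp->header ...
 *         VPutVolume_r(vp);        // release the reference when finished
 *     }
 *     VOL_UNLOCK;                  // assumed counterpart of VOL_LOCK
 */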
2567 #ifdef AFS_DEMAND_ATTACH_FS
2569 VHold_r(register Volume * vp)
2573 VCreateReservation_r(vp);
2574 VWaitExclusiveState_r(vp);
2576 LoadVolumeHeader(&error, vp);
2578 VCancelReservation_r(vp);
2582 VCancelReservation_r(vp);
2585 #else /* AFS_DEMAND_ATTACH_FS */
2587 VHold_r(register Volume * vp)
2591 LoadVolumeHeader(&error, vp);
2597 #endif /* AFS_DEMAND_ATTACH_FS */
2600 VHold(register Volume * vp)
2604 retVal = VHold_r(vp);
2610 /***************************************************/
2611 /* get and put volume routines */
2612 /***************************************************/
2615 * put back a heavyweight reference to a volume object.
2617 * @param[in] vp volume object pointer
2619 * @pre VOL_LOCK held
2621 * @post heavyweight volume reference put back.
2622 * depending on state, volume may have been taken offline,
2623 * detached, salvaged, freed, etc.
2625 * @internal volume package internal use only
2628 VPutVolume_r(register Volume * vp)
2630 assert(--vp->nUsers >= 0);
2631 if (vp->nUsers == 0) {
2633 ReleaseVolumeHeader(vp->header);
2634 #ifdef AFS_DEMAND_ATTACH_FS
2635 if (!VCheckDetach(vp)) {
2639 #else /* AFS_DEMAND_ATTACH_FS */
2641 #endif /* AFS_DEMAND_ATTACH_FS */
2646 VPutVolume(register Volume * vp)
2654 /* Get a pointer to an attached volume. The pointer is returned regardless
2655 of whether or not the volume is in service or on/off line. An error
2656 code, however, is returned with an indication of the volume's status */
2658 VGetVolume(Error * ec, Error * client_ec, VolId volumeId)
2662 retVal = GetVolume(ec, client_ec, volumeId, NULL, 0);
2668 VGetVolume_r(Error * ec, VolId volumeId)
2670 return GetVolume(ec, NULL, volumeId, NULL, 0);
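/* Illustrative sketch, not part of the original source: the usual
 * get/use/put pairing.  Per the comment above, an error code can come back
 * alongside a pointer, so both are checked before the volume is used.
 *
 *     Error ec = 0, client_ec = 0;
 *     Volume *vp = VGetVolume(&ec, &client_ec, volumeId);
 *     if (vp && !ec) {
 *         // ... operate on the attached volume ...
 *         VPutVolume(vp);          // give the heavyweight reference back
 *     }
 *     // otherwise ec (and client_ec, when supplied) indicate the status
 */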
2673 /* try to get a volume we've previously looked up */
2674 /* for demand attach fs, caller MUST NOT hold a ref count on vp */
2676 VGetVolumeByVp_r(Error * ec, Volume * vp)
2678 return GetVolume(ec, NULL, vp->hashid, vp, 0);
2681 /* private interface for getting a volume handle
2682 * volumeId must be provided.
2683 * hint is an optional parameter to speed up hash lookups
2684 * flags is not used at this time
2686 /* for demand attach fs, caller MUST NOT hold a ref count on hint */
2688 GetVolume(Error * ec, Error * client_ec, VolId volumeId, Volume * hint, int flags)
2691 /* pull this profiling/debugging code out of regular builds */
2693 #define VGET_CTR_INC(x) x++
2694 unsigned short V0 = 0, V1 = 0, V2 = 0, V3 = 0, V5 = 0, V6 =
2695 0, V7 = 0, V8 = 0, V9 = 0;
2696 unsigned short V10 = 0, V11 = 0, V12 = 0, V13 = 0, V14 = 0, V15 = 0;
2698 #define VGET_CTR_INC(x)
2700 #ifdef AFS_DEMAND_ATTACH_FS
2701 Volume *avp, * rvp = hint;
2705 * if VInit is zero, the volume package dynamic
2706 * data structures have not been initialized yet,
2707 * and we must immediately return an error
2713 *client_ec = VOFFLINE;
2718 #ifdef AFS_DEMAND_ATTACH_FS
2720 VCreateReservation_r(rvp);
2722 #endif /* AFS_DEMAND_ATTACH_FS */
2730 vp = VLookupVolume_r(ec, volumeId, vp);
2736 #ifdef AFS_DEMAND_ATTACH_FS
2737 if (rvp && (rvp != vp)) {
2738 /* break reservation on old vp */
2739 VCancelReservation_r(rvp);
2742 #endif /* AFS_DEMAND_ATTACH_FS */
2748 /* Until we have reached an initialization level of 2
2749 * we don't know whether this volume exists or not.
2750 * We can't sleep and retry later because before a volume
2751 * is attached, the caller tries to get it first. Just
2752 * return VOFFLINE and the caller can choose whether to
2753 * retry the command or not. */
2763 IncUInt64(&VStats.hdr_gets);
2765 #ifdef AFS_DEMAND_ATTACH_FS
2766 /* block if someone else is performing an exclusive op on this volume */
2769 VCreateReservation_r(rvp);
2771 VWaitExclusiveState_r(vp);
2773 /* short circuit with VNOVOL in the following circumstances:
2776 * - VOL_STATE_SHUTTING_DOWN
2778 if ((V_attachState(vp) == VOL_STATE_ERROR) ||
2779 (V_attachState(vp) == VOL_STATE_SHUTTING_DOWN)) {
2786 * short circuit with VOFFLINE in the following circumstances:
2788 * - VOL_STATE_UNATTACHED
2790 if (V_attachState(vp) == VOL_STATE_UNATTACHED) {
2791 if (vp->specialStatus) {
2792 *ec = vp->specialStatus;
2800 /* allowable states:
2807 if (vp->salvage.requested) {
2808 VUpdateSalvagePriority_r(vp);
2811 if (V_attachState(vp) == VOL_STATE_PREATTACHED) {
2812 avp = VAttachVolumeByVp_r(ec, vp, 0);
2815 /* VAttachVolumeByVp_r can return a pointer
2816 * != the vp passed to it under certain
2817 * conditions; make sure we don't leak
2818 * reservations if that happens */
2820 VCancelReservation_r(rvp);
2822 VCreateReservation_r(rvp);
2832 if (!vp->pending_vol_op) {
2847 if ((V_attachState(vp) == VOL_STATE_SALVAGING) ||
2848 (*ec == VSALVAGING)) {
2850 /* see CheckVnode() in afsfileprocs.c for an explanation
2851 * of this error code logic */
2852 afs_uint32 now = FT_ApproxTime();
2853 if ((vp->stats.last_salvage + (10 * 60)) >= now) {
2856 *client_ec = VRESTARTING;
2865 LoadVolumeHeader(ec, vp);
2868 /* Only log the error if it was a totally unexpected error. A missing
2869 * inode, for instance, is most likely caused by the volume being deleted */
2870 if (errno != ENXIO || LogLevel)
2871 Log("Volume %u: couldn't reread volume header\n",
2873 #ifdef AFS_DEMAND_ATTACH_FS
2874 if (programType == fileServer) {
2875 VRequestSalvage_r(ec, vp, SALVSYNC_ERROR, VOL_SALVAGE_INVALIDATE_HEADER);
2880 #else /* AFS_DEMAND_ATTACH_FS */
2883 #endif /* AFS_DEMAND_ATTACH_FS */
2887 #ifdef AFS_DEMAND_ATTACH_FS
2889 * this test MUST happen after the volume header is loaded
2891 if (vp->pending_vol_op && !VVolOpLeaveOnline_r(vp, vp->pending_vol_op)) {
2893 * volume cannot remain online during this volume operation.
2896 if (vp->specialStatus) {
2898 * special status codes outrank normal VOFFLINE code
2900 *ec = vp->specialStatus;
2902 *client_ec = vp->specialStatus;
2906 /* see CheckVnode() in afsfileprocs.c for an explanation
2907 * of this error code logic */
2908 afs_uint32 now = FT_ApproxTime();
2909 if ((vp->stats.last_vol_op + (10 * 60)) >= now) {
2912 *client_ec = VRESTARTING;
2917 ReleaseVolumeHeader(vp->header);
2921 #endif /* AFS_DEMAND_ATTACH_FS */
2924 if (vp->shuttingDown) {
2931 if (programType == fileServer) {
2933 if (vp->goingOffline) {
2935 #ifdef AFS_DEMAND_ATTACH_FS
2936 /* wait for the volume to go offline */
2937 if (V_attachState(vp) == VOL_STATE_GOING_OFFLINE) {
2938 VWaitStateChange_r(vp);
2940 #elif defined(AFS_PTHREAD_ENV)
2941 VOL_CV_WAIT(&vol_put_volume_cond);
2942 #else /* AFS_PTHREAD_ENV */
2943 LWP_WaitProcess(VPutVolume);
2944 #endif /* AFS_PTHREAD_ENV */
2947 if (vp->specialStatus) {
2949 *ec = vp->specialStatus;
2950 } else if (V_inService(vp) == 0 || V_blessed(vp) == 0) {
2953 } else if (V_inUse(vp) == 0) {
2964 #ifdef AFS_DEMAND_ATTACH_FS
2965 /* if no error, bump nUsers */
2968 VLRU_UpdateAccess_r(vp);
2971 VCancelReservation_r(rvp);
2974 if (client_ec && !*client_ec) {
2977 #else /* AFS_DEMAND_ATTACH_FS */
2978 /* if no error, bump nUsers */
2985 #endif /* AFS_DEMAND_ATTACH_FS */
2993 /***************************************************/
2994 /* Volume offline/detach routines */
2995 /***************************************************/
2997 /* caller MUST hold a heavyweight ref on vp */
2998 #ifdef AFS_DEMAND_ATTACH_FS
3000 VTakeOffline_r(register Volume * vp)
3004 assert(vp->nUsers > 0);
3005 assert(programType == fileServer);
3007 VCreateReservation_r(vp);
3008 VWaitExclusiveState_r(vp);
3010 vp->goingOffline = 1;
3011 V_needsSalvaged(vp) = 1;
3013 VRequestSalvage_r(&error, vp, SALVSYNC_ERROR, 0);
3014 VCancelReservation_r(vp);
3016 #else /* AFS_DEMAND_ATTACH_FS */
3018 VTakeOffline_r(register Volume * vp)
3020 assert(vp->nUsers > 0);
3021 assert(programType == fileServer);
3023 vp->goingOffline = 1;
3024 V_needsSalvaged(vp) = 1;
3026 #endif /* AFS_DEMAND_ATTACH_FS */
3029 VTakeOffline(register Volume * vp)
3037 * force a volume offline.
3039 * @param[in] vp volume object pointer
3040 * @param[in] flags flags (see note below)
3042 * @note the flag VOL_FORCEOFF_NOUPDATE is a recursion control flag
3043 * used when VUpdateVolume_r needs to call VForceOffline_r
3044 * (which in turn would normally call VUpdateVolume_r)
3046 * @see VUpdateVolume_r
3048 * @pre VOL_LOCK must be held.
3049 * for DAFS, caller must hold ref.
3051 * @note for DAFS, it _is safe_ to call this function from an
3054 * @post needsSalvaged flag is set.
3055 * for DAFS, salvage is requested.
3056 * no further references to the volume through the volume
3057 * package will be honored.
3058 * all file descriptor and vnode caches are invalidated.
3060 * @warning this is a heavy-handed interface. it results in
3061 * a volume going offline regardless of the current
3062 * reference count state.
3064 * @internal volume package internal use only
3067 VForceOffline_r(Volume * vp, int flags)
3071 #ifdef AFS_DEMAND_ATTACH_FS
3072 VChangeState_r(vp, VOL_STATE_ERROR);
3077 strcpy(V_offlineMessage(vp),
3078 "Forced offline due to internal error: volume needs to be salvaged");
3079 Log("Volume %u forced offline: it needs salvaging!\n", V_id(vp));
3082 vp->goingOffline = 0;
3083 V_needsSalvaged(vp) = 1;
3084 if (!(flags & VOL_FORCEOFF_NOUPDATE)) {
3085 VUpdateVolume_r(&error, vp, VOL_UPDATE_NOFORCEOFF);
3088 #ifdef AFS_DEMAND_ATTACH_FS
3089 VRequestSalvage_r(&error, vp, SALVSYNC_ERROR, VOL_SALVAGE_INVALIDATE_HEADER);
3090 #endif /* AFS_DEMAND_ATTACH_FS */
3092 #ifdef AFS_PTHREAD_ENV
3093 assert(pthread_cond_broadcast(&vol_put_volume_cond) == 0);
3094 #else /* AFS_PTHREAD_ENV */
3095 LWP_NoYieldSignal(VPutVolume);
3096 #endif /* AFS_PTHREAD_ENV */
3098 VReleaseVolumeHandles_r(vp);
3102 * force a volume offline.
3104 * @param[in] vp volume object pointer
3106 * @see VForceOffline_r
3109 VForceOffline(Volume * vp)
3112 VForceOffline_r(vp, 0);
3116 /* The opposite of VAttachVolume. The volume header is written to disk, with
3117 the inUse bit turned off. A copy of the header is maintained in memory,
3118 however (which is why this is VOffline, not VDetach).
3121 VOffline_r(Volume * vp, char *message)
3124 VolumeId vid = V_id(vp);
3126 assert(programType != volumeUtility);
3131 if (V_offlineMessage(vp)[0] == '\0')
3132 strncpy(V_offlineMessage(vp), message, sizeof(V_offlineMessage(vp)));
3133 V_offlineMessage(vp)[sizeof(V_offlineMessage(vp)) - 1] = '\0';
3135 vp->goingOffline = 1;
3136 #ifdef AFS_DEMAND_ATTACH_FS
3137 VChangeState_r(vp, VOL_STATE_GOING_OFFLINE);
3138 VCreateReservation_r(vp);
3141 /* wait for the volume to go offline */
3142 if (V_attachState(vp) == VOL_STATE_GOING_OFFLINE) {
3143 VWaitStateChange_r(vp);
3145 VCancelReservation_r(vp);
3146 #else /* AFS_DEMAND_ATTACH_FS */
3148 vp = VGetVolume_r(&error, vid); /* Wait for it to go offline */
3149 if (vp) /* In case it was reattached... */
3151 #endif /* AFS_DEMAND_ATTACH_FS */
3155 VOffline(Volume * vp, char *message)
3158 VOffline_r(vp, message);
3162 /* This gets used for the most part by utility routines that don't want
3163 * to keep all the volume headers around. Generally, the file server won't
3164 * call this routine, because then the offline message in the volume header
3165 * (or other information) won't be available to clients. For NAMEI, also
3166 * close the file handles. However, the fileserver does call this during
3167 * an attach following a volume operation.
3170 VDetachVolume_r(Error * ec, Volume * vp)
3173 struct DiskPartition64 *tpartp;
3174 int notifyServer, useDone = FSYNC_VOL_ON;
3176 *ec = 0; /* always "succeeds" */
3177 if (programType == volumeUtility) {
3178 notifyServer = vp->needsPutBack;
3179 if (V_destroyMe(vp) == DESTROY_ME)
3180 useDone = FSYNC_VOL_DONE;
3181 #ifdef AFS_DEMAND_ATTACH_FS
3182 else if (!V_blessed(vp) || !V_inService(vp))
3183 useDone = FSYNC_VOL_LEAVE_OFF;
3186 tpartp = vp->partition;
3188 DeleteVolumeFromHashTable(vp);
3189 vp->shuttingDown = 1;
3190 #ifdef AFS_DEMAND_ATTACH_FS
3191 DeleteVolumeFromVByPList_r(vp);
3193 VChangeState_r(vp, VOL_STATE_SHUTTING_DOWN);
3194 #endif /* AFS_DEMAND_ATTACH_FS */
3196 /* Will be detached sometime in the future--this is OK since volume is offline */
3198 /* XXX the following code should really be moved to VCheckDetach() since the volume
3199 * is not technically detached until the refcounts reach zero
3201 #ifdef FSSYNC_BUILD_CLIENT
3202 if (programType == volumeUtility && notifyServer) {
3204 * Note: The server is not notified in the case of a bogus volume
3205 * explicitly to make it possible to create a volume, do a partial
3206 * restore, then abort the operation without ever putting the volume
3207 * online. This is essential in the case of a volume move operation
3208 * between two partitions on the same server. In that case, there
3209 * would be two instances of the same volume, one of them bogus,
3210 * which the file server would attempt to put on line
3212 FSYNC_VolOp(volume, tpartp->name, useDone, 0, NULL);
3213 /* XXX this code path is only hit by volume utilities, thus
3214 * V_BreakVolumeCallbacks will always be NULL. if we really
3215 * want to break callbacks in this path we need to use FSYNC_VolOp() */
3217 /* Detaching it, so break all callbacks on it */
3218 if (V_BreakVolumeCallbacks) {
3219 Log("volume %u detached; breaking all call backs\n", volume);
3220 (*V_BreakVolumeCallbacks) (volume);
3224 #endif /* FSSYNC_BUILD_CLIENT */
3228 VDetachVolume(Error * ec, Volume * vp)
3231 VDetachVolume_r(ec, vp);
3236 /***************************************************/
3237 /* Volume fd/inode handle closing routines */
3238 /***************************************************/
3240 /* For VDetachVolume, we close all cached file descriptors, but keep
3241 * the Inode handles in case we need to read from a busy volume.
3243 /* for demand attach, caller MUST hold ref count on vp */
3245 VCloseVolumeHandles_r(Volume * vp)
3247 #ifdef AFS_DEMAND_ATTACH_FS
3248 VolState state_save;
3250 state_save = VChangeState_r(vp, VOL_STATE_OFFLINING);
3255 * XXX need to investigate whether we can perform
3256 * DFlushVolume outside of vol_glock_mutex...
3258 * VCloseVnodeFiles_r drops the glock internally */
3259 DFlushVolume(V_id(vp));
3260 VCloseVnodeFiles_r(vp);
3262 #ifdef AFS_DEMAND_ATTACH_FS
3266 /* Too time consuming and unnecessary for the volserver */
3267 if (programType != volumeUtility) {
3268 IH_CONDSYNC(vp->vnodeIndex[vLarge].handle);
3269 IH_CONDSYNC(vp->vnodeIndex[vSmall].handle);
3270 IH_CONDSYNC(vp->diskDataHandle);
3272 IH_CONDSYNC(vp->linkHandle);
3273 #endif /* AFS_NT40_ENV */
3276 IH_REALLYCLOSE(vp->vnodeIndex[vLarge].handle);
3277 IH_REALLYCLOSE(vp->vnodeIndex[vSmall].handle);
3278 IH_REALLYCLOSE(vp->diskDataHandle);
3279 IH_REALLYCLOSE(vp->linkHandle);
3281 #ifdef AFS_DEMAND_ATTACH_FS
3283 VChangeState_r(vp, state_save);
3287 /* For both VForceOffline and VOffline, we close all relevant handles.
3288 * For VOffline, if we re-attach the volume, the files may possibly be
3289 * different from before.
3291 /* for demand attach, caller MUST hold a ref count on vp */
3293 VReleaseVolumeHandles_r(Volume * vp)
3295 #ifdef AFS_DEMAND_ATTACH_FS
3296 VolState state_save;
3298 state_save = VChangeState_r(vp, VOL_STATE_DETACHING);
3301 /* XXX need to investigate whether we can perform
3302 * DFlushVolume outside of vol_glock_mutex... */
3303 DFlushVolume(V_id(vp));
3305 VReleaseVnodeFiles_r(vp); /* releases the glock internally */
3307 #ifdef AFS_DEMAND_ATTACH_FS
3311 /* Too time consuming and unnecessary for the volserver */
3312 if (programType != volumeUtility) {
3313 IH_CONDSYNC(vp->vnodeIndex[vLarge].handle);
3314 IH_CONDSYNC(vp->vnodeIndex[vSmall].handle);
3315 IH_CONDSYNC(vp->diskDataHandle);
3317 IH_CONDSYNC(vp->linkHandle);
3318 #endif /* AFS_NT40_ENV */
3321 IH_RELEASE(vp->vnodeIndex[vLarge].handle);
3322 IH_RELEASE(vp->vnodeIndex[vSmall].handle);
3323 IH_RELEASE(vp->diskDataHandle);
3324 IH_RELEASE(vp->linkHandle);
3326 #ifdef AFS_DEMAND_ATTACH_FS
3328 VChangeState_r(vp, state_save);
3333 /***************************************************/
3334 /* Volume write and fsync routines */
3335 /***************************************************/
3338 VUpdateVolume_r(Error * ec, Volume * vp, int flags)
3340 #ifdef AFS_DEMAND_ATTACH_FS
3341 VolState state_save;
3343 if (flags & VOL_UPDATE_WAIT) {
3344 VCreateReservation_r(vp);
3345 VWaitExclusiveState_r(vp);
3350 if (programType == fileServer)
3352 (V_inUse(vp) ? V_nextVnodeUnique(vp) +
3353 200 : V_nextVnodeUnique(vp));
3355 #ifdef AFS_DEMAND_ATTACH_FS
3356 state_save = VChangeState_r(vp, VOL_STATE_UPDATING);
3360 WriteVolumeHeader_r(ec, vp);
3362 #ifdef AFS_DEMAND_ATTACH_FS
3364 VChangeState_r(vp, state_save);
3365 if (flags & VOL_UPDATE_WAIT) {
3366 VCancelReservation_r(vp);
3371 Log("VUpdateVolume: error updating volume header, volume %u (%s)\n",
3372 V_id(vp), V_name(vp));
3373 /* try to update on-disk header,
3374 * while preventing infinite recursion */
3375 if (!(flags & VOL_UPDATE_NOFORCEOFF)) {
3376 VForceOffline_r(vp, VOL_FORCEOFF_NOUPDATE);
3382 VUpdateVolume(Error * ec, Volume * vp)
3385 VUpdateVolume_r(ec, vp, VOL_UPDATE_WAIT);
3390 VSyncVolume_r(Error * ec, Volume * vp, int flags)
3394 #ifdef AFS_DEMAND_ATTACH_FS
3395 VolState state_save;
3398 if (flags & VOL_SYNC_WAIT) {
3399 VUpdateVolume_r(ec, vp, VOL_UPDATE_WAIT);
3401 VUpdateVolume_r(ec, vp, 0);
3404 #ifdef AFS_DEMAND_ATTACH_FS
3405 state_save = VChangeState_r(vp, VOL_STATE_UPDATING);
3408 fdP = IH_OPEN(V_diskDataHandle(vp));
3409 assert(fdP != NULL);
3410 code = FDH_SYNC(fdP);
3413 #ifdef AFS_DEMAND_ATTACH_FS
3415 VChangeState_r(vp, state_save);
3421 VSyncVolume(Error * ec, Volume * vp)
3424 VSyncVolume_r(ec, vp, VOL_SYNC_WAIT);
3429 /***************************************************/
3430 /* Volume deallocation routines */
3431 /***************************************************/
3433 #ifdef AFS_DEMAND_ATTACH_FS
3435 FreeVolume(Volume * vp)
3437 /* free the heap space, iff it's safe.
3438 * otherwise, pull it out of the hash table, so it
3439 * will get deallocated when all refs to it go away */
3440 if (!VCheckFree(vp)) {
3441 DeleteVolumeFromHashTable(vp);
3442 DeleteVolumeFromVByPList_r(vp);
3444 /* make sure we invalidate the header cache entry */
3445 FreeVolumeHeader(vp);
3448 #endif /* AFS_DEMAND_ATTACH_FS */
3451 ReallyFreeVolume(Volume * vp)
3456 #ifdef AFS_DEMAND_ATTACH_FS
3458 VChangeState_r(vp, VOL_STATE_FREED);
3459 if (vp->pending_vol_op)
3460 free(vp->pending_vol_op);
3461 #endif /* AFS_DEMAND_ATTACH_FS */
3462 for (i = 0; i < nVNODECLASSES; i++)
3463 if (vp->vnodeIndex[i].bitmap)
3464 free(vp->vnodeIndex[i].bitmap);
3465 FreeVolumeHeader(vp);
3466 #ifndef AFS_DEMAND_ATTACH_FS
3467 DeleteVolumeFromHashTable(vp);
3468 #endif /* AFS_DEMAND_ATTACH_FS */
3472 /* check to see if we should shut down this volume
3473 * returns 1 if volume was freed, 0 otherwise */
3474 #ifdef AFS_DEMAND_ATTACH_FS
3476 VCheckDetach(register Volume * vp)
3481 if (vp->nUsers || vp->nWaiters)
3484 if (vp->shuttingDown) {
3486 if ((programType != fileServer) &&
3487 (V_inUse(vp) == programType) &&
3488 ((V_checkoutMode(vp) == V_VOLUPD) ||
3489 ((V_checkoutMode(vp) == V_CLONE) &&
3490 (VolumeWriteable(vp))))) {
3492 VUpdateVolume_r(&ec, vp, VOL_UPDATE_NOFORCEOFF);
3494 Log("VCheckDetach: volume header update for volume %u "
3495 "failed with errno %d\n", vp->hashid, errno);
3498 VReleaseVolumeHandles_r(vp);
3500 ReallyFreeVolume(vp);
3501 if (programType == fileServer) {
3502 assert(pthread_cond_broadcast(&vol_put_volume_cond) == 0);
3507 #else /* AFS_DEMAND_ATTACH_FS */
3509 VCheckDetach(register Volume * vp)
3517 if (vp->shuttingDown) {
3519 if ((programType != fileServer) &&
3520 (V_inUse(vp) == programType) &&
3521 ((V_checkoutMode(vp) == V_VOLUPD) ||
3522 ((V_checkoutMode(vp) == V_CLONE) &&
3523 (VolumeWriteable(vp))))) {
3525 VUpdateVolume_r(&ec, vp, VOL_UPDATE_NOFORCEOFF);
3527 Log("VCheckDetach: volume header update for volume %u failed with errno %d\n",
3531 VReleaseVolumeHandles_r(vp);
3532 ReallyFreeVolume(vp);
3533 if (programType == fileServer) {
3534 #if defined(AFS_PTHREAD_ENV)
3535 assert(pthread_cond_broadcast(&vol_put_volume_cond) == 0);
3536 #else /* AFS_PTHREAD_ENV */
3537 LWP_NoYieldSignal(VPutVolume);
3538 #endif /* AFS_PTHREAD_ENV */
3543 #endif /* AFS_DEMAND_ATTACH_FS */
3545 /* check to see if we should offline this volume
3546 * return 1 if volume went offline, 0 otherwise */
3547 #ifdef AFS_DEMAND_ATTACH_FS
3549 VCheckOffline(register Volume * vp)
3551 Volume * rvp = NULL;
3554 if (vp->goingOffline && !vp->nUsers) {
3556 assert(programType == fileServer);
3557 assert((V_attachState(vp) != VOL_STATE_ATTACHED) &&
3558 (V_attachState(vp) != VOL_STATE_FREED) &&
3559 (V_attachState(vp) != VOL_STATE_PREATTACHED) &&
3560 (V_attachState(vp) != VOL_STATE_UNATTACHED));
3564 * VOL_STATE_GOING_OFFLINE
3565 * VOL_STATE_SHUTTING_DOWN
3566 * VIsErrorState(V_attachState(vp))
3567 * VIsExclusiveState(V_attachState(vp))
3570 VCreateReservation_r(vp);
3571 VChangeState_r(vp, VOL_STATE_OFFLINING);
3574 /* must clear the goingOffline flag before we drop the glock */
3575 vp->goingOffline = 0;
3580 /* perform async operations */
3581 VUpdateVolume_r(&error, vp, 0);
3582 VCloseVolumeHandles_r(vp);
3585 Log("VOffline: Volume %u (%s) is now offline", V_id(vp),
3587 if (V_offlineMessage(vp)[0])
3588 Log(" (%s)", V_offlineMessage(vp));
3592 /* invalidate the volume header cache entry */
3593 FreeVolumeHeader(vp);
3595 /* if nothing changed state to error or salvaging,
3596 * drop state to unattached */
3597 if (!VIsErrorState(V_attachState(vp))) {
3598 VChangeState_r(vp, VOL_STATE_UNATTACHED);
3600 VCancelReservation_r(vp);
3601 /* no usage of vp is safe beyond this point */
3605 #else /* AFS_DEMAND_ATTACH_FS */
3607 VCheckOffline(register Volume * vp)
3609 Volume * rvp = NULL;
3612 if (vp->goingOffline && !vp->nUsers) {
3614 assert(programType == fileServer);
3617 vp->goingOffline = 0;
3619 VUpdateVolume_r(&error, vp, 0);
3620 VCloseVolumeHandles_r(vp);
3622 Log("VOffline: Volume %u (%s) is now offline", V_id(vp),
3624 if (V_offlineMessage(vp)[0])
3625 Log(" (%s)", V_offlineMessage(vp));
3628 FreeVolumeHeader(vp);
3629 #ifdef AFS_PTHREAD_ENV
3630 assert(pthread_cond_broadcast(&vol_put_volume_cond) == 0);
3631 #else /* AFS_PTHREAD_ENV */
3632 LWP_NoYieldSignal(VPutVolume);
3633 #endif /* AFS_PTHREAD_ENV */
3637 #endif /* AFS_DEMAND_ATTACH_FS */
3639 /***************************************************/
3640 /* demand attach fs ref counting routines */
3641 /***************************************************/
3643 #ifdef AFS_DEMAND_ATTACH_FS
3644 /* the following two functions handle reference counting for
3645 * asynchronous operations on volume structs.
3647 * their purpose is to prevent a VDetachVolume or VShutdown
3648 * from free()ing the Volume struct during an async i/o op */
3650 /* register with the async volume op ref counter */
3651 /* VCreateReservation_r moved into inline code header because it
3652 * is now needed in vnode.c -- tkeiser 11/20/2007
3656 * decrement volume-package internal refcount.
3658 * @param vp volume object pointer
3660 * @internal volume package internal use only
3663 * @arg VOL_LOCK is held
3664 * @arg lightweight refcount held
3666 * @post volume waiters refcount is decremented; volume may
3667 * have been deallocated/shutdown/offlined/salvaged/
3668 * whatever during the process
3670 * @warning once you have tossed your last reference (you can acquire
3671 * lightweight refs recursively) it is NOT SAFE to reference
3672 * a volume object pointer ever again
3674 * @see VCreateReservation_r
3676 * @note DEMAND_ATTACH_FS only
3679 VCancelReservation_r(Volume * vp)
3681 assert(--vp->nWaiters >= 0);
3682 if (vp->nWaiters == 0) {
3684 if (!VCheckDetach(vp)) {
3691 /* check to see if we should free this volume now
3692 * return 1 if volume was freed, 0 otherwise */
3694 VCheckFree(Volume * vp)
3697 if ((vp->nUsers == 0) &&
3698 (vp->nWaiters == 0) &&
3699 !(V_attachFlags(vp) & (VOL_IN_HASH |
3703 ReallyFreeVolume(vp);
3708 #endif /* AFS_DEMAND_ATTACH_FS */
3711 /***************************************************/
3712 /* online volume operations routines */
3713 /***************************************************/
3715 #ifdef AFS_DEMAND_ATTACH_FS
3717 * register a volume operation on a given volume.
3719 * @param[in] vp volume object
3720 * @param[in] vopinfo volume operation info object
3722 * @pre VOL_LOCK is held
3724 * @post volume operation info object attached to volume object.
3725 * volume operation statistics updated.
3727 * @note by "attached" we mean a copy of the passed in object is made
3729 * @internal volume package internal use only
3732 VRegisterVolOp_r(Volume * vp, FSSYNC_VolOp_info * vopinfo)
3734 FSSYNC_VolOp_info * info;
3736 /* attach a vol op info node to the volume struct */
3737 info = (FSSYNC_VolOp_info *) malloc(sizeof(FSSYNC_VolOp_info));
3738 assert(info != NULL);
3739 memcpy(info, vopinfo, sizeof(FSSYNC_VolOp_info));
3740 vp->pending_vol_op = info;
3743 vp->stats.last_vol_op = FT_ApproxTime();
3744 vp->stats.vol_ops++;
3745 IncUInt64(&VStats.vol_ops);
3751 * deregister the volume operation attached to this volume.
3753 * @param[in] vp volume object pointer
3755 * @pre VOL_LOCK is held
3757 * @post the volume operation info object is detached from the volume object
3759 * @internal volume package internal use only
3762 VDeregisterVolOp_r(Volume * vp)
3764 if (vp->pending_vol_op) {
3765 free(vp->pending_vol_op);
3766 vp->pending_vol_op = NULL;
3770 #endif /* AFS_DEMAND_ATTACH_FS */
3773 * determine whether it is safe to leave a volume online during
3774 * the volume operation described by the vopinfo object.
3776 * @param[in] vp volume object
3777 * @param[in] vopinfo volume operation info object
3779 * @return whether it is safe to leave volume online
3780 * @retval 0 it is NOT SAFE to leave the volume online
3781 * @retval 1 it is safe to leave the volume online during the operation
3784 * @arg VOL_LOCK is held
3785 * @arg disk header attached to vp (heavyweight ref on vp will guarantee
3786 * this condition is met)
3788 * @internal volume package internal use only
3791 VVolOpLeaveOnline_r(Volume * vp, FSSYNC_VolOp_info * vopinfo)
3793 return (vopinfo->com.command == FSYNC_VOL_NEEDVOLUME &&
3794 (vopinfo->com.reason == V_READONLY ||
3795 (!VolumeWriteable(vp) &&
3796 (vopinfo->com.reason == V_CLONE ||
3797 vopinfo->com.reason == V_DUMP))));
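/* Illustrative example, not part of the original source: a read-only
 * checkout may leave the volume online, while a dump of a writeable
 * volume may not.
 *
 *     FSSYNC_VolOp_info info;
 *     info.com.command = FSYNC_VOL_NEEDVOLUME;
 *     info.com.reason  = V_READONLY;
 *     VVolOpLeaveOnline_r(vp, &info);   // returns 1: safe to stay online
 *
 *     info.com.reason  = V_DUMP;
 *     VVolOpLeaveOnline_r(vp, &info);   // 1 only if !VolumeWriteable(vp)
 */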
3801 * determine whether VBUSY should be set during this volume operation.
3803 * @param[in] vp volume object
3804 * @param[in] vopinfo volume operation info object
3806 * @return whether VBUSY should be set
3807 * @retval 0 VBUSY does NOT need to be set
3808 * @retval 1 VBUSY SHOULD be set
3810 * @pre VOL_LOCK is held
3812 * @internal volume package internal use only
3815 VVolOpSetVBusy_r(Volume * vp, FSSYNC_VolOp_info * vopinfo)
3817 return ((vopinfo->com.command == FSYNC_VOL_OFF &&
3818 vopinfo->com.reason == FSYNC_SALVAGE) ||
3819 (vopinfo->com.command == FSYNC_VOL_NEEDVOLUME &&
3820 (vopinfo->com.reason == V_CLONE ||
3821 vopinfo->com.reason == V_DUMP)));
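/* Illustrative example, not part of the original source: per the logic
 * above, a salvager-initiated offline or a clone/dump checkout warrants
 * VBUSY, while a read-only checkout does not.
 *
 *     FSSYNC_VolOp_info info;
 *     info.com.command = FSYNC_VOL_OFF;
 *     info.com.reason  = FSYNC_SALVAGE;
 *     VVolOpSetVBusy_r(vp, &info);      // returns 1: set VBUSY
 *
 *     info.com.command = FSYNC_VOL_NEEDVOLUME;
 *     info.com.reason  = V_READONLY;
 *     VVolOpSetVBusy_r(vp, &info);      // returns 0: VBUSY not needed
 */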
3825 /***************************************************/
3826 /* online salvager routines */
3827 /***************************************************/
3828 #if defined(AFS_DEMAND_ATTACH_FS)
3829 #define SALVAGE_PRIO_UPDATE_INTERVAL 3 /**< number of seconds between prio updates */
3830 #define SALVAGE_COUNT_MAX 16 /**< number of online salvages we
3831 * allow before moving the volume
3832 * into a permanent error state
3834 * once this threshold is reached,
3835 * the operator will have to manually
3836 * issue a 'bos salvage' to bring
3837 * the volume back online
3841 * check whether a salvage needs to be performed on this volume.
3843 * @param[in] vp pointer to volume object
3845 * @return status code
3846 * @retval 0 no salvage scheduled
3847 * @retval 1 a salvage has been scheduled with the salvageserver
3849 * @pre VOL_LOCK is held
3851 * @post if salvage request flag is set and nUsers and nWaiters are zero,
3852 * then a salvage will be requested
3854 * @note this is one of the event handlers called by VCancelReservation_r
3856 * @see VCancelReservation_r
3858 * @internal volume package internal use only.
3861 VCheckSalvage(register Volume * vp)
3864 #ifdef SALVSYNC_BUILD_CLIENT
3865 if (vp->nUsers || vp->nWaiters)
3867 if (vp->salvage.requested) {
3868 VScheduleSalvage_r(vp);
3871 #endif /* SALVSYNC_BUILD_CLIENT */
3876 * request volume salvage.
3878 * @param[out] ec computed client error code
3879 * @param[in] vp volume object pointer
3880 * @param[in] reason reason code (passed to salvageserver via SALVSYNC)
3881 * @param[in] flags see flags note below
3884 * VOL_SALVAGE_INVALIDATE_HEADER causes volume header cache entry
3885 * to be invalidated.
3887 * @pre VOL_LOCK is held.
3889 * @post volume state is changed.
3890 * for fileserver, salvage will be requested once refcount reaches zero.
3892 * @return operation status code
3893 * @retval 0 volume salvage will occur
3894 * @retval 1 volume salvage could not be scheduled
3896 * @note DAFS fileserver only
3898 * @note this call does not synchronously schedule a volume salvage. rather,
3899 * it sets volume state so that when volume refcounts reach zero, a
3900 * volume salvage will occur. by "refcounts", we mean both nUsers and
3901 * nWaiters must be zero.
3903 * @internal volume package internal use only.
3906 VRequestSalvage_r(Error * ec, Volume * vp, int reason, int flags)
3910 * for DAFS volume utilities, transition to error state
3911 * (at some point in the future, we should consider
3912 * making volser talk to salsrv)
3914 if (programType != fileServer) {
3915 VChangeState_r(vp, VOL_STATE_ERROR);
3920 if (!vp->salvage.requested) {
3921 vp->salvage.requested = 1;
3922 vp->salvage.reason = reason;
3923 vp->stats.last_salvage = FT_ApproxTime();
3924 if (flags & VOL_SALVAGE_INVALIDATE_HEADER) {
3925 /* XXX this should likely be changed to FreeVolumeHeader() */
3926 ReleaseVolumeHeader(vp->header);
3928 if (vp->stats.salvages < SALVAGE_COUNT_MAX) {
3929 VChangeState_r(vp, VOL_STATE_SALVAGING);
3932 Log("VRequestSalvage: volume %u online salvaged too many times; forced offline.\n", vp->hashid);
3933 VChangeState_r(vp, VOL_STATE_ERROR);
3942 * update salvageserver scheduling priority for a volume.
3944 * @param[in] vp pointer to volume object
3946 * @return operation status
3948 * @retval 1 request denied, or SALVSYNC communications failure
3950 * @pre VOL_LOCK is held.
3952 * @post in-core salvage priority counter is incremented. if at least
3953 * SALVAGE_PRIO_UPDATE_INTERVAL seconds have elapsed since the
3954 * last SALVSYNC_RAISEPRIO request, we contact the salvageserver
3955 * to update its priority queue. if no salvage is scheduled,
3956 * this function is a no-op.
3958 * @note DAFS fileserver only
3960 * @note this should be called whenever a VGetVolume fails due to a
3961 * pending salvage request
3963 * @todo should set exclusive state and drop glock around salvsync call
3965 * @internal volume package internal use only.
3968 VUpdateSalvagePriority_r(Volume * vp)
3973 #ifdef SALVSYNC_BUILD_CLIENT
3975 now = FT_ApproxTime();
3977 /* update the salvageserver priority queue occasionally so that
3978 * frequently requested volumes get moved to the head of the queue
3980 if ((vp->salvage.scheduled) &&
3981 (vp->stats.last_salvage_req < (now-SALVAGE_PRIO_UPDATE_INTERVAL))) {
3982 code = SALVSYNC_SalvageVolume(vp->hashid,
3983 VPartitionPath(vp->partition),
3988 vp->stats.last_salvage_req = now;
3989 if (code != SYNC_OK) {
3993 #endif /* SALVSYNC_BUILD_CLIENT */
3999 * schedule a salvage with the salvage server.
4001 * @param[in] vp pointer to volume object
4003 * @return operation status
4004 * @retval 0 salvage scheduled successfully
4005 * @retval 1 salvage not scheduled, or SALVSYNC com error
4008 * @arg VOL_LOCK is held.
4009 * @arg nUsers and nWaiters should be zero.
4011 * @post salvageserver is sent a salvage request
4013 * @note DAFS fileserver only
4015 * @internal volume package internal use only.
4018 VScheduleSalvage_r(Volume * vp)
4021 #ifdef SALVSYNC_BUILD_CLIENT
4022 VolState state_save;
4023 VThreadOptions_t * thread_opts;
4026 if (vp->nWaiters || vp->nUsers) {
4030 /* prevent endless salvage,attach,salvage,attach,... loops */
4031 if (vp->stats.salvages >= SALVAGE_COUNT_MAX)
4035 * don't perform salvsync ops on certain threads
4037 thread_opts = pthread_getspecific(VThread_key);
4038 if (thread_opts == NULL) {
4039 thread_opts = &VThread_defaults;
4041 if (thread_opts->disallow_salvsync) {
4046 * XXX the scheduling process should really be done asynchronously
4047 * to avoid fssync deadlocks
4049 if (!vp->salvage.scheduled) {
4050 /* if we haven't previously scheduled a salvage, do so now
4052 * set the volume to an exclusive state and drop the lock
4053 * around the SALVSYNC call
4055 * note that we do NOT acquire a reservation here -- doing so
4056 * could result in unbounded recursion
4058 strlcpy(partName, VPartitionPath(vp->partition), sizeof(partName));
4059 state_save = VChangeState_r(vp, VOL_STATE_SALVSYNC_REQ);
4062 /* can't use V_id() since there's no guarantee
4063 * we have the disk data header at this point */
4064 code = SALVSYNC_SalvageVolume(vp->hashid,
4071 VChangeState_r(vp, state_save);
4073 if (code == SYNC_OK) {
4074 vp->salvage.scheduled = 1;
4075 vp->stats.salvages++;
4076 vp->stats.last_salvage_req = FT_ApproxTime();
4077 IncUInt64(&VStats.salvages);
4081 case SYNC_BAD_COMMAND:
4082 case SYNC_COM_ERROR:
4085 Log("VScheduleSalvage_r: SALVSYNC request denied\n");
4088 Log("VScheduleSalvage_r: SALVSYNC unknown protocol error\n");
4093 #endif /* SALVSYNC_BUILD_CLIENT */
4098 * ask salvageserver to cancel a scheduled salvage operation.
4100 * @param[in] vp pointer to volume object
4101 * @param[in] reason SALVSYNC protocol reason code
4103 * @return operation status
4105 * @retval 1 request failed
4107 * @pre VOL_LOCK is held.
4109 * @post salvageserver is sent a request to cancel the volume salvage.
4110 * volume is transitioned to a hard error state.
4112 * @internal volume package internal use only.
4115 VCancelSalvage_r(Volume * vp, int reason)
4119 #ifdef SALVSYNC_BUILD_CLIENT
4120 if (vp->salvage.scheduled) {
4121 VChangeState_r(vp, VOL_STATE_SALVSYNC_REQ);
4124 /* can't use V_id() since there's no guarantee
4125 * we have the disk data header at this point */
4126 code = SALVSYNC_SalvageVolume(vp->hashid,
4127 VPartitionPath(vp->partition),
4134 VChangeState_r(vp, VOL_STATE_ERROR);
4136 if (code == SYNC_OK) {
4137 vp->salvage.scheduled = 0;
4138 vp->salvage.requested = 0;
4143 #endif /* SALVSYNC_BUILD_CLIENT */
4148 #ifdef SALVSYNC_BUILD_CLIENT
4150 * connect to the salvageserver SYNC service.
4152 * @return operation status
4156 * @post connection to salvageserver SYNC service established
4158 * @see VConnectSALV_r
4159 * @see VDisconnectSALV
4160 * @see VReconnectSALV
4167 retVal = VConnectSALV_r();
4173 * connect to the salvageserver SYNC service.
4175 * @return operation status
4179 * @pre VOL_LOCK is held.
4181 * @post connection to salvageserver SYNC service established
4184 * @see VDisconnectSALV_r
4185 * @see VReconnectSALV_r
4186 * @see SALVSYNC_clientInit
4188 * @internal volume package internal use only.
4191 VConnectSALV_r(void)
4193 return SALVSYNC_clientInit();
4197 * disconnect from the salvageserver SYNC service.
4199 * @return operation status
4202 * @pre client should have a live connection to the salvageserver
4204 * @post connection to salvageserver SYNC service destroyed
4206 * @see VDisconnectSALV_r
4208 * @see VReconnectSALV
4211 VDisconnectSALV(void)
4215 VDisconnectSALV_r();
4221 * disconnect from the salvageserver SYNC service.
4223 * @return operation status
4227 * @arg VOL_LOCK is held.
4228 * @arg client should have a live connection to the salvageserver.
4230 * @post connection to salvageserver SYNC service destroyed
4232 * @see VDisconnectSALV
4233 * @see VConnectSALV_r
4234 * @see VReconnectSALV_r
4235 * @see SALVSYNC_clientFinis
4237 * @internal volume package internal use only.
4240 VDisconnectSALV_r(void)
4242 return SALVSYNC_clientFinis();
4246 * disconnect and then re-connect to the salvageserver SYNC service.
4248 * @return operation status
4252 * @pre client should have a live connection to the salvageserver
4254 * @post old connection is dropped, and a new one is established
4257 * @see VDisconnectSALV
4258 * @see VReconnectSALV_r
4261 VReconnectSALV(void)
4265 retVal = VReconnectSALV_r();
4271 * disconnect and then re-connect to the salvageserver SYNC service.
4273 * @return operation status
4278 * @arg VOL_LOCK is held.
4279 * @arg client should have a live connection to the salvageserver.
4281 * @post old connection is dropped, and a new one is established
4283 * @see VConnectSALV_r
4284 * @see VDisconnectSALV
4285 * @see VReconnectSALV
4286 * @see SALVSYNC_clientReconnect
4288 * @internal volume package internal use only.
4291 VReconnectSALV_r(void)
4293 return SALVSYNC_clientReconnect();
4295 #endif /* SALVSYNC_BUILD_CLIENT */
4296 #endif /* AFS_DEMAND_ATTACH_FS */
4299 /***************************************************/
4300 /* FSSYNC routines */
4301 /***************************************************/
4303 /* This must be called by any volume utility which needs to run while the
4304 file server is also running. This is separated from VInitVolumePackage so
4305 that a utility can fork--and each of the children can independently
4306 initialize communication with the file server */
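/* Illustrative sketch, not part of the original source: the fork pattern
 * described above, using only the FSSYNC client entry points declared
 * below.  Return-value checks are omitted; fork() and _exit() are the
 * usual POSIX calls.
 *
 *     VConnectFS();                      // parent connects once (VInit == 2)
 *     if (fork() == 0) {
 *         VChildProcReconnectFS();       // child discards the inherited
 *                                        // handle and opens its own
 *         // ... child performs its volume operations ...
 *         VDisconnectFS();
 *         _exit(0);
 *     }
 */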
4307 #ifdef FSSYNC_BUILD_CLIENT
4309 * connect to the fileserver SYNC service.
4311 * @return operation status
4316 * @arg VInit must equal 2.
4317 * @arg Program Type must not be fileserver or salvager.
4319 * @post connection to fileserver SYNC service established
4322 * @see VDisconnectFS
4323 * @see VChildProcReconnectFS
4330 retVal = VConnectFS_r();
4336 * connect to the fileserver SYNC service.
4338 * @return operation status
4343 * @arg VInit must equal 2.
4344 * @arg Program Type must not be fileserver or salvager.
4345 * @arg VOL_LOCK is held.
4347 * @post connection to fileserver SYNC service established
4350 * @see VDisconnectFS_r
4351 * @see VChildProcReconnectFS_r
4353 * @internal volume package internal use only.
4359 assert((VInit == 2) &&
4360 (programType != fileServer) &&
4361 (programType != salvager));
4362 rc = FSYNC_clientInit();
4369 * disconnect from the fileserver SYNC service.
4372 * @arg client should have a live connection to the fileserver.
4373 * @arg VOL_LOCK is held.
4374 * @arg Program Type must not be fileserver or salvager.
4376 * @post connection to fileserver SYNC service destroyed
4378 * @see VDisconnectFS
4380 * @see VChildProcReconnectFS_r
4382 * @internal volume package internal use only.
4385 VDisconnectFS_r(void)
4387 assert((programType != fileServer) &&
4388 (programType != salvager));
4389 FSYNC_clientFinis();
4394 * disconnect from the fileserver SYNC service.
4397 * @arg client should have a live connection to the fileserver.
4398 * @arg Program Type must not be fileserver or salvager.
4400 * @post connection to fileserver SYNC service destroyed
4402 * @see VDisconnectFS_r
4404 * @see VChildProcReconnectFS
4415 * connect to the fileserver SYNC service from a child process following a fork.
4417 * @return operation status
4422 * @arg VOL_LOCK is held.
4423 * @arg current FSYNC handle is shared with a parent process
4425 * @post current FSYNC handle is discarded and a new connection to the
4426 * fileserver SYNC service is established
4428 * @see VChildProcReconnectFS
4430 * @see VDisconnectFS_r
4432 * @internal volume package internal use only.
4435 VChildProcReconnectFS_r(void)
4437 return FSYNC_clientChildProcReconnect();
4441 * connect to the fileserver SYNC service from a child process following a fork.
4443 * @return operation status
4447 * @pre current FSYNC handle is shared with a parent process
4449 * @post current FSYNC handle is discarded and a new connection to the
4450 * fileserver SYNC service is established
4452 * @see VChildProcReconnectFS_r
4454 * @see VDisconnectFS
4457 VChildProcReconnectFS(void)
4461 ret = VChildProcReconnectFS_r();
4465 #endif /* FSSYNC_BUILD_CLIENT */
4468 /***************************************************/
4469 /* volume bitmap routines */
4470 /***************************************************/
4473 * For demand attach fs, flags parameter controls
4474 * locking behavior. If (flags & VOL_ALLOC_BITMAP_WAIT)
4475 * is set, then this function will create a reservation
4476 * and block on any other exclusive operations. Otherwise,
4477 * this function assumes the caller already has exclusive
4478 * access to vp, and we just change the volume state.
4481 VAllocBitmapEntry_r(Error * ec, Volume * vp,
4482 struct vnodeIndex *index, int flags)
4485 register byte *bp, *ep;
4486 #ifdef AFS_DEMAND_ATTACH_FS
4487 VolState state_save;
4488 #endif /* AFS_DEMAND_ATTACH_FS */
4492 /* This test is probably redundant */
4493 if (!VolumeWriteable(vp)) {
4494 *ec = (bit32) VREADONLY;
4498 #ifdef AFS_DEMAND_ATTACH_FS
4499 if (flags & VOL_ALLOC_BITMAP_WAIT) {
4500 VCreateReservation_r(vp);
4501 VWaitExclusiveState_r(vp);
4503 state_save = VChangeState_r(vp, VOL_STATE_GET_BITMAP);
4504 #endif /* AFS_DEMAND_ATTACH_FS */
4507 if ((programType == fileServer) && !index->bitmap) {
4509 #ifndef AFS_DEMAND_ATTACH_FS
4510 /* demand attach fs uses the volume state to avoid races.
4511 * specialStatus field is not used at all */
4513 if (vp->specialStatus == VBUSY) {
4514 if (vp->goingOffline) { /* vos dump waiting for the volume to
4515 * go offline. We probably come here
4516 * from AddNewReadableResidency */
4519 while (vp->specialStatus == VBUSY) {
4520 #ifdef AFS_PTHREAD_ENV
4524 #else /* !AFS_PTHREAD_ENV */
4526 #endif /* !AFS_PTHREAD_ENV */
4530 #endif /* !AFS_DEMAND_ATTACH_FS */
4532 if (!index->bitmap) {
4533 #ifndef AFS_DEMAND_ATTACH_FS
4534 vp->specialStatus = VBUSY; /* Stop anyone else from using it. */
4535 #endif /* AFS_DEMAND_ATTACH_FS */
4536 for (i = 0; i < nVNODECLASSES; i++) {
4537 VGetBitmap_r(ec, vp, i);
4539 #ifdef AFS_DEMAND_ATTACH_FS
4540 VRequestSalvage_r(ec, vp, SALVSYNC_ERROR, VOL_SALVAGE_INVALIDATE_HEADER);
4541 #else /* AFS_DEMAND_ATTACH_FS */
4542 DeleteVolumeFromHashTable(vp);
4543 vp->shuttingDown = 1; /* Let who has it free it. */
4544 vp->specialStatus = 0;
4545 #endif /* AFS_DEMAND_ATTACH_FS */
4550 #ifndef AFS_DEMAND_ATTACH_FS
4552 vp->specialStatus = 0; /* Allow others to have access. */
4553 #endif /* AFS_DEMAND_ATTACH_FS */
4556 #endif /* BITMAP_LATER */
4558 #ifdef AFS_DEMAND_ATTACH_FS
4560 #endif /* AFS_DEMAND_ATTACH_FS */
4561 bp = index->bitmap + index->bitmapOffset;
4562 ep = index->bitmap + index->bitmapSize;
4564 if ((*(bit32 *) bp) != (bit32) 0xffffffff) {
4566 index->bitmapOffset = (afs_uint32) (bp - index->bitmap);
4569 o = ffs(~*bp) - 1; /* ffs is documented in BSTRING(3) */
4571 ret = (VnodeId) ((bp - index->bitmap) * 8 + o);
4572 #ifdef AFS_DEMAND_ATTACH_FS
4574 #endif /* AFS_DEMAND_ATTACH_FS */
4577 bp += sizeof(bit32) /* i.e. 4 */ ;
4579 /* No bit map entry--must grow bitmap */
4581 realloc(index->bitmap, index->bitmapSize + VOLUME_BITMAP_GROWSIZE);
4584 bp += index->bitmapSize;
4585 memset(bp, 0, VOLUME_BITMAP_GROWSIZE);
4586 index->bitmapOffset = index->bitmapSize;
4587 index->bitmapSize += VOLUME_BITMAP_GROWSIZE;
4589 ret = index->bitmapOffset * 8;
4590 #ifdef AFS_DEMAND_ATTACH_FS
4592 #endif /* AFS_DEMAND_ATTACH_FS */
4595 #ifdef AFS_DEMAND_ATTACH_FS
4596 VChangeState_r(vp, state_save);
4597 if (flags & VOL_ALLOC_BITMAP_WAIT) {
4598 VCancelReservation_r(vp);
4600 #endif /* AFS_DEMAND_ATTACH_FS */
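/* Worked example, not part of the original source (little-endian host
 * assumed): if the 32-bit word at bp is 0xfffffff0, then ~*bp is
 * 0x0000000f, ffs() returns 1, and o = 0; with bp at the start of the
 * bitmap the routine hands out bit number (0 * 8) + 0 = 0.  When every
 * word is 0xffffffff, the bitmap is grown by VOLUME_BITMAP_GROWSIZE bytes
 * and the first bit of the new region, index->bitmapOffset * 8, is
 * returned instead.
 */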
4605 VAllocBitmapEntry(Error * ec, Volume * vp, register struct vnodeIndex * index)
4609 retVal = VAllocBitmapEntry_r(ec, vp, index, VOL_ALLOC_BITMAP_WAIT);
4615 VFreeBitMapEntry_r(Error * ec, register struct vnodeIndex *index,
4618 unsigned int offset;
4624 #endif /* BITMAP_LATER */
4625 offset = bitNumber >> 3;
4626 if (offset >= index->bitmapSize) {
4630 if (offset < index->bitmapOffset)
4631 index->bitmapOffset = offset & ~3; /* Truncate to nearest bit32 */
4632 *(index->bitmap + offset) &= ~(1 << (bitNumber & 0x7));
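/* Worked example, not part of the original source: freeing bit number 11
 * computes offset = 11 >> 3 = 1 and clears bit (11 & 0x7) = 3 of
 * index->bitmap[1]; if the allocation cursor (bitmapOffset) was beyond
 * that byte, it is pulled back to offset & ~3 = 0, so the next allocation
 * scan restarts at the 32-bit word now known to contain a free bit.
 */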
4636 VFreeBitMapEntry(Error * ec, register struct vnodeIndex *index,
4640 VFreeBitMapEntry_r(ec, index, bitNumber);
4644 /* this function will drop the glock internally.
4645 * for old pthread fileservers, this is safe thanks to vbusy.
4647 * for demand attach fs, caller must have already called
4648 * VCreateReservation_r and VWaitExclusiveState_r */
4650 VGetBitmap_r(Error * ec, Volume * vp, VnodeClass class)
4652 StreamHandle_t *file;
4655 struct VnodeClassInfo *vcp = &VnodeClassInfo[class];
4656 struct vnodeIndex *vip = &vp->vnodeIndex[class];
4657 struct VnodeDiskObject *vnode;
4658 unsigned int unique = 0;
4662 #endif /* BITMAP_LATER */
4663 #ifdef AFS_DEMAND_ATTACH_FS
4664 VolState state_save;
4665 #endif /* AFS_DEMAND_ATTACH_FS */
4669 #ifdef AFS_DEMAND_ATTACH_FS
4670 state_save = VChangeState_r(vp, VOL_STATE_GET_BITMAP);
4671 #endif /* AFS_DEMAND_ATTACH_FS */
4674 fdP = IH_OPEN(vip->handle);
4675 assert(fdP != NULL);
4676 file = FDH_FDOPEN(fdP, "r");
4677 assert(file != NULL);
4678 vnode = (VnodeDiskObject *) malloc(vcp->diskSize);
4679 assert(vnode != NULL);
4680 size = OS_SIZE(fdP->fd_fd);
4682 nVnodes = (size <= vcp->diskSize ? 0 : size - vcp->diskSize)
4684 vip->bitmapSize = ((nVnodes / 8) + 10) / 4 * 4; /* The 10 is a little extra so
4685 * a few files can be created in this volume,
4686 * the whole thing is rounded up to nearest 4
4687 * bytes, because the bit map allocator likes
4690 BitMap = (byte *) calloc(1, vip->bitmapSize);
4691 assert(BitMap != NULL);
4692 #else /* BITMAP_LATER */
4693 vip->bitmap = (byte *) calloc(1, vip->bitmapSize);
4694 assert(vip->bitmap != NULL);
4695 vip->bitmapOffset = 0;
4696 #endif /* BITMAP_LATER */
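/* Worked example, not part of the original source: for an index file
 * holding 1000 vnodes, nVnodes = 1000 and
 * vip->bitmapSize = ((1000 / 8) + 10) / 4 * 4 = 135 / 4 * 4 = 132 bytes,
 * i.e. 1056 bits: room for every existing vnode plus slack for new files,
 * truncated to a multiple of 4 bytes so the allocator can scan whole
 * 32-bit words.
 */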
4697 if (STREAM_SEEK(file, vcp->diskSize, 0) != -1) {
4699 for (bitNumber = 0; bitNumber < nVnodes + 100; bitNumber++) {
4700 if (STREAM_READ(vnode, vcp->diskSize, 1, file) != 1)
4702 if (vnode->type != vNull) {
4703 if (vnode->vnodeMagic != vcp->magic) {
4704 Log("GetBitmap: addled vnode index in volume %s; volume needs salvage\n", V_name(vp));
4709 *(BitMap + (bitNumber >> 3)) |= (1 << (bitNumber & 0x7));
4710 #else /* BITMAP_LATER */
4711 *(vip->bitmap + (bitNumber >> 3)) |= (1 << (bitNumber & 0x7));
4712 #endif /* BITMAP_LATER */
4713 if (unique <= vnode->uniquifier)
4714 unique = vnode->uniquifier + 1;
4716 #ifndef AFS_PTHREAD_ENV
4717 if ((bitNumber & 0x00ff) == 0x0ff) { /* every 256 iterations */
4720 #endif /* !AFS_PTHREAD_ENV */
4723 if (vp->nextVnodeUnique < unique) {
4724 Log("GetBitmap: bad volume uniquifier for volume %s; volume needs salvage\n", V_name(vp));
4727 /* Paranoia, partly justified--fclose after fdopen does not appear
4728 * to close the underlying fd. In any event, the documentation
4729 * doesn't specify, so it's safer to close it twice.
4737 /* There may have been a race condition with some other thread, both
4738 * creating the bitmaps for this volume. If the other thread was faster,
4739 * the pointer to the bitmap should already be filled and we can free ours.
4741 if (vip->bitmap == NULL) {
4742 vip->bitmap = BitMap;
4743 vip->bitmapOffset = 0;
4745 free((byte *) BitMap);
4746 #endif /* BITMAP_LATER */
4747 #ifdef AFS_DEMAND_ATTACH_FS
4748 VChangeState_r(vp, state_save);
4749 #endif /* AFS_DEMAND_ATTACH_FS */
4753 /***************************************************/
4754 /* Volume Path and Volume Number utility routines */
4755 /***************************************************/
4758 * find the first occurrence of a volume header file and return the path.
4760 * @param[out] ec outbound error code
4761 * @param[in] volumeId volume id to find
4762 * @param[out] partitionp pointer to disk partition path string
4763 * @param[out] namep pointer to volume header file name string
4765 * @post path to first occurrence of volume header is returned in partitionp
4766 * and namep, or ec is set accordingly.
4768 * @warning this function is NOT re-entrant -- partitionp and namep point to
4769 * static data segments
4771 * @note if a volume utility inadvertently leaves behind a stale volume header
4772 * on a vice partition, it is possible for callers to get the wrong one,
4773 * depending on the order of the disk partition linked list.
4777 VGetVolumePath(Error * ec, VolId volumeId, char **partitionp, char **namep)
4779 static char partition[VMAXPATHLEN], name[VMAXPATHLEN];
4780 char path[VMAXPATHLEN];
4782 struct DiskPartition64 *dp;
4786 (void)afs_snprintf(&name[1], (sizeof name) - 1, VFORMAT, volumeId);
4787 for (dp = DiskPartitionList; dp; dp = dp->next) {
4788 struct afs_stat status;
4789 strcpy(path, VPartitionPath(dp));
4791 if (afs_stat(path, &status) == 0) {
4792 strcpy(partition, dp->name);
4799 *partitionp = *namep = NULL;
4801 *partitionp = partition;
4807 * extract a volume number from a volume header filename string.
4809 * @param[in] name volume header filename string
4811 * @return volume number
4813 * @note the string must be of the form VFORMAT. the only permissible
4814 * deviation is a leading '/' character.
4819 VolumeNumber(char *name)
4823 return atoi(name + 1);
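/* Illustrative example, not part of the original source, using a
 * hypothetical id: if VFORMAT yields a name like "V0536870918.vol",
 * name + 1 skips the leading 'V' and atoi() stops at the '.', giving
 * volume number 536870918.  (Handling of the optional leading '/'
 * mentioned above is elided from this excerpt.)
 */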
4827 * compute the volume header filename.
4829 * @param[in] volumeId
4831 * @return volume header filename
4833 * @post volume header filename string is constructed
4835 * @warning this function is NOT re-entrant -- the returned string is
4836 * stored in a static char array. see VolumeExternalName_r
4837 * for a re-entrant equivalent.
4839 * @see VolumeExternalName_r
4841 * @deprecated due to the above re-entrancy warning, this interface should
4842 * be considered deprecated. Please use VolumeExternalName_r
4846 VolumeExternalName(VolumeId volumeId)
4848 static char name[VMAXPATHLEN];
4849 (void)afs_snprintf(name, sizeof name, VFORMAT, volumeId);
4854 * compute the volume header filename.
4856 * @param[in] volumeId
4857 * @param[inout] name array in which to store filename
4858 * @param[in] len length of name array
4860 * @return result code from afs_snprintf
4862 * @see VolumeExternalName
4865 * @note re-entrant equivalent of VolumeExternalName
4867 * @internal volume package internal use only.
4870 VolumeExternalName_r(VolumeId volumeId, char * name, size_t len)
4872 return afs_snprintf(name, len, VFORMAT, volumeId);
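/* Illustrative usage, not part of the original source: unlike the
 * static-buffer variant above, the caller supplies the storage, so this
 * form is safe to use from multiple threads.
 *
 *     char headerName[VMAXPATHLEN];
 *     VolumeExternalName_r(volumeId, headerName, sizeof(headerName));
 *     // headerName now holds the VFORMAT-style header file name
 */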
4876 /***************************************************/
4877 /* Volume Usage Statistics routines */
4878 /***************************************************/
4880 #if OPENAFS_VOL_STATS
4881 #define OneDay (86400) /* 24 hours' worth of seconds */
4883 #define OneDay (24*60*60) /* 24 hours */
4884 #endif /* OPENAFS_VOL_STATS */
4886 #define Midnight(date) ((date-TimeZoneCorrection)/OneDay*OneDay+TimeZoneCorrection)
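/*
 * Worked example of the Midnight() macro (illustrative values): with
 * TimeZoneCorrection == 0 and date == 90061 (01:01:01 into the second day
 * of the epoch), the integer division by OneDay truncates to one whole day,
 * so Midnight(date) == 86400.  In general the macro shifts the timestamp
 * into local time, rounds down to a whole number of days, and shifts back.
 */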
4888 /*------------------------------------------------------------------------
4889 * [export] VAdjustVolumeStatistics
4892 * If we've passed midnight, we need to update all the day use
4893 * statistics as well as zero the detailed volume statistics
4894 * (if we are implementing them).
4897 * vp : Pointer to the volume structure describing the lucky
4898 * volume being considered for update.
4904 * Nothing interesting.
4908 *------------------------------------------------------------------------*/
4911 VAdjustVolumeStatistics_r(register Volume * vp)
4913 unsigned int now = FT_ApproxTime();
4915 if (now - V_dayUseDate(vp) > OneDay) {
4916 register int ndays, i;
4918 ndays = (now - V_dayUseDate(vp)) / OneDay;
4919 for (i = 6; i > ndays - 1; i--)
4920 V_weekUse(vp)[i] = V_weekUse(vp)[i - ndays];
4921 for (i = 0; i < ndays - 1 && i < 7; i++)
4922 V_weekUse(vp)[i] = 0;
4924 V_weekUse(vp)[ndays - 1] = V_dayUse(vp);
4926 V_dayUseDate(vp) = Midnight(now);
4928 #if OPENAFS_VOL_STATS
4930 * All we need to do is bzero the entire VOL_STATS_BYTES of
4931 * the detailed volume statistics area.
4933 memset((char *)(V_stat_area(vp)), 0, VOL_STATS_BYTES);
4934 #endif /* OPENAFS_VOL_STATS */
4937 /* It's been more than a day of collection */
4939 * Always return happily.
4942 } /*VAdjustVolumeStatistics */
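/*
 * Worked example of the rotation above (illustrative): suppose exactly two
 * days have elapsed (ndays == 2) and weekUse previously held
 * {d0, d1, d2, d3, d4, d5, d6}, with index 0 being the most recently
 * completed day.  The first loop copies weekUse[i-2] into weekUse[i] for
 * i = 6..2, the second loop zeroes weekUse[0], and weekUse[1] receives the
 * dayUse counter that was accumulating before the rollover, giving
 * {0, dayUse, d0, d1, d2, d3, d4}.
 */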
4945 VAdjustVolumeStatistics(register Volume * vp)
4949 retVal = VAdjustVolumeStatistics_r(vp);
4955 VBumpVolumeUsage_r(register Volume * vp)
4957 unsigned int now = FT_ApproxTime();
4958 if (now - V_dayUseDate(vp) > OneDay)
4959 VAdjustVolumeStatistics_r(vp);
4961 * Save the volume header image to disk after every 128 bumps to dayUse.
4963 if ((V_dayUse(vp)++ & 127) == 0) {
4965 VUpdateVolume_r(&error, vp, VOL_UPDATE_WAIT);
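/*
 * Note (explanatory, not original): because the test uses the pre-increment
 * value of V_dayUse(vp), the header is flushed whenever the previous value
 * is a multiple of 128 -- i.e. at most one synchronous header update per
 * 128 usage bumps.
 */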
4970 VBumpVolumeUsage(register Volume * vp)
4973 VBumpVolumeUsage_r(vp);
4978 VSetDiskUsage_r(void)
4980 #ifndef AFS_DEMAND_ATTACH_FS
4981 static int FifteenMinuteCounter = 0;
4985 /* NOTE: Don't attempt to access the partitions list until the
4986 * initialization level indicates that all volumes are attached,
4987 * which implies that all partitions are initialized. */
4988 #ifdef AFS_PTHREAD_ENV
4990 #else /* AFS_PTHREAD_ENV */
4992 #endif /* AFS_PTHREAD_ENV */
4995 VResetDiskUsage_r();
4997 #ifndef AFS_DEMAND_ATTACH_FS
4998 if (++FifteenMinuteCounter == 3) {
4999 FifteenMinuteCounter = 0;
5002 #endif /* !AFS_DEMAND_ATTACH_FS */
5014 /***************************************************/
5015 /* Volume Update List routines */
5016 /***************************************************/
5018 /* The number of seconds that must pass without a volume being updated before
5019 * the "Don't salvage" flag in the volume header will be turned on */
5020 #define SALVAGE_INTERVAL (10*60)
5025 * volume update list functionality has been moved into the VLRU
5026 * the DONT_SALVAGE flag is now set during VLRU demotion
5029 #ifndef AFS_DEMAND_ATTACH_FS
5030 static VolumeId *UpdateList = NULL; /* Pointer to array of Volume ID's */
5031 static int nUpdatedVolumes = 0; /* Updated with entry in UpdateList, salvage after crash flag on */
5032 static int updateSize = 0; /* number of entries possible */
5033 #define UPDATE_LIST_SIZE 128 /* initial size increment (must be a power of 2!) */
5034 #endif /* !AFS_DEMAND_ATTACH_FS */
5037 VAddToVolumeUpdateList_r(Error * ec, Volume * vp)
5040 vp->updateTime = FT_ApproxTime();
5041 if (V_dontSalvage(vp) == 0)
5043 V_dontSalvage(vp) = 0;
5044 VSyncVolume_r(ec, vp, 0);
5045 #ifdef AFS_DEMAND_ATTACH_FS
5046 V_attachFlags(vp) &= ~(VOL_HDR_DONTSALV);
5047 #else /* !AFS_DEMAND_ATTACH_FS */
5050 if (UpdateList == NULL) {
5051 updateSize = UPDATE_LIST_SIZE;
5052 UpdateList = (VolumeId *) malloc(sizeof(VolumeId) * updateSize);
5054 if (nUpdatedVolumes == updateSize) {
5056 if (updateSize > 524288) {
5057 Log("warning: there is likely a bug in the volume update scanner\n");
5061 (VolumeId *) realloc(UpdateList,
5062 sizeof(VolumeId) * updateSize);
5065 assert(UpdateList != NULL);
5066 UpdateList[nUpdatedVolumes++] = V_id(vp);
5067 #endif /* !AFS_DEMAND_ATTACH_FS */
5070 #ifndef AFS_DEMAND_ATTACH_FS
5072 VScanUpdateList(void)
5074 register int i, gap;
5075 register Volume *vp;
5077 afs_uint32 now = FT_ApproxTime();
5078 /* Be careful with this code, since it works with interleaved calls to AddToVolumeUpdateList */
5079 for (i = gap = 0; i < nUpdatedVolumes; i++) {
5081 UpdateList[i - gap] = UpdateList[i];
5083 /* XXX this routine needlessly messes up the Volume LRU by
5084 * breaking the LRU temporal-locality assumptions.....
5085 * we should use a special volume header allocator here */
5086 vp = VGetVolume_r(&error, UpdateList[i - gap] = UpdateList[i]);
5089 } else if (vp->nUsers == 1 && now - vp->updateTime > SALVAGE_INTERVAL) {
5090 V_dontSalvage(vp) = DONT_SALVAGE;
5091 VUpdateVolume_r(&error, vp, 0); /* No need to fsync--not critical */
5099 #ifndef AFS_PTHREAD_ENV
5101 #endif /* !AFS_PTHREAD_ENV */
5103 nUpdatedVolumes -= gap;
5105 #endif /* !AFS_DEMAND_ATTACH_FS */
5108 /***************************************************/
5109 /* Volume LRU routines */
5110 /***************************************************/
5115 * with demand attach fs, we attempt to soft detach(1)
5116 * volumes which have not been accessed in a long time
5117 * in order to speed up fileserver shutdown
5119 * (1) by soft detach we mean a process very similar
5120 * to VOffline, except the final state of the
5121 * Volume will be VOL_STATE_PREATTACHED, instead
5122 * of the usual VOL_STATE_UNATTACHED
5124 #ifdef AFS_DEMAND_ATTACH_FS
5126 /* implementation is reminiscent of a generational GC
5128 * queue 0 is newly attached volumes. this queue is
5129 * sorted by attach timestamp
5131 * queue 1 is volumes that have been around a bit
5132 * longer than queue 0. this queue is sorted by
5135 * queue 2 is volumes that have been around the longest.
5136 * this queue is unsorted
5138 * queue 3 is volumes that have been marked as
5139 * candidates for soft detachment. this queue is
5142 #define VLRU_GENERATIONS 3 /**< number of generations in VLRU */
5143 #define VLRU_QUEUES 5 /**< total number of VLRU queues */
5146 * definition of a VLRU queue.
5149 volatile struct rx_queue q;
5156 * main VLRU data structure.
5159 struct VLRU_q q[VLRU_QUEUES]; /**< VLRU queues */
5162 /** time interval (in seconds) between promotion passes for
5163 * each young generation queue. */
5164 afs_uint32 promotion_interval[VLRU_GENERATIONS-1];
5166 /** time interval (in seconds) between soft detach candidate
5167 * scans for each generation queue.
5169 * scan_interval[VLRU_QUEUE_CANDIDATE] defines how frequently
5170 * we perform a soft detach pass. */
5171 afs_uint32 scan_interval[VLRU_GENERATIONS+1];
5173 /* scheduler state */
5174 int next_idx; /**< next queue to receive attention */
5175 afs_uint32 last_promotion[VLRU_GENERATIONS-1]; /**< timestamp of last promotion scan */
5176 afs_uint32 last_scan[VLRU_GENERATIONS+1]; /**< timestamp of last detach scan */
5178 int scanner_state; /**< state of scanner thread */
5179 pthread_cond_t cv; /**< state transition CV */
5182 /** global VLRU state */
5183 static struct VLRU volume_LRU;
5186 * defined states for VLRU scanner thread.
5189 VLRU_SCANNER_STATE_OFFLINE = 0, /**< vlru scanner thread is offline */
5190 VLRU_SCANNER_STATE_ONLINE = 1, /**< vlru scanner thread is online */
5191 VLRU_SCANNER_STATE_SHUTTING_DOWN = 2, /**< vlru scanner thread is shutting down */
5192 VLRU_SCANNER_STATE_PAUSING = 3, /**< vlru scanner thread is getting ready to pause */
5193 VLRU_SCANNER_STATE_PAUSED = 4 /**< vlru scanner thread is paused */
5194 } vlru_thread_state_t;
5196 /* vlru disk data header stuff */
5197 #define VLRU_DISK_MAGIC 0x7a8b9cad /**< vlru disk entry magic number */
5198 #define VLRU_DISK_VERSION 1 /**< vlru disk entry version number */
5200 /** vlru default expiration time (for eventual fs state serialization of vlru data) */
5201 #define VLRU_DUMP_EXPIRATION_TIME (60*60*24*7) /* expire vlru data after 1 week */
5204 /** minimum volume inactivity (in seconds) before a volume becomes eligible for
5205 * soft detachment. */
5206 static afs_uint32 VLRU_offline_thresh = VLRU_DEFAULT_OFFLINE_THRESH;
5208 /** time interval (in seconds) between VLRU scanner thread soft detach passes. */
5209 static afs_uint32 VLRU_offline_interval = VLRU_DEFAULT_OFFLINE_INTERVAL;
5211 /** maximum number of volumes to soft detach in a VLRU soft detach pass. */
5212 static afs_uint32 VLRU_offline_max = VLRU_DEFAULT_OFFLINE_MAX;
5214 /** VLRU control flag. non-zero value implies VLRU subsystem is activated. */
5215 static afs_uint32 VLRU_enabled = 1;
5217 /* queue synchronization routines */
5218 static void VLRU_BeginExclusive_r(struct VLRU_q * q);
5219 static void VLRU_EndExclusive_r(struct VLRU_q * q);
5220 static void VLRU_Wait_r(struct VLRU_q * q);
5223 * set VLRU subsystem tunable parameters.
5225 * @param[in] option tunable option to modify
5226 * @param[in] val new value for tunable parameter
5228 * @pre @c VInitVolumePackage has not yet been called.
5230 * @post tunable parameter is modified
5234 * @note valid option parameters are:
5235 * @arg @c VLRU_SET_THRESH
5236 * set the period of inactivity after which
5237 * volumes are eligible for soft detachment
5238 * @arg @c VLRU_SET_INTERVAL
5239 * set the time interval between calls
5240 * to the volume LRU "garbage collector"
5241 * @arg @c VLRU_SET_MAX
5242 * set the max number of volumes to deallocate
5246 VLRU_SetOptions(int option, afs_uint32 val)
5248 if (option == VLRU_SET_THRESH) {
5249 VLRU_offline_thresh = val;
5250 } else if (option == VLRU_SET_INTERVAL) {
5251 VLRU_offline_interval = val;
5252 } else if (option == VLRU_SET_MAX) {
5253 VLRU_offline_max = val;
5254 } else if (option == VLRU_SET_ENABLED) {
5257 VLRU_ComputeConstants();
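/*
 * Usage sketch (hypothetical values; per the @pre above, this must run
 * before VInitVolumePackage): a fileserver that wants volumes to become
 * soft detach candidates after four hours of inactivity, with a detach pass
 * every 30 minutes and at most 8 volumes taken offline per pass, could
 * configure the VLRU as:
 *
 *   VLRU_SetOptions(VLRU_SET_THRESH, 4 * 60 * 60);
 *   VLRU_SetOptions(VLRU_SET_INTERVAL, 30 * 60);
 *   VLRU_SetOptions(VLRU_SET_MAX, 8);
 */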
5261 * compute VLRU internal timing parameters.
5263 * @post VLRU scanner thread internal timing parameters are computed
5265 * @note computes internal timing parameters based upon user-modifiable
5266 * tunable parameters.
5270 * @internal volume package internal use only.
5273 VLRU_ComputeConstants(void)
5275 afs_uint32 factor = VLRU_offline_thresh / VLRU_offline_interval;
5277 /* compute the candidate scan interval */
5278 volume_LRU.scan_interval[VLRU_QUEUE_CANDIDATE] = VLRU_offline_interval;
5280 /* compute the promotion intervals */
5281 volume_LRU.promotion_interval[VLRU_QUEUE_NEW] = VLRU_offline_thresh * 2;
5282 volume_LRU.promotion_interval[VLRU_QUEUE_MID] = VLRU_offline_thresh * 4;
5285 /* compute the gen 0 scan interval */
5286 volume_LRU.scan_interval[VLRU_QUEUE_NEW] = VLRU_offline_thresh / 8;
5288 /* compute the gen 0 scan interval */
5289 volume_LRU.scan_interval[VLRU_QUEUE_NEW] = VLRU_offline_interval * 2;
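/*
 * Worked example (hypothetical tunables): with VLRU_offline_thresh == 7200
 * seconds and VLRU_offline_interval == 120 seconds, factor == 60, the
 * candidate queue is scanned every 120 seconds, and promotions out of the
 * NEW and MID generations are considered every 14400 and 28800 seconds
 * respectively.  The generation-0 scan interval comes from whichever of the
 * two assignments above is selected.
 */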
5294 * initialize VLRU subsystem.
5296 * @pre this function has not yet been called
5298 * @post VLRU subsystem is initialized and VLRU scanner thread is starting
5302 * @internal volume package internal use only.
5308 pthread_attr_t attrs;
5311 if (!VLRU_enabled) {
5312 Log("VLRU: disabled\n");
5316 /* initialize each of the VLRU queues */
5317 for (i = 0; i < VLRU_QUEUES; i++) {
5318 queue_Init(&volume_LRU.q[i]);
5319 volume_LRU.q[i].len = 0;
5320 volume_LRU.q[i].busy = 0;
5321 assert(pthread_cond_init(&volume_LRU.q[i].cv, NULL) == 0);
5324 /* setup the timing constants */
5325 VLRU_ComputeConstants();
5327 /* XXX put inside LogLevel check? */
5328 Log("VLRU: starting scanner with the following configuration parameters:\n");
5329 Log("VLRU: offlining volumes after minimum of %d seconds of inactivity\n", VLRU_offline_thresh);
5330 Log("VLRU: running VLRU soft detach pass every %d seconds\n", VLRU_offline_interval);
5331 Log("VLRU: taking up to %d volumes offline per pass\n", VLRU_offline_max);
5332 Log("VLRU: scanning generation 0 for inactive volumes every %d seconds\n", volume_LRU.scan_interval[0]);
5333 Log("VLRU: scanning for promotion/demotion between generations 0 and 1 every %d seconds\n", volume_LRU.promotion_interval[0]);
5334 Log("VLRU: scanning for promotion/demotion between generations 1 and 2 every %d seconds\n", volume_LRU.promotion_interval[1]);
5336 /* start up the VLRU scanner */
5337 volume_LRU.scanner_state = VLRU_SCANNER_STATE_OFFLINE;
5338 if (programType == fileServer) {
5339 assert(pthread_cond_init(&volume_LRU.cv, NULL) == 0);
5340 assert(pthread_attr_init(&attrs) == 0);
5341 assert(pthread_attr_setdetachstate(&attrs, PTHREAD_CREATE_DETACHED) == 0);
5342 assert(pthread_create(&tid, &attrs, &VLRU_ScannerThread, NULL) == 0);
5347 * initialize the VLRU-related fields of a newly allocated volume object.
5349 * @param[in] vp pointer to volume object
5352 * @arg @c VOL_LOCK is held.
5353 * @arg volume object is not on a VLRU queue.
5355 * @post VLRU fields are initialized to indicate that volume object is not
5356 * currently registered with the VLRU subsystem
5360 * @internal volume package internal use only.
5363 VLRU_Init_Node_r(volatile Volume * vp)
5368 assert(queue_IsNotOnQueue(&vp->vlru));
5369 vp->vlru.idx = VLRU_QUEUE_INVALID;
5373 * add a volume object to a VLRU queue.
5375 * @param[in] vp pointer to volume object
5378 * @arg @c VOL_LOCK is held.
5379 * @arg caller MUST hold a lightweight ref on @p vp.
5380 * @arg caller MUST NOT hold exclusive ownership of the VLRU queue.
5382 * @post the volume object is added to the appropriate VLRU queue
5384 * @note if @c vp->vlru.idx contains the index of a valid VLRU queue,
5385 * then the volume is added to that queue. Otherwise, the value
5386 * @c VLRU_QUEUE_NEW is stored into @c vp->vlru.idx and the
5387 * volume is added to the NEW generation queue.
5389 * @note @c VOL_LOCK may be dropped internally
5391 * @note Volume state is temporarily set to @c VOL_STATE_VLRU_ADD
5392 * during the add operation, and is restored to the previous
5393 * state prior to return.
5397 * @internal volume package internal use only.
5400 VLRU_Add_r(volatile Volume * vp)
5403 VolState state_save;
5408 if (queue_IsOnQueue(&vp->vlru))
5411 state_save = VChangeState_r(vp, VOL_STATE_VLRU_ADD);
5414 if ((idx < 0) || (idx >= VLRU_QUEUE_INVALID)) {
5415 idx = VLRU_QUEUE_NEW;
5418 VLRU_Wait_r(&volume_LRU.q[idx]);
5420 /* repeat check since VLRU_Wait_r may have dropped
5422 if (queue_IsNotOnQueue(&vp->vlru)) {
5424 queue_Prepend(&volume_LRU.q[idx], &vp->vlru);
5425 volume_LRU.q[idx].len++;
5426 V_attachFlags(vp) |= VOL_ON_VLRU;
5427 vp->stats.last_promote = FT_ApproxTime();
5430 VChangeState_r(vp, state_save);
5434 * delete a volume object from a VLRU queue.
5436 * @param[in] vp pointer to volume object
5439 * @arg @c VOL_LOCK is held.
5440 * @arg caller MUST hold a lightweight ref on @p vp.
5441 * @arg caller MUST NOT hold exclusive ownership of the VLRU queue.
5443 * @post volume object is removed from the VLRU queue
5445 * @note @c VOL_LOCK may be dropped internally
5449 * @todo We should probably set volume state to something exclusive
5450 * (as @c VLRU_Add_r does) prior to dropping @c VOL_LOCK.
5452 * @internal volume package internal use only.
5455 VLRU_Delete_r(volatile Volume * vp)
5462 if (queue_IsNotOnQueue(&vp->vlru))
5468 if (idx == VLRU_QUEUE_INVALID)
5470 VLRU_Wait_r(&volume_LRU.q[idx]);
5471 } while (idx != vp->vlru.idx);
5473 /* now remove from the VLRU and update
5474 * the appropriate counter */
5475 queue_Remove(&vp->vlru);
5476 volume_LRU.q[idx].len--;
5477 vp->vlru.idx = VLRU_QUEUE_INVALID;
5478 V_attachFlags(vp) &= ~(VOL_ON_VLRU);
5482 * tell the VLRU subsystem that a volume was just accessed.
5484 * @param[in] vp pointer to volume object
5487 * @arg @c VOL_LOCK is held
5488 * @arg caller MUST hold a lightweight ref on @p vp
5489 * @arg caller MUST NOT hold exclusive ownership of any VLRU queue
5491 * @post volume VLRU access statistics are updated. If the volume was on
5492 * the VLRU soft detach candidate queue, it is moved to the NEW
5495 * @note @c VOL_LOCK may be dropped internally
5499 * @internal volume package internal use only.
5502 VLRU_UpdateAccess_r(volatile Volume * vp)
5504 afs_uint32 live_interval;
5505 Volume * rvp = NULL;
5510 if (queue_IsNotOnQueue(&vp->vlru))
5513 assert(V_attachFlags(vp) & VOL_ON_VLRU);
5515 /* update the access timestamp */
5516 vp->stats.last_get = FT_ApproxTime();
5519 * if the volume is on the soft detach candidate
5520 * list, we need to safely move it back to a
5521 * regular generation. this has to be done
5522 * carefully so we don't race against the scanner
5526 /* if this volume is on the soft detach candidate queue,
5527 * then grab exclusive access to the necessary queues */
5528 if (vp->vlru.idx == VLRU_QUEUE_CANDIDATE) {
5530 VCreateReservation_r(rvp);
5532 VLRU_Wait_r(&volume_LRU.q[VLRU_QUEUE_NEW]);
5533 VLRU_BeginExclusive_r(&volume_LRU.q[VLRU_QUEUE_NEW]);
5534 VLRU_Wait_r(&volume_LRU.q[VLRU_QUEUE_CANDIDATE]);
5535 VLRU_BeginExclusive_r(&volume_LRU.q[VLRU_QUEUE_CANDIDATE]);
5538 /* make sure multiple threads don't race to update */
5539 if (vp->vlru.idx == VLRU_QUEUE_CANDIDATE) {
5540 VLRU_SwitchQueues(vp, VLRU_QUEUE_NEW, 1);
5544 VLRU_EndExclusive_r(&volume_LRU.q[VLRU_QUEUE_CANDIDATE]);
5545 VLRU_EndExclusive_r(&volume_LRU.q[VLRU_QUEUE_NEW]);
5546 VCancelReservation_r(rvp);
5551 * switch a volume between two VLRU queues.
5553 * @param[in] vp pointer to volume object
5554 * @param[in] new_idx index of VLRU queue onto which the volume will be moved
5555 * @param[in] append controls whether the volume will be appended or
5556 * prepended to the queue. A nonzero value means it will
5557 * be appended; zero means it will be prepended.
5559 * @pre The new (and old, if applicable) queue(s) must either be owned
5560 * exclusively by the calling thread for asynchronous manipulation,
5561 * or the queue(s) must be quiescent and VOL_LOCK must be held.
5562 * Please see VLRU_BeginExclusive_r, VLRU_EndExclusive_r and VLRU_Wait_r
5563 * for further details of the queue asynchronous processing mechanism.
5565 * @post If the volume object was already on a VLRU queue, it is
5566 * removed from the queue. Depending on the value of the append
5567 * parameter, the volume object is either appended or prepended
5568 * to the VLRU queue referenced by the new_idx parameter.
5572 * @see VLRU_BeginExclusive_r
5573 * @see VLRU_EndExclusive_r
5576 * @internal volume package internal use only.
5579 VLRU_SwitchQueues(volatile Volume * vp, int new_idx, int append)
5581 if (queue_IsNotOnQueue(&vp->vlru))
5584 queue_Remove(&vp->vlru);
5585 volume_LRU.q[vp->vlru.idx].len--;
5587 /* put the volume back on the correct generational queue */
5589 queue_Append(&volume_LRU.q[new_idx], &vp->vlru);
5591 queue_Prepend(&volume_LRU.q[new_idx], &vp->vlru);
5594 volume_LRU.q[new_idx].len++;
5595 vp->vlru.idx = new_idx;
5599 * VLRU background thread.
5601 * The VLRU Scanner Thread is responsible for periodically scanning through
5602 * each VLRU queue looking for volumes which should be moved to another
5603 * queue, or soft detached.
5605 * @param[in] args unused thread arguments parameter
5607 * @return unused thread return value
5608 * @retval NULL always
5610 * @internal volume package internal use only.
5613 VLRU_ScannerThread(void * args)
5615 afs_uint32 now, min_delay, delay;
5616 afs_uint32 next_scan[VLRU_GENERATIONS];
5617 afs_uint32 next_promotion[VLRU_GENERATIONS];
5618 int i, min_idx, min_op, overdue, state;
5620 /* set t=0 for promotion cycle to be
5621 * fileserver startup */
5622 now = FT_ApproxTime();
5623 for (i=0; i < VLRU_GENERATIONS-1; i++) {
5624 volume_LRU.last_promotion[i] = now;
5627 /* don't start the scanner until VLRU_offline_thresh
5628 * plus a small delay for VInitVolumePackage to finish
5631 sleep(VLRU_offline_thresh + 60);
5633 /* set t=0 for scan cycle to be now */
5634 now = FT_ApproxTime();
5635 for (i=0; i < VLRU_GENERATIONS+1; i++) {
5636 volume_LRU.last_scan[i] = now;
5640 if (volume_LRU.scanner_state == VLRU_SCANNER_STATE_OFFLINE) {
5641 volume_LRU.scanner_state = VLRU_SCANNER_STATE_ONLINE;
5644 while ((state = volume_LRU.scanner_state) != VLRU_SCANNER_STATE_SHUTTING_DOWN) {
5645 /* check to see if we've been asked to pause */
5646 if (volume_LRU.scanner_state == VLRU_SCANNER_STATE_PAUSING) {
5647 volume_LRU.scanner_state = VLRU_SCANNER_STATE_PAUSED;
5648 assert(pthread_cond_broadcast(&volume_LRU.cv) == 0);
5650 VOL_CV_WAIT(&volume_LRU.cv);
5651 } while (volume_LRU.scanner_state == VLRU_SCANNER_STATE_PAUSED);
5654 /* scheduling can happen outside the glock */
5657 /* figure out what is next on the schedule */
5659 /* figure out a potential schedule for the new generation first */
5661 min_delay = volume_LRU.scan_interval[0] + volume_LRU.last_scan[0] - now;
5664 if (min_delay > volume_LRU.scan_interval[0]) {
5665 /* unsigned overflow -- we're overdue to run this scan */
5670 /* if we're not overdue for gen 0, figure out schedule for candidate gen */
5672 i = VLRU_QUEUE_CANDIDATE;
5673 delay = volume_LRU.scan_interval[i] + volume_LRU.last_scan[i] - now;
5674 if (delay < min_delay) {
5678 if (delay > volume_LRU.scan_interval[i]) {
5679 /* unsigned overflow -- we're overdue to run this scan */
5686 /* if we're still not overdue for something, figure out schedules for promotions */
5687 for (i=0; !overdue && i < VLRU_GENERATIONS-1; i++) {
5688 delay = volume_LRU.promotion_interval[i] + volume_LRU.last_promotion[i] - now;
5689 if (delay < min_delay) {
5694 if (delay > volume_LRU.promotion_interval[i]) {
5695 /* unsigned overflow -- we're overdue to run this promotion */
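/*
 * Arithmetic note (explanatory, hypothetical numbers): delay is computed as
 * interval + last - now in unsigned arithmetic.  If the pass is not yet due
 * (e.g. interval 120, last 1000, now 1050) the result is 70, a genuine
 * delay.  If the deadline has already passed (now 1500), the subtraction
 * wraps around to a value far larger than the interval, which is what the
 * overdue checks above detect.
 */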
5704 /* sleep as needed */
5709 /* do whatever is next */
5712 VLRU_Promote_r(min_idx);
5713 VLRU_Demote_r(min_idx+1);
5715 VLRU_Scan_r(min_idx);
5717 now = FT_ApproxTime();
5720 Log("VLRU scanner asked to go offline (scanner_state=%d)\n", state);
5722 /* signal that scanner is down */
5723 volume_LRU.scanner_state = VLRU_SCANNER_STATE_OFFLINE;
5724 assert(pthread_cond_broadcast(&volume_LRU.cv) == 0);
5730 * promote volumes from one VLRU generation to the next.
5732 * This routine scans a VLRU generation looking for volumes which are
5733 * eligible to be promoted to the next generation. All volumes which
5734 * meet the eligibility requirement are promoted.
5736 * Promotion eligibility is based upon meeting both of the following
5739 * @arg The volume has been accessed since the last promotion:
5740 * @c (vp->stats.last_get >= vp->stats.last_promote)
5741 * @arg The last promotion occurred at least
5742 * @c volume_LRU.promotion_interval[idx] seconds ago
5744 * As a performance optimization, promotions are "globbed". In other
5745 * words, we promote arbitrarily large contiguous sublists of elements in a single operation.
5748 * @param[in] idx VLRU queue index to scan
5752 * @internal VLRU internal use only.
5755 VLRU_Promote_r(int idx)
5757 int len, chaining, promote;
5758 afs_uint32 now, thresh;
5759 struct rx_queue *qp, *nqp;
5760 Volume * vp, *start, *end;
5762 /* get exclusive access to two chains, and drop the glock */
5763 VLRU_Wait_r(&volume_LRU.q[idx]);
5764 VLRU_BeginExclusive_r(&volume_LRU.q[idx]);
5765 VLRU_Wait_r(&volume_LRU.q[idx+1]);
5766 VLRU_BeginExclusive_r(&volume_LRU.q[idx+1]);
5769 thresh = volume_LRU.promotion_interval[idx];
5770 now = FT_ApproxTime();
5773 for (queue_ScanBackwards(&volume_LRU.q[idx], qp, nqp, rx_queue)) {
5774 vp = (Volume *)((char *)qp - offsetof(Volume, vlru));
5775 promote = (((vp->stats.last_promote + thresh) <= now) &&
5776 (vp->stats.last_get >= vp->stats.last_promote));
5784 /* promote and prepend chain */
5785 queue_MoveChainAfter(&volume_LRU.q[idx+1], &start->vlru, &end->vlru);
5799 /* promote and prepend */
5800 queue_MoveChainAfter(&volume_LRU.q[idx+1], &start->vlru, &end->vlru);
5804 volume_LRU.q[idx].len -= len;
5805 volume_LRU.q[idx+1].len += len;
5808 /* release exclusive access to the two chains */
5810 volume_LRU.last_promotion[idx] = now;
5811 VLRU_EndExclusive_r(&volume_LRU.q[idx+1]);
5812 VLRU_EndExclusive_r(&volume_LRU.q[idx]);
5815 /* run the demotions */
5817 VLRU_Demote_r(int idx)
5820 int len, chaining, demote;
5821 afs_uint32 now, thresh;
5822 struct rx_queue *qp, *nqp;
5823 Volume * vp, *start, *end;
5824 Volume ** salv_flag_vec = NULL;
5825 int salv_vec_offset = 0;
5827 assert(idx == VLRU_QUEUE_MID || idx == VLRU_QUEUE_OLD);
5829 /* get exclusive access to two chains, and drop the glock */
5830 VLRU_Wait_r(&volume_LRU.q[idx-1]);
5831 VLRU_BeginExclusive_r(&volume_LRU.q[idx-1]);
5832 VLRU_Wait_r(&volume_LRU.q[idx]);
5833 VLRU_BeginExclusive_r(&volume_LRU.q[idx]);
5836 /* no big deal if this allocation fails */
5837 if (volume_LRU.q[idx].len) {
5838 salv_flag_vec = (Volume **) malloc(volume_LRU.q[idx].len * sizeof(Volume *));
5841 now = FT_ApproxTime();
5842 thresh = volume_LRU.promotion_interval[idx-1];
5845 for (queue_ScanBackwards(&volume_LRU.q[idx], qp, nqp, rx_queue)) {
5846 vp = (Volume *)((char *)qp - offsetof(Volume, vlru));
5847 demote = (((vp->stats.last_promote + thresh) <= now) &&
5848 (vp->stats.last_get < (now - thresh)));
5850 /* we now do volume update list DONT_SALVAGE flag setting during
5851 * demotion passes */
5852 if (salv_flag_vec &&
5853 !(V_attachFlags(vp) & VOL_HDR_DONTSALV) &&
5855 (vp->updateTime < (now - SALVAGE_INTERVAL)) &&
5856 (V_attachState(vp) == VOL_STATE_ATTACHED)) {
5857 salv_flag_vec[salv_vec_offset++] = vp;
5858 VCreateReservation_r(vp);
5867 /* demote and append chain */
5868 queue_MoveChainBefore(&volume_LRU.q[idx-1], &start->vlru, &end->vlru);
5882 queue_MoveChainBefore(&volume_LRU.q[idx-1], &start->vlru, &end->vlru);
5886 volume_LRU.q[idx].len -= len;
5887 volume_LRU.q[idx-1].len += len;
5890 /* release exclusive access to the two chains */
5892 VLRU_EndExclusive_r(&volume_LRU.q[idx]);
5893 VLRU_EndExclusive_r(&volume_LRU.q[idx-1]);
5895 /* now go back and set the DONT_SALVAGE flags as appropriate */
5896 if (salv_flag_vec) {
5898 for (i = 0; i < salv_vec_offset; i++) {
5899 vp = salv_flag_vec[i];
5900 if (!(V_attachFlags(vp) & VOL_HDR_DONTSALV) &&
5901 (vp->updateTime < (now - SALVAGE_INTERVAL)) &&
5902 (V_attachState(vp) == VOL_STATE_ATTACHED)) {
5905 V_attachFlags(vp) |= VOL_HDR_DONTSALV;
5906 V_dontSalvage(vp) = DONT_SALVAGE;
5907 VUpdateVolume_r(&ec, vp, 0);
5911 VCancelReservation_r(vp);
5913 free(salv_flag_vec);
5917 /* run a pass of the VLRU GC scanner */
5919 VLRU_Scan_r(int idx)
5921 afs_uint32 now, thresh;
5922 struct rx_queue *qp, *nqp;
5923 volatile Volume * vp;
5926 assert(idx == VLRU_QUEUE_NEW || idx == VLRU_QUEUE_CANDIDATE);
5928 /* gain exclusive access to the idx VLRU */
5929 VLRU_Wait_r(&volume_LRU.q[idx]);
5930 VLRU_BeginExclusive_r(&volume_LRU.q[idx]);
5932 if (idx != VLRU_QUEUE_CANDIDATE) {
5933 /* gain exclusive access to the candidate VLRU */
5934 VLRU_Wait_r(&volume_LRU.q[VLRU_QUEUE_CANDIDATE]);
5935 VLRU_BeginExclusive_r(&volume_LRU.q[VLRU_QUEUE_CANDIDATE]);
5938 now = FT_ApproxTime();
5939 thresh = now - VLRU_offline_thresh;
5941 /* perform candidate selection and soft detaching */
5942 if (idx == VLRU_QUEUE_CANDIDATE) {
5943 /* soft detach some volumes from the candidate pool */
5947 for (i=0,queue_ScanBackwards(&volume_LRU.q[idx], qp, nqp, rx_queue)) {
5948 vp = (Volume *)((char *)qp - offsetof(Volume, vlru));
5949 if (i >= VLRU_offline_max) {
5952 /* check timestamp to see if it's a candidate for soft detaching */
5953 if (vp->stats.last_get <= thresh) {
5955 if (VCheckSoftDetach(vp, thresh))
5961 /* scan for volumes to become soft detach candidates */
5962 for (i=1,queue_ScanBackwards(&volume_LRU.q[idx], qp, nqp, rx_queue),i++) {
5963 vp = (Volume *)((char *)qp - offsetof(Volume, vlru));
5965 /* check timestamp to see if it's a candidate for soft detaching */
5966 if (vp->stats.last_get <= thresh) {
5967 VCheckSoftDetachCandidate(vp, thresh);
5970 if (!(i&0x7f)) { /* lock coarsening optimization */
5978 /* relinquish exclusive access to the VLRU chains */
5982 volume_LRU.last_scan[idx] = now;
5983 if (idx != VLRU_QUEUE_CANDIDATE) {
5984 VLRU_EndExclusive_r(&volume_LRU.q[VLRU_QUEUE_CANDIDATE]);
5986 VLRU_EndExclusive_r(&volume_LRU.q[idx]);
5989 /* check whether volume is safe to soft detach
5990 * caller MUST NOT hold a ref count on vp */
5992 VCheckSoftDetach(volatile Volume * vp, afs_uint32 thresh)
5996 if (vp->nUsers || vp->nWaiters)
5999 if (vp->stats.last_get <= thresh) {
6000 ret = VSoftDetachVolume_r(vp, thresh);
6006 /* check whether volume should be made a
6007 * soft detach candidate */
6009 VCheckSoftDetachCandidate(volatile Volume * vp, afs_uint32 thresh)
6012 if (vp->nUsers || vp->nWaiters)
6017 assert(idx == VLRU_QUEUE_NEW);
6019 if (vp->stats.last_get <= thresh) {
6020 /* move to candidate pool */
6021 queue_Remove(&vp->vlru);
6022 volume_LRU.q[VLRU_QUEUE_NEW].len--;
6023 queue_Prepend(&volume_LRU.q[VLRU_QUEUE_CANDIDATE], &vp->vlru);
6024 vp->vlru.idx = VLRU_QUEUE_CANDIDATE;
6025 volume_LRU.q[VLRU_QUEUE_CANDIDATE].len++;
6033 /* begin exclusive access on VLRU */
6035 VLRU_BeginExclusive_r(struct VLRU_q * q)
6037 assert(q->busy == 0);
6041 /* end exclusive access on VLRU */
6043 VLRU_EndExclusive_r(struct VLRU_q * q)
6047 assert(pthread_cond_broadcast(&q->cv) == 0);
6050 /* wait for another thread to end exclusive access on VLRU */
6052 VLRU_Wait_r(struct VLRU_q * q)
6055 VOL_CV_WAIT(&q->cv);
6060 * volume soft detach
6062 * caller MUST NOT hold a ref count on vp */
6064 VSoftDetachVolume_r(volatile Volume * vp, afs_uint32 thresh)
6069 assert(vp->vlru.idx == VLRU_QUEUE_CANDIDATE);
6071 ts_save = vp->stats.last_get;
6072 if (ts_save > thresh)
6075 if (vp->nUsers || vp->nWaiters)
6078 if (VIsExclusiveState(V_attachState(vp))) {
6082 switch (V_attachState(vp)) {
6083 case VOL_STATE_UNATTACHED:
6084 case VOL_STATE_PREATTACHED:
6085 case VOL_STATE_ERROR:
6086 case VOL_STATE_GOING_OFFLINE:
6087 case VOL_STATE_SHUTTING_DOWN:
6088 case VOL_STATE_SALVAGING:
6089 volume_LRU.q[vp->vlru.idx].len--;
6091 /* create and cancel a reservation to
6092 * give the volume an opportunity to
6094 VCreateReservation_r(vp);
6095 queue_Remove(&vp->vlru);
6096 vp->vlru.idx = VLRU_QUEUE_INVALID;
6097 V_attachFlags(vp) &= ~(VOL_ON_VLRU);
6098 VCancelReservation_r(vp);
6102 /* hold the volume and take it offline.
6103 * no need for reservations, as VHold_r
6104 * takes care of that internally. */
6105 if (VHold_r(vp) == 0) {
6106 /* vhold drops the glock, so now we should
6107 * check to make sure we aren't racing against
6108 * other threads. if we are racing, offlining vp
6109 * would be wasteful, and block the scanner for a while
6113 (vp->shuttingDown) ||
6114 (vp->goingOffline) ||
6115 (vp->stats.last_get != ts_save)) {
6116 /* looks like we're racing someone else. bail */
6120 /* pull it off the VLRU */
6121 assert(vp->vlru.idx == VLRU_QUEUE_CANDIDATE);
6122 volume_LRU.q[VLRU_QUEUE_CANDIDATE].len--;
6123 queue_Remove(&vp->vlru);
6124 vp->vlru.idx = VLRU_QUEUE_INVALID;
6125 V_attachFlags(vp) &= ~(VOL_ON_VLRU);
6127 /* take it offline */
6128 VOffline_r(vp, "volume has been soft detached");
6130 /* invalidate the volume header cache */
6131 FreeVolumeHeader(vp);
6134 IncUInt64(&VStats.soft_detaches);
6135 vp->stats.soft_detaches++;
6137 /* put in pre-attached state so demand
6138 * attacher can work on it */
6139 VChangeState_r(vp, VOL_STATE_PREATTACHED);
6145 #endif /* AFS_DEMAND_ATTACH_FS */
6148 /***************************************************/
6149 /* Volume Header Cache routines */
6150 /***************************************************/
6153 * volume header cache.
6155 struct volume_hdr_LRU_t volume_hdr_LRU;
6158 * initialize the volume header cache.
6160 * @param[in] howMany number of header cache entries to preallocate
6162 * @pre VOL_LOCK held. Function has never been called before.
6164 * @post howMany cache entries are allocated, initialized, and added
6165 * to the LRU list. Header cache statistics are initialized.
6167 * @note only applicable to fileServer program type. Should only be
6168 * called once during volume package initialization.
6170 * @internal volume package internal use only.
6173 VInitVolumeHeaderCache(afs_uint32 howMany)
6175 register struct volHeader *hp;
6176 if (programType != fileServer)
6178 queue_Init(&volume_hdr_LRU);
6179 volume_hdr_LRU.stats.free = 0;
6180 volume_hdr_LRU.stats.used = howMany;
6181 volume_hdr_LRU.stats.attached = 0;
6182 hp = (struct volHeader *)(calloc(howMany, sizeof(struct volHeader)));
6184 ReleaseVolumeHeader(hp++);
6188 * get a volume header and attach it to the volume object.
6190 * @param[in] vp pointer to volume object
6192 * @return cache entry status
6193 * @retval 0 volume header was newly attached; cache data is invalid
6194 * @retval 1 volume header was previously attached; cache data is valid
6196 * @pre VOL_LOCK held. For DAFS, lightweight ref must be held on volume object.
6198 * @post volume header attached to volume object. if necessary, header cache
6199 * entry on LRU is synchronized to disk. Header is removed from LRU list.
6201 * @note VOL_LOCK may be dropped
6203 * @warning this interface does not load header data from disk. it merely
6204 * attaches a header object to the volume object, and may sync the old
6205 * header cache data out to disk in the process.
6207 * @internal volume package internal use only.
6210 GetVolumeHeader(register Volume * vp)
6213 register struct volHeader *hd;
6215 static int everLogged = 0;
6217 #ifdef AFS_DEMAND_ATTACH_FS
6218 VolState vp_save, back_save;
6220 /* XXX debug 9/19/05 we've apparently got
6221 * a ref counting bug somewhere that's
6222 * breaking the nUsers == 0 => header on LRU
6224 if (vp->header && queue_IsNotOnQueue(vp->header)) {
6225 Log("nUsers == 0, but header not on LRU\n");
6230 old = (vp->header != NULL); /* old == volume already has a header */
6232 if (programType != fileServer) {
6233 /* for volume utilities, we allocate volHeaders as needed */
6235 hd = (struct volHeader *)calloc(1, sizeof(*vp->header));
6239 #ifdef AFS_DEMAND_ATTACH_FS
6240 V_attachFlags(vp) |= VOL_HDR_ATTACHED;
6244 /* for the fileserver, we keep a volume header cache */
6246 /* the header we previously dropped in the lru is
6247 * still available. pull it off the lru and return */
6250 assert(hd->back == vp);
6252 /* we need to grab a new element off the LRU */
6253 if (queue_IsNotEmpty(&volume_hdr_LRU)) {
6254 /* grab an element and pull off of LRU */
6255 hd = queue_First(&volume_hdr_LRU, volHeader);
6258 /* LRU is empty, so allocate a new volHeader
6259 * this is probably indicative of a leak, so let the user know */
6260 hd = (struct volHeader *)calloc(1, sizeof(struct volHeader));
6263 Log("****Allocated more volume headers, probably leak****\n");
6266 volume_hdr_LRU.stats.free++;
6269 /* this header used to belong to someone else.
6270 * we'll need to check if the header needs to
6271 * be sync'd out to disk */
6273 #ifdef AFS_DEMAND_ATTACH_FS
6274 /* if hd->back were in an exclusive state, then
6275 * its volHeader would not be on the LRU... */
6276 assert(!VIsExclusiveState(V_attachState(hd->back)));
6279 if (hd->diskstuff.inUse) {
6280 /* volume was in use, so we'll need to sync
6281 * its header to disk */
6283 #ifdef AFS_DEMAND_ATTACH_FS
6284 back_save = VChangeState_r(hd->back, VOL_STATE_UPDATING);
6285 vp_save = VChangeState_r(vp, VOL_STATE_HDR_ATTACHING);
6286 VCreateReservation_r(hd->back);
6290 WriteVolumeHeader_r(&error, hd->back);
6291 /* Ignore errors; catch them later */
6293 #ifdef AFS_DEMAND_ATTACH_FS
6298 hd->back->header = NULL;
6299 #ifdef AFS_DEMAND_ATTACH_FS
6300 V_attachFlags(hd->back) &= ~(VOL_HDR_ATTACHED | VOL_HDR_LOADED | VOL_HDR_IN_LRU);
6302 if (hd->diskstuff.inUse) {
6303 VChangeState_r(hd->back, back_save);
6304 VCancelReservation_r(hd->back);
6305 VChangeState_r(vp, vp_save);
6309 volume_hdr_LRU.stats.attached++;
6313 #ifdef AFS_DEMAND_ATTACH_FS
6314 V_attachFlags(vp) |= VOL_HDR_ATTACHED;
6317 volume_hdr_LRU.stats.free--;
6318 volume_hdr_LRU.stats.used++;
6320 IncUInt64(&VStats.hdr_gets);
6321 #ifdef AFS_DEMAND_ATTACH_FS
6322 IncUInt64(&vp->stats.hdr_gets);
6323 vp->stats.last_hdr_get = FT_ApproxTime();
6330 * make sure volume header is attached and contains valid cache data.
6332 * @param[out] ec outbound error code
6333 * @param[in] vp pointer to volume object
6335 * @pre VOL_LOCK held. For DAFS, lightweight ref held on vp.
6337 * @post header cache entry attached, and loaded with valid data, or
6338 * *ec is nonzero, and the header is released back into the LRU.
6340 * @internal volume package internal use only.
6343 LoadVolumeHeader(Error * ec, Volume * vp)
6345 #ifdef AFS_DEMAND_ATTACH_FS
6346 VolState state_save;
6350 if (vp->nUsers == 0 && !GetVolumeHeader(vp)) {
6351 IncUInt64(&VStats.hdr_loads);
6352 state_save = VChangeState_r(vp, VOL_STATE_HDR_LOADING);
6355 ReadHeader(ec, V_diskDataHandle(vp), (char *)&V_disk(vp),
6356 sizeof(V_disk(vp)), VOLUMEINFOMAGIC,
6358 IncUInt64(&vp->stats.hdr_loads);
6359 now = FT_ApproxTime();
6363 V_attachFlags(vp) |= VOL_HDR_LOADED;
6364 vp->stats.last_hdr_load = now;
6366 VChangeState_r(vp, state_save);
6368 #else /* AFS_DEMAND_ATTACH_FS */
6370 if (vp->nUsers == 0 && !GetVolumeHeader(vp)) {
6371 IncUInt64(&VStats.hdr_loads);
6373 ReadHeader(ec, V_diskDataHandle(vp), (char *)&V_disk(vp),
6374 sizeof(V_disk(vp)), VOLUMEINFOMAGIC,
6377 #endif /* AFS_DEMAND_ATTACH_FS */
6379 /* maintain (nUsers==0) => header in LRU invariant */
6380 ReleaseVolumeHeader(vp->header);
6385 * release a header cache entry back into the LRU list.
6387 * @param[in] hd pointer to volume header cache object
6389 * @pre VOL_LOCK held.
6391 * @post header cache object appended onto end of LRU list.
6393 * @note only applicable to fileServer program type.
6395 * @note used to place a header cache entry back into the
6396 * LRU pool without invalidating it as a cache entry.
6398 * @internal volume package internal use only.
6401 ReleaseVolumeHeader(register struct volHeader *hd)
6403 if (programType != fileServer)
6405 if (!hd || queue_IsOnQueue(hd)) /* no header, or header already released */
6407 queue_Append(&volume_hdr_LRU, hd);
6408 #ifdef AFS_DEMAND_ATTACH_FS
6410 V_attachFlags(hd->back) |= VOL_HDR_IN_LRU;
6413 volume_hdr_LRU.stats.free++;
6414 volume_hdr_LRU.stats.used--;
6418 * free/invalidate a volume header cache entry.
6420 * @param[in] vp pointer to volume object
6422 * @pre VOL_LOCK is held.
6424 * @post For fileserver, header cache entry is returned to LRU, and it is
6425 * invalidated as a cache entry. For volume utilities, the header
6426 * cache entry is freed.
6428 * @note For fileserver, this should be utilized instead of ReleaseVolumeHeader
6429 * whenever it is necessary to invalidate the header cache entry.
6431 * @see ReleaseVolumeHeader
6433 * @internal volume package internal use only.
6436 FreeVolumeHeader(register Volume * vp)
6438 register struct volHeader *hd = vp->header;
6441 if (programType == fileServer) {
6442 ReleaseVolumeHeader(hd);
6447 #ifdef AFS_DEMAND_ATTACH_FS
6448 V_attachFlags(vp) &= ~(VOL_HDR_ATTACHED | VOL_HDR_IN_LRU | VOL_HDR_LOADED);
6450 volume_hdr_LRU.stats.attached--;
6455 /***************************************************/
6456 /* Volume Hash Table routines */
6457 /***************************************************/
6460 * set size of volume object hash table.
6462 * @param[in] logsize log(2) of desired hash table size
6464 * @return operation status
6466 * @retval -1 failure
6468 * @pre MUST be called prior to VInitVolumePackage
6470 * @post Volume Hash Table will have 2^logsize buckets
6473 VSetVolHashSize(int logsize)
6475 /* 64 to 16384 hash buckets seems like a reasonable range */
6476 if ((logsize < 6 ) || (logsize > 14)) {
6481 VolumeHashTable.Size = 1 << logsize;
6482 VolumeHashTable.Mask = VolumeHashTable.Size - 1;
6484 /* we can't yet support runtime modification of this
6485 * parameter. we'll need a configuration rwlock to
6486 * make runtime modification feasible.... */
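/*
 * Usage sketch (illustrative): a deployment expecting tens of thousands of
 * volumes might ask for 2^12 == 4096 hash buckets before the volume package
 * is initialized:
 *
 *   if (VSetVolHashSize(12) < 0) {
 *       Log("VSetVolHashSize: rejected hash table size\n");
 *   }
 */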
6493 * initialize dynamic data structures for volume hash table.
6495 * @post hash table is allocated, and fields are initialized.
6497 * @internal volume package internal use only.
6500 VInitVolumeHash(void)
6504 VolumeHashTable.Table = (VolumeHashChainHead *) calloc(VolumeHashTable.Size,
6505 sizeof(VolumeHashChainHead));
6506 assert(VolumeHashTable.Table != NULL);
6508 for (i=0; i < VolumeHashTable.Size; i++) {
6509 queue_Init(&VolumeHashTable.Table[i]);
6510 #ifdef AFS_DEMAND_ATTACH_FS
6511 assert(pthread_cond_init(&VolumeHashTable.Table[i].chain_busy_cv, NULL) == 0);
6512 #endif /* AFS_DEMAND_ATTACH_FS */
6517 * add a volume object to the hash table.
6519 * @param[in] vp pointer to volume object
6520 * @param[in] hashid hash of volume id
6522 * @pre VOL_LOCK is held. For DAFS, caller must hold a lightweight
6525 * @post volume is added to hash chain.
6527 * @internal volume package internal use only.
6529 * @note For DAFS, VOL_LOCK may be dropped in order to wait for an
6530 * asynchronous hash chain reordering to finish.
6533 AddVolumeToHashTable(register Volume * vp, int hashid)
6535 VolumeHashChainHead * head;
6537 if (queue_IsOnQueue(vp))
6540 head = &VolumeHashTable.Table[VOLUME_HASH(hashid)];
6542 #ifdef AFS_DEMAND_ATTACH_FS
6543 /* wait for the hash chain to become available */
6546 V_attachFlags(vp) |= VOL_IN_HASH;
6547 vp->chainCacheCheck = ++head->cacheCheck;
6548 #endif /* AFS_DEMAND_ATTACH_FS */
6551 vp->hashid = hashid;
6552 queue_Append(head, vp);
6553 vp->vnodeHashOffset = VolumeHashOffset_r();
6557 * delete a volume object from the hash table.
6559 * @param[in] vp pointer to volume object
6561 * @pre VOL_LOCK is held. For DAFS, caller must hold a lightweight
6564 * @post volume is removed from hash chain.
6566 * @internal volume package internal use only.
6568 * @note For DAFS, VOL_LOCK may be dropped in order to wait for an
6569 * asynchronous hash chain reordering to finish.
6572 DeleteVolumeFromHashTable(register Volume * vp)
6574 VolumeHashChainHead * head;
6576 if (!queue_IsOnQueue(vp))
6579 head = &VolumeHashTable.Table[VOLUME_HASH(vp->hashid)];
6581 #ifdef AFS_DEMAND_ATTACH_FS
6582 /* wait for the hash chain to become available */
6585 V_attachFlags(vp) &= ~(VOL_IN_HASH);
6587 #endif /* AFS_DEMAND_ATTACH_FS */
6591 /* do NOT reset hashid to zero, as the online
6592 * salvager package may need to know the volume id
6593 * after the volume is removed from the hash */
6597 * lookup a volume object in the hash table given a volume id.
6599 * @param[out] ec error code return
6600 * @param[in] volumeId volume id
6601 * @param[in] hint volume object which we believe could be the correct
6604 * @return volume object pointer
6605 * @retval NULL no such volume id is registered with the hash table.
6607 * @pre VOL_LOCK is held. For DAFS, caller must hold a lightweight
6610 * @post volume object with the given id is returned. volume object and
6611 * hash chain access statistics are updated. hash chain may have
6614 * @note For DAFS, VOL_LOCK may be dropped in order to wait for an
6615 * asynchronous hash chain reordering operation to finish, or
6616 * in order for us to perform an asynchronous chain reordering.
6618 * @note Hash chain reorderings occur when the access count for the
6619 * volume object being looked up exceeds the sum of the previous
6620 * node's (the node ahead of it in the hash chain linked list)
6621 * access count plus the constant VOLUME_HASH_REORDER_THRESHOLD.
6623 * @note For DAFS, the hint parameter allows us to short-circuit if the
6624 * cacheCheck fields match between the hash chain head and the
6625 * hint volume object.
6628 VLookupVolume_r(Error * ec, VolId volumeId, Volume * hint)
6630 register int looks = 0;
6631 Volume * vp, *np, *pp;
6632 VolumeHashChainHead * head;
6635 head = &VolumeHashTable.Table[VOLUME_HASH(volumeId)];
6637 #ifdef AFS_DEMAND_ATTACH_FS
6638 /* wait for the hash chain to become available */
6641 /* check to see if we can short circuit without walking the hash chain */
6642 if (hint && (hint->chainCacheCheck == head->cacheCheck)) {
6643 IncUInt64(&hint->stats.hash_short_circuits);
6646 #endif /* AFS_DEMAND_ATTACH_FS */
6648 /* someday we need to either do per-chain locks, RWlocks,
6649 * or both for volhash access.
6650 * (and move to a data structure with better cache locality) */
6652 /* search the chain for this volume id */
6653 for(queue_Scan(head, vp, np, Volume)) {
6655 if ((vp->hashid == volumeId)) {
6660 if (queue_IsEnd(head, vp)) {
6664 #ifdef AFS_DEMAND_ATTACH_FS
6665 /* update hash chain statistics */
6668 FillInt64(lks, 0, looks);
6669 AddUInt64(head->looks, lks, &head->looks);
6670 AddUInt64(VStats.hash_looks, lks, &VStats.hash_looks);
6671 IncUInt64(&head->gets);
6676 IncUInt64(&vp->stats.hash_lookups);
6678 /* for demand attach fileserver, we permit occasional hash chain reordering
6679 * so that frequently looked up volumes move towards the head of the chain */
6680 pp = queue_Prev(vp, Volume);
6681 if (!queue_IsEnd(head, pp)) {
6682 FillInt64(thresh, 0, VOLUME_HASH_REORDER_THRESHOLD);
6683 AddUInt64(thresh, pp->stats.hash_lookups, &thresh);
6684 if (GEInt64(vp->stats.hash_lookups, thresh)) {
6685 VReorderHash_r(head, pp, vp);
6689 /* update the short-circuit cache check */
6690 vp->chainCacheCheck = head->cacheCheck;
6692 #endif /* AFS_DEMAND_ATTACH_FS */
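/*
 * Illustrative reordering arithmetic (the threshold value is hypothetical):
 * if VOLUME_HASH_REORDER_THRESHOLD were 128 and the node immediately ahead
 * of vp in the chain had accumulated 1000 lookups, vp would be advanced by
 * VReorderHash_r once its own lookup count reached 1128 or more.
 */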
6697 #ifdef AFS_DEMAND_ATTACH_FS
6698 /* perform volume hash chain reordering.
6700 * advance a subchain beginning at vp ahead of
6701 * the adjacent subchain ending at pp */
6703 VReorderHash_r(VolumeHashChainHead * head, Volume * pp, Volume * vp)
6705 Volume *tp, *np, *lp;
6706 afs_uint64 move_thresh;
6708 /* this should never be called if the chain is already busy, so
6709 * no need to wait for other exclusive chain ops to finish */
6711 /* this is a rather heavy set of operations,
6712 * so let's set the chain busy flag and drop
6714 VHashBeginExclusive_r(head);
6717 /* scan forward in the chain from vp looking for the last element
6718 * in the chain we want to advance */
6719 FillInt64(move_thresh, 0, VOLUME_HASH_REORDER_CHAIN_THRESH);
6720 AddUInt64(move_thresh, pp->stats.hash_lookups, &move_thresh);
6721 for(queue_ScanFrom(head, vp, tp, np, Volume)) {
6722 if (LTInt64(tp->stats.hash_lookups, move_thresh)) {
6726 lp = queue_Prev(tp, Volume);
6728 /* scan backwards from pp to determine where to splice and
6729 * insert the subchain we're advancing */
6730 for(queue_ScanBackwardsFrom(head, pp, tp, np, Volume)) {
6731 if (GTInt64(tp->stats.hash_lookups, move_thresh)) {
6735 tp = queue_Next(tp, Volume);
6737 /* rebalance chain(vp,...,lp) ahead of chain(tp,...,pp) */
6738 queue_MoveChainBefore(tp,vp,lp);
6741 IncUInt64(&VStats.hash_reorders);
6743 IncUInt64(&head->reorders);
6745 /* wake up any threads waiting for the hash chain */
6746 VHashEndExclusive_r(head);
6750 /* demand-attach fs volume hash
6751 * asynchronous exclusive operations */
6754 * begin an asynchronous exclusive operation on a volume hash chain.
6756 * @param[in] head pointer to volume hash chain head object
6758 * @pre VOL_LOCK held. hash chain is quiescent.
6760 * @post hash chain marked busy.
6762 * @note this interface is used in conjunction with VHashEndExclusive_r and
6763 * VHashWait_r to perform asynchronous (wrt VOL_LOCK) operations on a
6764 * volume hash chain. Its main use case is hash chain reordering, which
6765 * has the potential to be a highly latent operation.
6767 * @see VHashEndExclusive_r
6772 * @internal volume package internal use only.
6775 VHashBeginExclusive_r(VolumeHashChainHead * head)
6777 assert(head->busy == 0);
6782 * relinquish exclusive ownership of a volume hash chain.
6784 * @param[in] head pointer to volume hash chain head object
6786 * @pre VOL_LOCK held. thread owns the hash chain exclusively.
6788 * @post hash chain is marked quiescent. threads awaiting use of
6789 * chain are awakened.
6791 * @see VHashBeginExclusive_r
6796 * @internal volume package internal use only.
6799 VHashEndExclusive_r(VolumeHashChainHead * head)
6803 assert(pthread_cond_broadcast(&head->chain_busy_cv) == 0);
6807 * wait for all asynchronous operations on a hash chain to complete.
6809 * @param[in] head pointer to volume hash chain head object
6811 * @pre VOL_LOCK held.
6813 * @post hash chain object is quiescent.
6815 * @see VHashBeginExclusive_r
6816 * @see VHashEndExclusive_r
6820 * @note This interface should be called before any attempt to
6821 * traverse the hash chain. It is permissible for a thread
6822 * to gain exclusive access to the chain, and then perform
6823 * latent operations on the chain asynchronously wrt the
6826 * @warning if waiting is necessary, VOL_LOCK is dropped
6828 * @internal volume package internal use only.
6831 VHashWait_r(VolumeHashChainHead * head)
6833 while (head->busy) {
6834 VOL_CV_WAIT(&head->chain_busy_cv);
6837 #endif /* AFS_DEMAND_ATTACH_FS */
6840 /***************************************************/
6841 /* Volume by Partition List routines */
6842 /***************************************************/
6845 * demand attach fileserver adds a
6846 * linked list of volumes to each
6847 * partition object, thus allowing
6848 * for quick enumeration of all
6849 * volumes on a partition
6852 #ifdef AFS_DEMAND_ATTACH_FS
6854 * add a volume to its disk partition VByPList.
6856 * @param[in] vp pointer to volume object
6858 * @pre either the disk partition VByPList is owned exclusively
6859 * by the calling thread, or the list is quiescent and
6862 * @post volume is added to disk partition VByPList
6866 * @warning it is the caller's responsibility to ensure list
6869 * @see VVByPListWait_r
6870 * @see VVByPListBeginExclusive_r
6871 * @see VVByPListEndExclusive_r
6873 * @internal volume package internal use only.
6876 AddVolumeToVByPList_r(Volume * vp)
6878 if (queue_IsNotOnQueue(&vp->vol_list)) {
6879 queue_Append(&vp->partition->vol_list, &vp->vol_list);
6880 V_attachFlags(vp) |= VOL_ON_VBYP_LIST;
6881 vp->partition->vol_list.len++;
6886 * delete a volume from its disk partition VByPList.
6888 * @param[in] vp pointer to volume object
6890 * @pre either the disk partition VByPList is owned exclusively
6891 * by the calling thread, or the list is quiescent and
6894 * @post volume is removed from the disk partition VByPList
6898 * @warning it is the caller's responsibility to ensure list
6901 * @see VVByPListWait_r
6902 * @see VVByPListBeginExclusive_r
6903 * @see VVByPListEndExclusive_r
6905 * @internal volume package internal use only.
6908 DeleteVolumeFromVByPList_r(Volume * vp)
6910 if (queue_IsOnQueue(&vp->vol_list)) {
6911 queue_Remove(&vp->vol_list);
6912 V_attachFlags(vp) &= ~(VOL_ON_VBYP_LIST);
6913 vp->partition->vol_list.len--;
6918 * begin an asynchronous exclusive operation on a VByPList.
6920 * @param[in] dp pointer to disk partition object
6922 * @pre VOL_LOCK held. VByPList is quiescent.
6924 * @post VByPList marked busy.
6926 * @note this interface is used in conjunction with VVByPListEndExclusive_r and
6927 * VVByPListWait_r to perform asynchronous (wrt VOL_LOCK) operations on a
6930 * @see VVByPListEndExclusive_r
6931 * @see VVByPListWait_r
6935 * @internal volume package internal use only.
6937 /* take exclusive control over the list */
6939 VVByPListBeginExclusive_r(struct DiskPartition64 * dp)
6941 assert(dp->vol_list.busy == 0);
6942 dp->vol_list.busy = 1;
6946 * relinquish exclusive ownership of a VByPList.
6948 * @param[in] dp pointer to disk partition object
6950 * @pre VOL_LOCK held. thread owns the VByPList exclusively.
6952 * @post VByPList is marked quiescent. threads awaiting use of
6953 * the list are awakened.
6955 * @see VVByPListBeginExclusive_r
6956 * @see VVByPListWait_r
6960 * @internal volume package internal use only.
6963 VVByPListEndExclusive_r(struct DiskPartition64 * dp)
6965 assert(dp->vol_list.busy);
6966 dp->vol_list.busy = 0;
6967 assert(pthread_cond_broadcast(&dp->vol_list.cv) == 0);
6971 * wait for all asynchronous operations on a VByPList to complete.
6973 * @param[in] dp pointer to disk partition object
6975 * @pre VOL_LOCK is held.
6977 * @post disk partition's VByP list is quiescent
6981 * @note This interface should be called before any attempt to
6982 * traverse the VByPList. It is permissible for a thread
6983 * to gain exclusive access to the list, and then perform
6984 * latent operations on the list asynchronously wrt the
6987 * @warning if waiting is necessary, VOL_LOCK is dropped
6989 * @see VVByPListEndExclusive_r
6990 * @see VVByPListBeginExclusive_r
6992 * @internal volume package internal use only.
6995 VVByPListWait_r(struct DiskPartition64 * dp)
6997 while (dp->vol_list.busy) {
6998 VOL_CV_WAIT(&dp->vol_list.cv);
7001 #endif /* AFS_DEMAND_ATTACH_FS */
7003 /***************************************************/
7004 /* Volume Cache Statistics routines */
7005 /***************************************************/
7008 VPrintCacheStats_r(void)
7010 afs_uint32 get_hi, get_lo, load_hi, load_lo;
7011 register struct VnodeClassInfo *vcp;
7012 vcp = &VnodeClassInfo[vLarge];
7013 Log("Large vnode cache, %d entries, %d allocs, %d gets (%d reads), %d writes\n", vcp->cacheSize, vcp->allocs, vcp->gets, vcp->reads, vcp->writes);
7014 vcp = &VnodeClassInfo[vSmall];
7015 Log("Small vnode cache,%d entries, %d allocs, %d gets (%d reads), %d writes\n", vcp->cacheSize, vcp->allocs, vcp->gets, vcp->reads, vcp->writes);
7016 SplitInt64(VStats.hdr_gets, get_hi, get_lo);
7017 SplitInt64(VStats.hdr_loads, load_hi, load_lo);
7018 Log("Volume header cache, %d entries, %d gets, %d replacements\n",
7019 VStats.hdr_cache_size, get_lo, load_lo);
7023 VPrintCacheStats(void)
7026 VPrintCacheStats_r();
7030 #ifdef AFS_DEMAND_ATTACH_FS
7032 UInt64ToDouble(afs_uint64 * x)
7034 static double c32 = 4.0 * 1.073741824 * 1000000000.0;
7036 SplitInt64(*x, h, l);
7037 return (((double)h) * c32) + ((double) l);
7041 DoubleToPrintable(double x, char * buf, int len)
7043 static double billion = 1000000000.0;
7046 y[0] = (afs_uint32) (x / (billion * billion));
7047 y[1] = (afs_uint32) ((x - (((double)y[0]) * billion * billion)) / billion);
7048 y[2] = (afs_uint32) (x - ((((double)y[0]) * billion * billion) + (((double)y[1]) * billion)));
7051 snprintf(buf, len, "%d%09d%09d", y[0], y[1], y[2]);
7053 snprintf(buf, len, "%d%09d", y[1], y[2]);
7055 snprintf(buf, len, "%d", y[2]);
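/*
 * Worked example (illustrative): DoubleToPrintable(1234567890123.0, buf,
 * sizeof(buf)) decomposes the value into billion-radix digits
 * y = {0, 1234, 567890123}; since y[0] is zero but y[1] is not, the
 * "%d%09d" format above is used and the resulting string is
 * "1234567890123".
 */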
7061 struct VLRUExtStatsEntry {
7065 struct VLRUExtStats {
7071 } queue_info[VLRU_QUEUE_INVALID];
7072 struct VLRUExtStatsEntry * vec;
7076 * add a 256-entry fudge factor onto the vector in case state changes
7077 * out from under us.
7079 #define VLRU_EXT_STATS_VEC_LEN_FUDGE 256
7082 * collect extended statistics for the VLRU subsystem.
7084 * @param[out] stats pointer to stats structure to be populated
7085 * @param[in] nvols number of volumes currently known to exist
7087 * @pre VOL_LOCK held
7089 * @post stats->vec allocated and populated
7091 * @return operation status
7096 VVLRUExtStats_r(struct VLRUExtStats * stats, afs_uint32 nvols)
7098 afs_uint32 cur, idx, len;
7099 struct rx_queue * qp, * nqp;
7101 struct VLRUExtStatsEntry * vec;
7103 len = nvols + VLRU_EXT_STATS_VEC_LEN_FUDGE;
7104 vec = stats->vec = calloc(len,
7105 sizeof(struct VLRUExtStatsEntry));
7111 for (idx = VLRU_QUEUE_NEW; idx < VLRU_QUEUE_INVALID; idx++) {
7112 VLRU_Wait_r(&volume_LRU.q[idx]);
7113 VLRU_BeginExclusive_r(&volume_LRU.q[idx]);
7116 stats->queue_info[idx].start = cur;
7118 for (queue_Scan(&volume_LRU.q[idx], qp, nqp, rx_queue)) {
7120 /* out of space in vec */
7123 vp = (Volume *)((char *)qp - offsetof(Volume, vlru));
7124 vec[cur].volid = vp->hashid;
7128 stats->queue_info[idx].len = cur - stats->queue_info[idx].start;
7131 VLRU_EndExclusive_r(&volume_LRU.q[idx]);
7139 #define ENUMTOSTRING(en) #en
7140 #define ENUMCASE(en) \
7142 return ENUMTOSTRING(en); \
7146 vlru_idx_to_string(int idx)
7149 ENUMCASE(VLRU_QUEUE_NEW);
7150 ENUMCASE(VLRU_QUEUE_MID);
7151 ENUMCASE(VLRU_QUEUE_OLD);
7152 ENUMCASE(VLRU_QUEUE_CANDIDATE);
7153 ENUMCASE(VLRU_QUEUE_HELD);
7154 ENUMCASE(VLRU_QUEUE_INVALID);
7156 return "**UNKNOWN**";
7161 VPrintExtendedCacheStats_r(int flags)
7164 afs_uint32 vol_sum = 0;
7171 struct stats looks, gets, reorders, len;
7172 struct stats ch_looks, ch_gets, ch_reorders;
7174 VolumeHashChainHead *head;
7176 struct VLRUExtStats vlru_stats;
7178 /* zero out stats */
7179 memset(&looks, 0, sizeof(struct stats));
7180 memset(&gets, 0, sizeof(struct stats));
7181 memset(&reorders, 0, sizeof(struct stats));
7182 memset(&len, 0, sizeof(struct stats));
7183 memset(&ch_looks, 0, sizeof(struct stats));
7184 memset(&ch_gets, 0, sizeof(struct stats));
7185 memset(&ch_reorders, 0, sizeof(struct stats));
    for (i = 0; i < VolumeHashTable.Size; i++) {
        head = &VolumeHashTable.Table[i];

        VHashBeginExclusive_r(head);

        ch_looks.sum    = UInt64ToDouble(&head->looks);
        ch_gets.sum     = UInt64ToDouble(&head->gets);
        ch_reorders.sum = UInt64ToDouble(&head->reorders);

        /* update global statistics */
        looks.sum    += ch_looks.sum;
        gets.sum     += ch_gets.sum;
        reorders.sum += ch_reorders.sum;
        len.sum      += (double)head->len;
        vol_sum      += head->len;

        if (i == 0) {
            /* first chain: seed the global min/max trackers */
            len.min      = (double) head->len;
            len.max      = (double) head->len;
            looks.min    = ch_looks.sum;
            looks.max    = ch_looks.sum;
            gets.min     = ch_gets.sum;
            gets.max     = ch_gets.sum;
            reorders.min = ch_reorders.sum;
            reorders.max = ch_reorders.sum;
        } else {
            if (((double)head->len) < len.min)
                len.min = (double) head->len;
            if (((double)head->len) > len.max)
                len.max = (double) head->len;
            if (ch_looks.sum < looks.min)
                looks.min = ch_looks.sum;
            else if (ch_looks.sum > looks.max)
                looks.max = ch_looks.sum;
            if (ch_gets.sum < gets.min)
                gets.min = ch_gets.sum;
            else if (ch_gets.sum > gets.max)
                gets.max = ch_gets.sum;
            if (ch_reorders.sum < reorders.min)
                reorders.min = ch_reorders.sum;
            else if (ch_reorders.sum > reorders.max)
                reorders.max = ch_reorders.sum;
        }
        if ((flags & VOL_STATS_PER_CHAIN2) && queue_IsNotEmpty(head)) {
            /* compute detailed per-chain stats */
            struct stats hdr_loads, hdr_gets;
            double v_looks, v_loads, v_gets;

            /* initialize stats with data from first element in chain */
            vp = queue_First(head, Volume);
            v_looks = UInt64ToDouble(&vp->stats.hash_lookups);
            v_loads = UInt64ToDouble(&vp->stats.hdr_loads);
            v_gets = UInt64ToDouble(&vp->stats.hdr_gets);
            ch_gets.min = ch_gets.max = v_looks;
            hdr_loads.min = hdr_loads.max = v_loads;
            hdr_gets.min = hdr_gets.max = v_gets;
            hdr_loads.sum = hdr_gets.sum = 0;

            vp = queue_Next(vp, Volume);
            /* pull in stats from remaining elements in chain */
            for (queue_ScanFrom(head, vp, vp, np, Volume)) {
                v_looks = UInt64ToDouble(&vp->stats.hash_lookups);
                v_loads = UInt64ToDouble(&vp->stats.hdr_loads);
                v_gets = UInt64ToDouble(&vp->stats.hdr_gets);

                hdr_loads.sum += v_loads;
                hdr_gets.sum += v_gets;

                if (v_looks < ch_gets.min)
                    ch_gets.min = v_looks;
                else if (v_looks > ch_gets.max)
                    ch_gets.max = v_looks;

                if (v_loads < hdr_loads.min)
                    hdr_loads.min = v_loads;
                else if (v_loads > hdr_loads.max)
                    hdr_loads.max = v_loads;

                if (v_gets < hdr_gets.min)
                    hdr_gets.min = v_gets;
                else if (v_gets > hdr_gets.max)
                    hdr_gets.max = v_gets;
            }
            /* compute per-chain averages */
            ch_gets.avg = ch_gets.sum / ((double)head->len);
            hdr_loads.avg = hdr_loads.sum / ((double)head->len);
            hdr_gets.avg = hdr_gets.sum / ((double)head->len);

            /* dump per-chain stats */
            Log("Volume hash chain %d : len=%d, looks=%s, reorders=%s\n",
                i, head->len,
                DoubleToPrintable(ch_looks.sum, pr_buf[0], sizeof(pr_buf[0])),
                DoubleToPrintable(ch_reorders.sum, pr_buf[1], sizeof(pr_buf[1])));
            Log("\tVolume gets : min=%s, max=%s, avg=%s, total=%s\n",
                DoubleToPrintable(ch_gets.min, pr_buf[0], sizeof(pr_buf[0])),
                DoubleToPrintable(ch_gets.max, pr_buf[1], sizeof(pr_buf[1])),
                DoubleToPrintable(ch_gets.avg, pr_buf[2], sizeof(pr_buf[2])),
                DoubleToPrintable(ch_gets.sum, pr_buf[3], sizeof(pr_buf[3])));
            Log("\tHDR gets : min=%s, max=%s, avg=%s, total=%s\n",
                DoubleToPrintable(hdr_gets.min, pr_buf[0], sizeof(pr_buf[0])),
                DoubleToPrintable(hdr_gets.max, pr_buf[1], sizeof(pr_buf[1])),
                DoubleToPrintable(hdr_gets.avg, pr_buf[2], sizeof(pr_buf[2])),
                DoubleToPrintable(hdr_gets.sum, pr_buf[3], sizeof(pr_buf[3])));
            Log("\tHDR loads : min=%s, max=%s, avg=%s, total=%s\n",
                DoubleToPrintable(hdr_loads.min, pr_buf[0], sizeof(pr_buf[0])),
                DoubleToPrintable(hdr_loads.max, pr_buf[1], sizeof(pr_buf[1])),
                DoubleToPrintable(hdr_loads.avg, pr_buf[2], sizeof(pr_buf[2])),
                DoubleToPrintable(hdr_loads.sum, pr_buf[3], sizeof(pr_buf[3])));
        } else if (flags & VOL_STATS_PER_CHAIN) {
            /* dump simple per-chain stats */
            Log("Volume hash chain %d : len=%d, looks=%s, gets=%s, reorders=%s\n",
                i, head->len,
                DoubleToPrintable(ch_looks.sum, pr_buf[0], sizeof(pr_buf[0])),
                DoubleToPrintable(ch_gets.sum, pr_buf[1], sizeof(pr_buf[1])),
                DoubleToPrintable(ch_reorders.sum, pr_buf[2], sizeof(pr_buf[2])));
        }
        VHashEndExclusive_r(head);
    }
    /* compute global averages */
    len.avg      = len.sum / ((double)VolumeHashTable.Size);
    looks.avg    = looks.sum / ((double)VolumeHashTable.Size);
    gets.avg     = gets.sum / ((double)VolumeHashTable.Size);
    reorders.avg = reorders.sum / ((double)VolumeHashTable.Size);

    /* dump global stats */
    Log("Volume hash summary: %d buckets\n", VolumeHashTable.Size);
    Log(" chain length : min=%s, max=%s, avg=%s, total=%s\n",
        DoubleToPrintable(len.min, pr_buf[0], sizeof(pr_buf[0])),
        DoubleToPrintable(len.max, pr_buf[1], sizeof(pr_buf[1])),
        DoubleToPrintable(len.avg, pr_buf[2], sizeof(pr_buf[2])),
        DoubleToPrintable(len.sum, pr_buf[3], sizeof(pr_buf[3])));
    Log(" looks : min=%s, max=%s, avg=%s, total=%s\n",
        DoubleToPrintable(looks.min, pr_buf[0], sizeof(pr_buf[0])),
        DoubleToPrintable(looks.max, pr_buf[1], sizeof(pr_buf[1])),
        DoubleToPrintable(looks.avg, pr_buf[2], sizeof(pr_buf[2])),
        DoubleToPrintable(looks.sum, pr_buf[3], sizeof(pr_buf[3])));
    Log(" gets : min=%s, max=%s, avg=%s, total=%s\n",
        DoubleToPrintable(gets.min, pr_buf[0], sizeof(pr_buf[0])),
        DoubleToPrintable(gets.max, pr_buf[1], sizeof(pr_buf[1])),
        DoubleToPrintable(gets.avg, pr_buf[2], sizeof(pr_buf[2])),
        DoubleToPrintable(gets.sum, pr_buf[3], sizeof(pr_buf[3])));
    Log(" reorders : min=%s, max=%s, avg=%s, total=%s\n",
        DoubleToPrintable(reorders.min, pr_buf[0], sizeof(pr_buf[0])),
        DoubleToPrintable(reorders.max, pr_buf[1], sizeof(pr_buf[1])),
        DoubleToPrintable(reorders.avg, pr_buf[2], sizeof(pr_buf[2])),
        DoubleToPrintable(reorders.sum, pr_buf[3], sizeof(pr_buf[3])));
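
    /*
     * Sample of the summary lines emitted above (editorial note, figures are
     * invented):
     *
     *     Volume hash summary: 256 buckets
     *      chain length : min=0, max=9, avg=3, total=811
     *
     * The looks/gets/reorders lines follow the same min/max/avg/total
     * pattern, with each figure rendered through DoubleToPrintable().
     */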
    /* print extended disk related statistics */
    {
        struct DiskPartition64 * diskP;
        afs_uint32 vol_count[VOLMAXPARTS+1];
        byte part_exists[VOLMAXPARTS+1];
        Device id;

        memset(vol_count, 0, sizeof(vol_count));
        memset(part_exists, 0, sizeof(part_exists));

        for (diskP = DiskPartitionList; diskP; diskP = diskP->next) {
            id = diskP->index;
            vol_count[id] = diskP->vol_list.len;
            part_exists[id] = 1;
        }

        for (i = 0; i <= VOLMAXPARTS; i++) {
            if (part_exists[i]) {
                /* XXX while this is currently safe, it is a violation
                 *     of the VGetPartitionById_r interface contract. */
                diskP = VGetPartitionById_r(i, 0);
                if (diskP) {
                    Log("Partition %s has %d online volumes\n",
                        VPartitionPath(diskP), diskP->vol_list.len);
                }
            }
        }
    }
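
    /*
     * Example log line from the loop above (editorial note, values are
     * invented):
     *
     *     Partition /vicepa has 42 online volumes
     */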
    /* print extended VLRU statistics */
    if (VVLRUExtStats_r(&vlru_stats, vol_sum) == 0) {
        afs_uint32 idx, cur, lpos;
        VolumeId line[5];

        Log("VLRU State Dump:\n\n");

        for (idx = VLRU_QUEUE_NEW; idx < VLRU_QUEUE_INVALID; idx++) {
            Log("\t%s:\n", vlru_idx_to_string(idx));

            lpos = 0;
            /* queue_info[idx].len is a count, so this queue's slice of vec
             * ends at start + len */
            for (cur = vlru_stats.queue_info[idx].start;
                 cur < vlru_stats.queue_info[idx].start + vlru_stats.queue_info[idx].len;
                 cur++) {
                line[lpos++] = vlru_stats.vec[cur].volid;
                if (lpos == 5) {
                    Log("\t\t%u, %u, %u, %u, %u,\n",
                        line[0], line[1], line[2], line[3], line[4]);
                    lpos = 0;
                }
            }
            if (lpos) {
                while (lpos < 5)
                    line[lpos++] = 0;
                Log("\t\t%u, %u, %u, %u, %u\n",
                    line[0], line[1], line[2], line[3], line[4]);
            }
        }

        free(vlru_stats.vec);
    }
}
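
/*
 * Example of the VLRU dump format produced above (editorial note, volume IDs
 * are made up for illustration):
 *
 *     VLRU State Dump:
 *
 *         VLRU_QUEUE_NEW:
 *             536870918, 536870921, 536870924, 536870927, 536870930,
 *             536870933, 0, 0, 0, 0
 *         VLRU_QUEUE_MID:
 *             ...
 *
 * Rows are emitted five volume IDs at a time; a final partial row is padded
 * with zeros.
 */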
void
VPrintExtendedCacheStats(int flags)
{
    VOL_LOCK;
    VPrintExtendedCacheStats_r(flags);
    VOL_UNLOCK;
}
#endif /* AFS_DEMAND_ATTACH_FS */
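
/*
 * Usage sketch (editorial note; the real call sites live elsewhere in the
 * fileserver): as reconstructed above, both wrappers take VOL_LOCK
 * themselves, so they are meant to be called without the lock held, e.g.
 *
 *     VPrintCacheStats();
 *     VPrintExtendedCacheStats(VOL_STATS_PER_CHAIN2);
 *
 * VPrintCacheStats() logs the header cache summary; the extended variant is
 * only available when AFS_DEMAND_ATTACH_FS is defined.
 */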