/*
* Copyright 2000, International Business Machines Corporation and others.
* All Rights Reserved.
- *
+ *
* This software has been released under the terms of the IBM Public
* License. For details, see the LICENSE file in the top-level source
* directory or online at http://www.openafs.org/dl/license10.html
#include "volume.h"
#include "partition.h"
#include "volume_inline.h"
+#include "common.h"
+
#ifdef AFS_PTHREAD_ENV
#include <assert.h>
#else /* AFS_PTHREAD_ENV */
pthread_cond_t vol_put_volume_cond;
pthread_cond_t vol_sleep_cond;
pthread_cond_t vol_init_attach_cond;
+pthread_cond_t vol_vinit_cond;
int vol_attach_threads = 1;
#endif /* AFS_PTHREAD_ENV */
extern void *calloc(), *realloc();
#endif
-/*@printflike@*/ extern void Log(const char *format, ...);
-
/* Forward declarations */
static Volume *attach2(Error * ec, VolId volumeId, char *path,
- struct DiskPartition64 *partp, Volume * vp,
+ struct DiskPartition64 *partp, Volume * vp,
int isbusy, int mode);
static void ReallyFreeVolume(Volume * vp);
#ifdef AFS_DEMAND_ATTACH_FS
static void VScanUpdateList(void);
#endif /* !AFS_DEMAND_ATTACH_FS */
static void VInitVolumeHeaderCache(afs_uint32 howMany);
-static int GetVolumeHeader(register Volume * vp);
-static void ReleaseVolumeHeader(register struct volHeader *hd);
-static void FreeVolumeHeader(register Volume * vp);
-static void AddVolumeToHashTable(register Volume * vp, int hashid);
-static void DeleteVolumeFromHashTable(register Volume * vp);
+static int GetVolumeHeader(Volume * vp);
+static void ReleaseVolumeHeader(struct volHeader *hd);
+static void FreeVolumeHeader(Volume * vp);
+static void AddVolumeToHashTable(Volume * vp, int hashid);
+static void DeleteVolumeFromHashTable(Volume * vp);
#if 0
static int VHold(Volume * vp);
#endif
static void VReleaseVolumeHandles_r(Volume * vp);
static void VCloseVolumeHandles_r(Volume * vp);
static void LoadVolumeHeader(Error * ec, Volume * vp);
-static int VCheckOffline(register Volume * vp);
-static int VCheckDetach(register Volume * vp);
+static int VCheckOffline(Volume * vp);
+static int VCheckDetach(Volume * vp);
static Volume * GetVolume(Error * ec, Error * client_ec, VolId volumeId, Volume * hint, int flags);
int LogLevel; /* Vice loglevel--not defined as extern so that it will be
/*
* when possible, don't just reorder single elements, but reorder
* entire chains of elements at once. a chain of elements that
- * exceed the element previous to the pivot by at least CHAIN_THRESH
+ * exceed the element previous to the pivot by at least CHAIN_THRESH
* accesses are moved in front of the chain whose elements have at
* least CHAIN_THRESH less accesses than the pivot element
*/
#endif /* AFS_PTHREAD_ENV */
#ifndef AFS_DEMAND_ATTACH_FS
-static int VAttachVolumesByPartition(struct DiskPartition64 *diskP,
+static int VAttachVolumesByPartition(struct DiskPartition64 *diskP,
int * nAttached, int * nUnattached);
#endif /* AFS_DEMAND_ATTACH_FS */
static void VVByPListWait_r(struct DiskPartition64 * dp);
/* online salvager */
-static int VCheckSalvage(register Volume * vp);
+static int VCheckSalvage(Volume * vp);
#if defined(SALVSYNC_BUILD_CLIENT) || defined(FSSYNC_BUILD_CLIENT)
static int VScheduleSalvage_r(Volume * vp);
#endif
#endif /* AFS_DEMAND_ATTACH_FS */
-struct Lock vol_listLock; /* Lock obtained when listing volumes:
- * prevents a volume from being missed
- * if the volume is attached during a
+struct Lock vol_listLock; /* Lock obtained when listing volumes:
+ * prevents a volume from being missed
+ * if the volume is attached during a
* list volumes */
/***************************************************/
/* Startup routines */
/***************************************************/
+
+#if defined(FAST_RESTART) && defined(AFS_DEMAND_ATTACH_FS)
+# error FAST_RESTART and DAFS are incompatible. For the DAFS equivalent \
+ of FAST_RESTART, use the -unsafe-nosalvage fileserver argument
+#endif
+
/**
* assign default values to a VolumePackageOptions struct.
*
opts->canUseFSSYNC = 0;
opts->canUseSALVSYNC = 0;
+#ifdef FAST_RESTART
+ opts->unsafe_attach = 1;
+#else /* !FAST_RESTART */
+ opts->unsafe_attach = 0;
+#endif /* !FAST_RESTART */
+
switch (pt) {
case fileServer:
opts->canScheduleSalvage = 1;
}
}
+/**
+ * Set VInit to a certain value, and signal waiters.
+ *
+ * @param[in] value the value to set VInit to
+ *
+ * @pre VOL_LOCK held
+ *
+ * @note on pthread builds this wakes threads blocked in
+ *       VOL_CV_WAIT(&vol_vinit_cond), i.e. callers waiting for the
+ *       volume package initialization level to change
+ */
+static void
+VSetVInit_r(int value)
+{
+ VInit = value;
+#ifdef AFS_PTHREAD_ENV
+ /* NOTE(review): the broadcast lives inside assert(); if this code were
+  * ever compiled with NDEBUG the wakeup would be elided and waiters on
+  * vol_vinit_cond would block forever -- confirm NDEBUG is never set for
+  * these builds (matches the file-wide assert(pthread_*() == 0) style) */
+ assert(pthread_cond_broadcast(&vol_vinit_cond) == 0);
+#endif
+}
+
int
VInitVolumePackage2(ProgramType pt, VolumePackageOptions * opts)
{
assert(pthread_cond_init(&vol_put_volume_cond, NULL) == 0);
assert(pthread_cond_init(&vol_sleep_cond, NULL) == 0);
assert(pthread_cond_init(&vol_init_attach_cond, NULL) == 0);
+ assert(pthread_cond_init(&vol_vinit_cond, NULL) == 0);
#else /* AFS_PTHREAD_ENV */
IOMGR_Initialize();
#endif /* AFS_PTHREAD_ENV */
assert(pthread_mutex_init(&vol_salvsync_mutex, NULL) == 0);
#endif /* AFS_DEMAND_ATTACH_FS */
- /* Ok, we have done enough initialization that fileserver can
- * start accepting calls, even though the volumes may not be
+ /* Ok, we have done enough initialization that fileserver can
+ * start accepting calls, even though the volumes may not be
* available just yet.
*/
VInit = 1;
}
}
VOL_LOCK;
- VInit = 2; /* Initialized, and all volumes have been attached */
+ VSetVInit_r(2); /* Initialized, and all volumes have been attached */
LWP_NoYieldSignal(VInitAttachVolumes);
VOL_UNLOCK;
return 0;
assert(pthread_cond_destroy(¶ms.thread_done_cv) == 0);
}
VOL_LOCK;
- VInit = 2; /* Initialized, and all volumes have been attached */
+ VSetVInit_r(2); /* Initialized, and all volumes have been attached */
assert(pthread_cond_broadcast(&vol_init_attach_cond) == 0);
VOL_UNLOCK;
return 0;
}
VOL_LOCK;
- VInit = 2; /* Initialized, and all volumes have been attached */
+ VSetVInit_r(2); /* Initialized, and all volumes have been attached */
assert(pthread_cond_broadcast(&vol_init_attach_cond) == 0);
VOL_UNLOCK;
for (params.n_parts=0, diskP = DiskPartitionList;
diskP; diskP = diskP->next, params.n_parts++);
- Log("VShutdown: shutting down on-line volumes on %d partition%s...\n",
+ Log("VShutdown: shutting down on-line volumes on %d partition%s...\n",
params.n_parts, params.n_parts > 1 ? "s" : "");
if (vol_attach_threads > 1) {
}
Log("VShutdown: partition %s has %d volumes with attached headers\n",
VPartitionPath(diskP), count);
-
+
/* build up the pass 0 shutdown work queue */
dpq = (struct diskpartition_queue_t *) malloc(sizeof(struct diskpartition_queue_t));
(&tid, &attrs, &VShutdownThread,
¶ms) == 0);
}
-
+
/* wait for all the pass 0 shutdowns to complete */
while (params.n_threads_complete < params.n_threads) {
assert(pthread_cond_wait(¶ms.master_cv, ¶ms.lock) == 0);
/* run the parallel shutdown scheduler. it will drop the glock internally */
ShutdownController(¶ms);
-
+
/* wait for all the workers to finish pass 3 and terminate */
while (params.pass < 4) {
VOL_CV_WAIT(¶ms.cv);
}
-
+
assert(pthread_attr_destroy(&attrs) == 0);
assert(pthread_cond_destroy(¶ms.cv) == 0);
assert(pthread_cond_destroy(¶ms.master_cv) == 0);
VShutdown_r(void)
{
int i;
- register Volume *vp, *np;
- register afs_int32 code;
+ Volume *vp, *np;
+ afs_int32 code;
if (VInit < 2) {
Log("VShutdown: aborting attach volumes\n");
if (LogLevel >= 5)
Log("VShutdown: Attempting to take volume %u offline.\n",
vp->hashid);
-
+
/* next, take the volume offline (drops reference count) */
VOffline_r(vp, "File server was shut down");
}
for (diskP = DiskPartitionList; diskP; diskP=diskP->next) {
id = diskP->index;
Log("ShutdownController: part[%d] : (len=%d, thread_target=%d, done_pass=%d, pass_head=%p)\n",
- id,
+ id,
diskP->vol_list.len,
- shadow.part_thread_target[id],
- shadow.part_done_pass[id],
+ shadow.part_thread_target[id],
+ shadow.part_done_pass[id],
shadow.part_pass_head[id]);
}
/* create the shutdown thread work schedule.
* this scheduler tries to implement fairness
- * by allocating at least 1 thread to each
+ * by allocating at least 1 thread to each
* partition with volumes to be shutdown,
* and then it attempts to allocate remaining
* threads based upon the amount of work left
for (diskP = DiskPartitionList; diskP; diskP = diskP->next) {
sum += diskP->vol_list.len;
}
-
+
params->schedule_version++;
params->vol_remaining = sum;
/* compute the residues */
for (diskP = DiskPartitionList; diskP; diskP = diskP->next) {
id = diskP->index;
- part_residue[id] = diskP->vol_list.len -
+ part_residue[id] = diskP->vol_list.len -
(params->part_thread_target[id] * thr_workload);
}
break;
}
}
-
+
if (!found) {
- /* hmm. for some reason the controller thread couldn't find anything for
+ /* hmm. for some reason the controller thread couldn't find anything for
* us to do. let's see if there's anything we can do */
for (diskP = DiskPartitionList; diskP; diskP = diskP->next) {
id = diskP->index;
}
}
}
-
+
/* do work on this partition until either the controller
* creates a new schedule, or we run out of things to do
* on this partition */
}
pass = params->pass;
}
-
+
/* for fairness */
VOL_UNLOCK;
pthread_yield();
return NULL;
}
-/* shut down all volumes on a given disk partition
+/* shut down all volumes on a given disk partition
*
* note that this function will not allow mp-fast
* shutdown of a partition */
VVByPListBeginExclusive_r(dp);
/* pick the low-hanging fruit first,
- * then do the complicated ones last
+ * then do the complicated ones last
* (has the advantage of keeping
* in-use volumes up until the bitter end) */
for (pass = 0, total=0; pass < 4; pass++) {
* 0 to only "shutdown" {pre,un}attached and error state volumes
* 1 to also shutdown attached volumes w/ volume header loaded
* 2 to also shutdown attached volumes w/o volume header loaded
- * 3 to also shutdown exclusive state volumes
+ * 3 to also shutdown exclusive state volumes
*
* caller MUST hold exclusive access on the hash chain
* because we drop vol_glock_mutex internally
- *
- * this function is reentrant for passes 1--3
- * (e.g. multiple threads can cooperate to
+ *
+ * this function is reentrant for passes 1--3
+ * (e.g. multiple threads can cooperate to
* shutdown a partition mp-fast)
*
* pass 0 is not scaleable because the volume state data is
ShutdownVByPForPass_r(struct DiskPartition64 * dp, int pass)
{
struct rx_queue * q = queue_First(&dp->vol_list, rx_queue);
- register int i = 0;
+ int i = 0;
while (ShutdownVolumeWalk_r(dp, pass, &q))
i++;
for (queue_ScanFrom(&dp->vol_list, qp, qp, nqp, rx_queue)) {
vp = (Volume *) (((char *)qp) - offsetof(Volume, vol_list));
-
+
switch (pass) {
case 0:
if ((V_attachState(vp) != VOL_STATE_UNATTACHED) &&
(V_attachState(vp) != VOL_STATE_ERROR) &&
+ (V_attachState(vp) != VOL_STATE_DELETED) &&
(V_attachState(vp) != VOL_STATE_PREATTACHED)) {
break;
}
VWaitExclusiveState_r(vp);
assert(VIsValidState(V_attachState(vp)));
-
+
switch(V_attachState(vp)) {
case VOL_STATE_SALVAGING:
/* Leave salvaging volumes alone. Any in-progress salvages will
case VOL_STATE_ERROR:
VChangeState_r(vp, VOL_STATE_UNATTACHED);
case VOL_STATE_UNATTACHED:
+ case VOL_STATE_DELETED:
break;
case VOL_STATE_GOING_OFFLINE:
case VOL_STATE_SHUTTING_DOWN:
default:
break;
}
-
+
VCancelReservation_r(vp);
vp = NULL;
return 0;
return;
}
- if (FDH_SEEK(fdP, 0, SEEK_SET) < 0) {
- *ec = VSALVAGE;
- FDH_REALLYCLOSE(fdP);
- return;
- }
vsn = (struct versionStamp *)to;
- if (FDH_READ(fdP, to, size) != size || vsn->magic != magic) {
+ if (FDH_PREAD(fdP, to, size, 0) != size || vsn->magic != magic) {
*ec = VSALVAGE;
FDH_REALLYCLOSE(fdP);
return;
*ec = VSALVAGE;
return;
}
- if (FDH_SEEK(fdP, 0, SEEK_SET) < 0) {
- *ec = VSALVAGE;
- FDH_REALLYCLOSE(fdP);
- return;
- }
- if (FDH_WRITE(fdP, (char *)&V_disk(vp), sizeof(V_disk(vp)))
+ if (FDH_PWRITE(fdP, (char *)&V_disk(vp), sizeof(V_disk(vp)), 0)
!= sizeof(V_disk(vp))) {
*ec = VSALVAGE;
FDH_REALLYCLOSE(fdP);
* Converts an on-disk representation of a volume header to
* the in-memory representation of a volume header.
*
- * Makes the assumption that AFS has *always*
+ * Makes the assumption that AFS has *always*
* zero'd the volume header file so that high parts of inode
* numbers are 0 in older (SGI EFS) volume header files.
*/
* @return volume object pointer
*
* @note A pre-attached volume will only have its partition
- * and hashid fields initialized. At first call to
+ * and hashid fields initialized. At first call to
* VGetVolume, the volume will be fully attached.
*
*/
Volume *
VPreAttachVolumeByName_r(Error * ec, char *partition, char *name)
{
- return VPreAttachVolumeById_r(ec,
+ return VPreAttachVolumeById_r(ec,
partition,
VolumeNumber(name));
}
* @internal volume package internal use only.
*/
Volume *
-VPreAttachVolumeById_r(Error * ec,
+VPreAttachVolumeById_r(Error * ec,
char * partition,
VolId volumeId)
{
* properly in this case.
*
* @note If there is already a volume object registered with
- * the same volume id, its pointer MUST be passed as
+ * the same volume id, its pointer MUST be passed as
* argument vp. Failure to do so will result in a silent
* failure to preattach.
*
* @internal volume package internal use only.
*/
-Volume *
-VPreAttachVolumeByVp_r(Error * ec,
- struct DiskPartition64 * partp,
+Volume *
+VPreAttachVolumeByVp_r(Error * ec,
+ struct DiskPartition64 * partp,
Volume * vp,
VolId vid)
{
*ec = 0;
/* check to see if pre-attach already happened */
- if (vp &&
- (V_attachState(vp) != VOL_STATE_UNATTACHED) &&
+ if (vp &&
+ (V_attachState(vp) != VOL_STATE_UNATTACHED) &&
+ (V_attachState(vp) != VOL_STATE_DELETED) &&
(V_attachState(vp) != VOL_STATE_PREATTACHED) &&
!VIsErrorState(V_attachState(vp))) {
/*
vp = nvp;
goto done;
} else {
- /* hack to make up for VChangeState_r() decrementing
+ /* hack to make up for VChangeState_r() decrementing
* the old state counter */
VStats.state_levels[0]++;
}
Volume *
VAttachVolumeByName_r(Error * ec, char *partition, char *name, int mode)
{
- register Volume *vp = NULL;
+ Volume *vp = NULL;
struct DiskPartition64 *partp;
char path[64];
int isbusy = 0;
#endif /* AFS_DEMAND_ATTACH_FS */
*ec = 0;
-
+
volumeId = VolumeNumber(name);
if (!(partp = VGetPartition_r(partition, 0))) {
* - GOING_OFFLINE
* - SALVAGING
* - ERROR
+ * - DELETED
*/
if (vp->specialStatus == VBUSY)
isbusy = 1;
-
+
/* if it's already attached, see if we can return it */
if (V_attachState(vp) == VOL_STATE_ATTACHED) {
VGetVolumeByVp_r(ec, vp);
}
/* pre-attach volume if it hasn't been done yet */
- if (!vp ||
+ if (!vp ||
(V_attachState(vp) == VOL_STATE_UNATTACHED) ||
+ (V_attachState(vp) == VOL_STATE_DELETED) ||
(V_attachState(vp) == VOL_STATE_ERROR)) {
svp = vp;
vp = VPreAttachVolumeByVp_r(ec, partp, vp, volumeId);
assert(vp != NULL);
- /* handle pre-attach races
+ /* handle pre-attach races
*
* multiple threads can race to pre-attach a volume,
* but we can't let them race beyond that
- *
+ *
* our solution is to let the first thread to bring
* the volume into an exclusive state win; the other
* threads just wait until it finishes bringing the
if (*ec != VSALVAGING)
#endif /* AFS_DEMAND_ATTACH_FS */
FSYNC_VolOp(volumeId, partition, FSYNC_VOL_ON, 0, NULL);
- } else
+ } else
#endif
if (programType == fileServer && vp) {
#ifdef AFS_DEMAND_ATTACH_FS
- /*
+ /*
* we can get here in cases where we don't "own"
* the volume (e.g. volume owned by a utility).
* short circuit around potential disk header races.
/* volume utility should never call AttachByVp */
assert(programType == fileServer);
-
+
volumeId = vp->hashid;
partp = vp->partition;
VolumeExternalName_r(volumeId, name, sizeof(name));
}
/* pre-attach volume if it hasn't been done yet */
- if (!vp ||
+ if (!vp ||
(V_attachState(vp) == VOL_STATE_UNATTACHED) ||
+ (V_attachState(vp) == VOL_STATE_DELETED) ||
(V_attachState(vp) == VOL_STATE_ERROR)) {
nvp = VPreAttachVolumeByVp_r(ec, partp, vp, volumeId);
if (*ec) {
vp = nvp;
}
}
-
+
assert(vp != NULL);
VChangeState_r(vp, VOL_STATE_ATTACHING);
* for any reason, skip to the end. We cannot
* safely call VUpdateVolume unless we "own" it.
*/
- if (*ec ||
+ if (*ec ||
(vp == NULL) ||
(V_attachState(vp) != VOL_STATE_ATTACHED)) {
goto done;
IncUInt64(&vp->stats.hdr_loads);
VOL_UNLOCK;
#endif /* AFS_DEMAND_ATTACH_FS */
-
+
if (*ec) {
Log("VAttachVolume: Error reading diskDataHandle header for vol %lu; "
"error=%u\n", afs_printable_uint32_lu(volid), *ec);
/* have we read in the header successfully? */
int read_header = 0;
+#ifdef AFS_DEMAND_ATTACH_FS
/* should we FreeVolume(vp) instead of VCheckFree(vp) in the error
* cleanup? */
int forcefree = 0;
+ /* in the case of an error, to what state should the volume be
+ * transitioned? */
+ VolState error_state = VOL_STATE_ERROR;
+#endif /* AFS_DEMAND_ATTACH_FS */
+
*ec = 0;
vp->vnodeIndex[vLarge].handle = NULL;
VOL_LOCK;
vp->nextVnodeUnique = V_uniquifier(vp);
-#ifndef FAST_RESTART
if (VShouldCheckInUse(mode) && V_inUse(vp) && VolumeWriteable(vp)) {
if (!V_needsSalvaged(vp)) {
V_needsSalvaged(vp) = 1;
goto error;
}
-#endif /* FAST_RESTART */
if (programType == fileServer && V_destroyMe(vp) == DESTROY_ME) {
/* Only check destroyMe if we are the fileserver, since the
VRequestSalvage_r(ec, vp, SALVSYNC_ERROR, VOL_SALVAGE_INVALIDATE_HEADER);
VChangeState_r(vp, VOL_STATE_ERROR);
vp->nUsers = 0;
+ forcefree = 1;
#endif /* AFS_DEMAND_ATTACH_FS */
Log("VAttachVolume: volume %s is junk; it should be destroyed at next salvage\n", path);
*ec = VNOVOL;
- forcefree = 1;
goto error;
}
V_inUse(vp) = fileServer;
V_offlineMessage(vp)[0] = '\0';
}
+ if (!V_inUse(vp)) {
+ *ec = VNOVOL;
+#ifdef AFS_DEMAND_ATTACH_FS
+ /* Put the vol into PREATTACHED state, so if someone tries to
+ * access it again, we try to attach, see that we're not blessed,
+ * and give a VNOVOL error again. Putting it into UNATTACHED state
+ * would result in a VOFFLINE error instead. */
+ error_state = VOL_STATE_PREATTACHED;
+#endif /* AFS_DEMAND_ATTACH_FS */
+
+ /* mimic e.g. GetVolume errors */
+ if (!V_blessed(vp)) {
+ Log("Volume %lu offline: not blessed\n", afs_printable_uint32_lu(V_id(vp)));
+ FreeVolumeHeader(vp);
+ } else if (!V_inService(vp)) {
+ Log("Volume %lu offline: not in service\n", afs_printable_uint32_lu(V_id(vp)));
+ FreeVolumeHeader(vp);
+ } else {
+ Log("Volume %lu offline: needs salvage\n", afs_printable_uint32_lu(V_id(vp)));
+ *ec = VSALVAGE;
+#ifdef AFS_DEMAND_ATTACH_FS
+ error_state = VOL_STATE_ERROR;
+ /* see if we can recover */
+ VRequestSalvage_r(ec, vp, SALVSYNC_NEEDED, VOL_SALVAGE_INVALIDATE_HEADER);
+#endif
+ }
+#ifdef AFS_DEMAND_ATTACH_FS
+ vp->nUsers = 0;
+#endif
+ goto error;
+ }
} else {
#ifdef AFS_DEMAND_ATTACH_FS
if ((mode != V_PEEK) && (mode != V_SECRETLY))
error:
#ifdef AFS_DEMAND_ATTACH_FS
if (!VIsErrorState(V_attachState(vp))) {
- VChangeState_r(vp, VOL_STATE_ERROR);
+ VChangeState_r(vp, error_state);
}
#endif /* AFS_DEMAND_ATTACH_FS */
char *part, *name;
VGetVolumePath(ec, volumeId, &part, &name);
if (*ec) {
- register Volume *vp;
+ Volume *vp;
Error error;
vp = VGetVolume_r(&error, volumeId);
if (vp) {
* is dropped within VHold */
#ifdef AFS_DEMAND_ATTACH_FS
static int
-VHold_r(register Volume * vp)
+VHold_r(Volume * vp)
{
Error error;
}
#else /* AFS_DEMAND_ATTACH_FS */
static int
-VHold_r(register Volume * vp)
+VHold_r(Volume * vp)
{
Error error;
#if 0
static int
-VHold(register Volume * vp)
+VHold(Volume * vp)
{
int retVal;
VOL_LOCK;
* @internal volume package internal use only
*/
void
-VPutVolume_r(register Volume * vp)
+VPutVolume_r(Volume * vp)
{
assert(--vp->nUsers >= 0);
if (vp->nUsers == 0) {
}
void
-VPutVolume(register Volume * vp)
+VPutVolume(Volume * vp)
{
VOL_LOCK;
VPutVolume_r(vp);
/* try to get a volume we've previously looked up */
/* for demand attach fs, caller MUST NOT hold a ref count on vp */
-Volume *
+Volume *
VGetVolumeByVp_r(Error * ec, Volume * vp)
{
return GetVolume(ec, NULL, vp->hashid, vp, 0);
Volume *avp, * rvp = hint;
#endif
- /*
+ /*
* if VInit is zero, the volume package dynamic
* data structures have not been initialized yet,
* and we must immediately return an error
VGET_CTR_INC(V3);
IncUInt64(&VStats.hdr_gets);
-
+
#ifdef AFS_DEMAND_ATTACH_FS
/* block if someone else is performing an exclusive op on this volume */
if (rvp != vp) {
}
/*
- * short circuit with VOFFLINE in the following circumstances:
- *
- * - VOL_STATE_UNATTACHED
+ * short circuit with VOFFLINE for VOL_STATE_UNATTACHED and
+ * VNOVOL for VOL_STATE_DELETED
*/
- if (V_attachState(vp) == VOL_STATE_UNATTACHED) {
+ if ((V_attachState(vp) == VOL_STATE_UNATTACHED) ||
+ (V_attachState(vp) == VOL_STATE_DELETED)) {
if (vp->specialStatus) {
*ec = vp->specialStatus;
+ } else if (V_attachState(vp) == VOL_STATE_DELETED) {
+ *ec = VNOVOL;
} else {
*ec = VOFFLINE;
}
* not VolOpRunningUnknown (attach2 would have converted it to Online
* or Offline)
*/
-
+
/* only valid before/during demand attachment */
assert(!vp->pending_vol_op || vp->pending_vol_op->vol_op_state != FSSYNC_VolOpRunningUnknown);
-
+
/* deny getvolume due to running mutually exclusive vol op */
if (vp->pending_vol_op && vp->pending_vol_op->vol_op_state==FSSYNC_VolOpRunningOffline) {
- /*
+ /*
* volume cannot remain online during this volume operation.
- * notify client.
+ * notify client.
*/
if (vp->specialStatus) {
/*
#endif /* AFS_DEMAND_ATTACH_FS */
break;
}
-
+
VGET_CTR_INC(V7);
if (vp->shuttingDown) {
VGET_CTR_INC(V8);
/* caller MUST hold a heavyweight ref on vp */
#ifdef AFS_DEMAND_ATTACH_FS
void
-VTakeOffline_r(register Volume * vp)
+VTakeOffline_r(Volume * vp)
{
Error error;
}
#else /* AFS_DEMAND_ATTACH_FS */
void
-VTakeOffline_r(register Volume * vp)
+VTakeOffline_r(Volume * vp)
{
assert(vp->nUsers > 0);
assert(programType == fileServer);
#endif /* AFS_DEMAND_ATTACH_FS */
void
-VTakeOffline(register Volume * vp)
+VTakeOffline(Volume * vp)
{
VOL_LOCK;
VTakeOffline_r(vp);
*
* @post needsSalvaged flag is set.
* for DAFS, salvage is requested.
- * no further references to the volume through the volume
+ * no further references to the volume through the volume
* package will be honored.
* all file descriptor and vnode caches are invalidated.
*
* @warning this is a heavy-handed interface. it results in
- * a volume going offline regardless of the current
+ * a volume going offline regardless of the current
* reference count state.
*
* @internal volume package internal use only
while (!VIsOfflineState(V_attachState(vp))) {
/* do not give corrupted volumes to the volserver */
if (vp->salvage.requested && vp->pending_vol_op->com.programType != salvageServer) {
- *ec = 1;
+ *ec = 1;
goto error;
}
VWaitStateChange_r(vp);
}
- *ec = 0;
+ *ec = 0;
error:
VCancelReservation_r(vp);
}
void
VDetachVolume_r(Error * ec, Volume * vp)
{
+#ifdef FSSYNC_BUILD_CLIENT
VolumeId volume;
struct DiskPartition64 *tpartp;
int notifyServer = 0;
int useDone = FSYNC_VOL_ON;
- *ec = 0; /* always "succeeds" */
if (VCanUseFSSYNC()) {
notifyServer = vp->needsPutBack;
if (V_destroyMe(vp) == DESTROY_ME)
}
tpartp = vp->partition;
volume = V_id(vp);
+#endif /* FSSYNC_BUILD_CLIENT */
+
+ *ec = 0; /* always "succeeds" */
DeleteVolumeFromHashTable(vp);
vp->shuttingDown = 1;
#ifdef AFS_DEMAND_ATTACH_FS
VLRU_Delete_r(vp);
VChangeState_r(vp, VOL_STATE_SHUTTING_DOWN);
#else
- if (programType != fileServer)
+ if (programType != fileServer)
V_inUse(vp) = 0;
#endif /* AFS_DEMAND_ATTACH_FS */
VPutVolume_r(vp);
*/
#ifdef FSSYNC_BUILD_CLIENT
if (VCanUseFSSYNC() && notifyServer) {
- /*
- * Note: The server is not notified in the case of a bogus volume
- * explicitly to make it possible to create a volume, do a partial
- * restore, then abort the operation without ever putting the volume
- * online. This is essential in the case of a volume move operation
- * between two partitions on the same server. In that case, there
- * would be two instances of the same volume, one of them bogus,
- * which the file server would attempt to put on line
+ /*
+ * Note: The server is not notified in the case of a bogus volume
+ * explicitly to make it possible to create a volume, do a partial
+ * restore, then abort the operation without ever putting the volume
+ * online. This is essential in the case of a volume move operation
+ * between two partitions on the same server. In that case, there
+ * would be two instances of the same volume, one of them bogus,
+ * which the file server would attempt to put on line
*/
FSYNC_VolOp(volume, tpartp->name, useDone, 0, NULL);
/* XXX this code path is only hit by volume utilities, thus
/* demand attach fs
*
* XXX need to investigate whether we can perform
- * DFlushVolume outside of vol_glock_mutex...
+ * DFlushVolume outside of vol_glock_mutex...
*
* VCloseVnodeFiles_r drops the glock internally */
DFlushVolume(vp->hashid);
/* For both VForceOffline and VOffline, we close all relevant handles.
* For VOffline, if we re-attach the volume, the files may possible be
- * different than before.
+ * different than before.
*/
/* for demand attach, caller MUST hold a ref count on vp */
static void
if (*ec) {
Log("VUpdateVolume: error updating volume header, volume %u (%s)\n",
V_id(vp), V_name(vp));
- /* try to update on-disk header,
+ /* try to update on-disk header,
* while preventing infinite recursion */
if (!(flags & VOL_UPDATE_NOFORCEOFF)) {
VForceOffline_r(vp, VOL_FORCEOFF_NOUPDATE);
* returns 1 if volume was freed, 0 otherwise */
#ifdef AFS_DEMAND_ATTACH_FS
static int
-VCheckDetach(register Volume * vp)
+VCheckDetach(Volume * vp)
{
int ret = 0;
Error ec = 0;
}
#else /* AFS_DEMAND_ATTACH_FS */
static int
-VCheckDetach(register Volume * vp)
+VCheckDetach(Volume * vp)
{
int ret = 0;
Error ec = 0;
* return 1 if volume went offline, 0 otherwise */
#ifdef AFS_DEMAND_ATTACH_FS
static int
-VCheckOffline(register Volume * vp)
+VCheckOffline(Volume * vp)
{
int ret = 0;
assert((V_attachState(vp) != VOL_STATE_ATTACHED) &&
(V_attachState(vp) != VOL_STATE_FREED) &&
(V_attachState(vp) != VOL_STATE_PREATTACHED) &&
- (V_attachState(vp) != VOL_STATE_UNATTACHED));
+ (V_attachState(vp) != VOL_STATE_UNATTACHED) &&
+ (V_attachState(vp) != VOL_STATE_DELETED));
/* valid states:
*
}
#else /* AFS_DEMAND_ATTACH_FS */
static int
-VCheckOffline(register Volume * vp)
+VCheckOffline(Volume * vp)
{
int ret = 0;
* from free()ing the Volume struct during an async i/o op */
/* register with the async volume op ref counter */
-/* VCreateReservation_r moved into inline code header because it
- * is now needed in vnode.c -- tkeiser 11/20/2007
+/* VCreateReservation_r moved into inline code header because it
+ * is now needed in vnode.c -- tkeiser 11/20/2007
*/
/**
*
* @internal volume package internal use only
*
- * @pre
+ * @pre
* @arg VOL_LOCK is held
* @arg lightweight refcount held
*
int ret = 0;
if ((vp->nUsers == 0) &&
(vp->nWaiters == 0) &&
- !(V_attachFlags(vp) & (VOL_IN_HASH |
- VOL_ON_VBYP_LIST |
+ !(V_attachFlags(vp) & (VOL_IN_HASH |
+ VOL_ON_VBYP_LIST |
VOL_IS_BUSY |
VOL_ON_VLRU))) {
ReallyFreeVolume(vp);
* @internal volume package internal use only.
*/
static int
-VCheckSalvage(register Volume * vp)
+VCheckSalvage(Volume * vp)
{
int ret = 0;
#if defined(SALVSYNC_BUILD_CLIENT) || defined(FSSYNC_BUILD_CLIENT)
* @param[in] flags see flags note below
*
* @note flags:
- * VOL_SALVAGE_INVALIDATE_HEADER causes volume header cache entry
+ * VOL_SALVAGE_INVALIDATE_HEADER causes volume header cache entry
* to be invalidated.
*
* @pre VOL_LOCK is held.
code = 1;
}
if (flags & VOL_SALVAGE_INVALIDATE_HEADER) {
- /* Instead of ReleaseVolumeHeader, we do FreeVolumeHeader()
- so that the the next VAttachVolumeByVp_r() invocation
- of attach2() will pull in a cached header
- entry and fail, then load a fresh one from disk and attach
- it to the volume.
+ /* Instead of ReleaseVolumeHeader, we do FreeVolumeHeader()
+ so that the next VAttachVolumeByVp_r() invocation
+ of attach2() will pull in a cached header
+ entry and fail, then load a fresh one from disk and attach
+ it to the volume.
*/
FreeVolumeHeader(vp);
}
*
* @note DAFS fileserver only
*
- * @note this should be called whenever a VGetVolume fails due to a
+ * @note this should be called whenever a VGetVolume fails due to a
* pending salvage request
*
* @todo should set exclusive state and drop glock around salvsync call
now = FT_ApproxTime();
/* update the salvageserver priority queue occasionally so that
- * frequently requested volumes get moved to the head of the queue
+ * frequently requested volumes get moved to the head of the queue
*/
if ((vp->salvage.scheduled) &&
(vp->stats.last_salvage_req < (now-SALVAGE_PRIO_UPDATE_INTERVAL))) {
* @retval 0 salvage scheduled successfully
* @retval 1 salvage not scheduled, or SALVSYNC/FSSYNC com error
*
- * @pre
+ * @pre
* @arg VOL_LOCK is held.
* @arg nUsers and nWaiters should be zero.
*
* to avoid fssync deadlocks
*/
if (!vp->salvage.scheduled) {
- /* if we haven't previously scheduled a salvage, do so now
+ /* if we haven't previously scheduled a salvage, do so now
*
* set the volume to an exclusive state and drop the lock
* around the SALVSYNC call
* @return operation status
* @retval 0 success
*
- * @pre
+ * @pre
* @arg VOL_LOCK is held.
* @arg client should have a live connection to the salvageserver.
*
*/
int
VDisconnectSALV_r(void)
-{
+{
return SALVSYNC_clientFinis();
}
* @retval 0 failure
* @retval 1 success
*
- * @pre
+ * @pre
* @arg VOL_LOCK is held.
* @arg client should have a live connection to the salvageserver.
*
* @retval 0 failure
* @retval 1 success
*
- * @pre
+ * @pre
* @arg VInit must equal 2.
* @arg Program Type must not be fileserver or salvager.
*
* @retval 0 failure
* @retval 1 success
*
- * @pre
+ * @pre
* @arg VInit must equal 2.
* @arg Program Type must not be fileserver or salvager.
* @arg VOL_LOCK is held.
VConnectFS_r(void)
{
int rc;
- assert((VInit == 2) &&
+ assert((VInit == 2) &&
(programType != fileServer) &&
(programType != salvager));
rc = FSYNC_clientInit();
- if (rc)
- VInit = 3;
+ if (rc) {
+ VSetVInit_r(3);
+ }
return rc;
}
/**
* disconnect from the fileserver SYNC service.
*
- * @pre
+ * @pre
* @arg client should have a live connection to the fileserver.
* @arg VOL_LOCK is held.
* @arg Program Type must not be fileserver or salvager.
assert((programType != fileServer) &&
(programType != salvager));
FSYNC_clientFinis();
- VInit = 2;
+ VSetVInit_r(2);
}
/**
*/
int
-VAllocBitmapEntry_r(Error * ec, Volume * vp,
+VAllocBitmapEntry_r(Error * ec, Volume * vp,
struct vnodeIndex *index, int flags)
{
int ret = 0;
- register byte *bp, *ep;
+ byte *bp, *ep;
#ifdef AFS_DEMAND_ATTACH_FS
VolState state_save;
#endif /* AFS_DEMAND_ATTACH_FS */
}
int
-VAllocBitmapEntry(Error * ec, Volume * vp, register struct vnodeIndex * index)
+VAllocBitmapEntry(Error * ec, Volume * vp, struct vnodeIndex * index)
{
int retVal;
VOL_LOCK;
}
void
-VFreeBitMapEntry_r(Error * ec, register struct vnodeIndex *index,
+VFreeBitMapEntry_r(Error * ec, struct vnodeIndex *index,
unsigned bitNumber)
{
unsigned int offset;
}
void
-VFreeBitMapEntry(Error * ec, register struct vnodeIndex *index,
+VFreeBitMapEntry(Error * ec, struct vnodeIndex *index,
unsigned bitNumber)
{
VOL_LOCK;
assert(vip->bitmap != NULL);
vip->bitmapOffset = 0;
#endif /* BITMAP_LATER */
- if (STREAM_SEEK(file, vcp->diskSize, 0) != -1) {
+ if (STREAM_ASEEK(file, vcp->diskSize) != -1) {
int bitNumber = 0;
for (bitNumber = 0; bitNumber < nVnodes + 100; bitNumber++) {
if (STREAM_READ(vnode, vcp->diskSize, 1, file) != 1)
*------------------------------------------------------------------------*/
int
-VAdjustVolumeStatistics_r(register Volume * vp)
+VAdjustVolumeStatistics_r(Volume * vp)
{
unsigned int now = FT_ApproxTime();
if (now - V_dayUseDate(vp) > OneDay) {
- register int ndays, i;
+ int ndays, i;
ndays = (now - V_dayUseDate(vp)) / OneDay;
for (i = 6; i > ndays - 1; i--)
} /*VAdjustVolumeStatistics */
int
-VAdjustVolumeStatistics(register Volume * vp)
+VAdjustVolumeStatistics(Volume * vp)
{
int retVal;
VOL_LOCK;
}
void
-VBumpVolumeUsage_r(register Volume * vp)
+VBumpVolumeUsage_r(Volume * vp)
{
unsigned int now = FT_ApproxTime();
V_accessDate(vp) = now;
}
void
-VBumpVolumeUsage(register Volume * vp)
+VBumpVolumeUsage(Volume * vp)
{
VOL_LOCK;
VBumpVolumeUsage_r(vp);
* initialization level indicates that all volumes are attached,
* which implies that all partitions are initialized. */
#ifdef AFS_PTHREAD_ENV
- sleep(10);
+ VOL_CV_WAIT(&vol_vinit_cond);
#else /* AFS_PTHREAD_ENV */
IOMGR_Sleep(10);
#endif /* AFS_PTHREAD_ENV */
static void
VScanUpdateList(void)
{
- register int i, gap;
- register Volume *vp;
+ int i, gap;
+ Volume *vp;
Error error;
afs_uint32 now = FT_ApproxTime();
/* Be careful with this code, since it works with interleaved calls to AddToVolumeUpdateList */
* in order to speed up fileserver shutdown
*
* (1) by soft detach we mean a process very similar
- * to VOffline, except the final state of the
+ * to VOffline, except the final state of the
* Volume will be VOL_STATE_PREATTACHED, instead
* of the usual VOL_STATE_UNATTACHED
*/
* @note DAFS only
*
* @note valid option parameters are:
- * @arg @c VLRU_SET_THRESH
+ * @arg @c VLRU_SET_THRESH
* set the period of inactivity after which
* volumes are eligible for soft detachment
- * @arg @c VLRU_SET_INTERVAL
+ * @arg @c VLRU_SET_INTERVAL
* set the time interval between calls
* to the volume LRU "garbage collector"
- * @arg @c VLRU_SET_MAX
+ * @arg @c VLRU_SET_MAX
* set the max number of volumes to deallocate
* in one GC pass
*/
*
* @post VLRU scanner thread internal timing parameters are computed
*
- * @note computes internal timing parameters based upon user-modifiable
+ * @note computes internal timing parameters based upon user-modifiable
* tunable parameters.
*
* @note DAFS only
*
* @note DAFS only
*
- * @todo We should probably set volume state to something exlcusive
+ * @todo We should probably set volume state to something exclusive
* (as @c VLRU_Add_r does) prior to dropping @c VOL_LOCK.
*
* @internal volume package internal use only.
VLRU_Wait_r(&volume_LRU.q[idx]);
} while (idx != vp->vlru.idx);
- /* now remove from the VLRU and update
+ /* now remove from the VLRU and update
* the appropriate counter */
queue_Remove(&vp->vlru);
volume_LRU.q[idx].len--;
*
* @param[in] vp pointer to volume object
* @param[in] new_idx index of VLRU queue onto which the volume will be moved
- * @param[in] append controls whether the volume will be appended or
+ * @param[in] append controls whether the volume will be appended or
* prepended to the queue. A nonzero value means it will
* be appended; zero means it will be prepended.
*
- * @pre The new (and old, if applicable) queue(s) must either be owned
+ * @pre The new (and old, if applicable) queue(s) must either be owned
* exclusively by the calling thread for asynchronous manipulation,
* or the queue(s) must be quiescent and VOL_LOCK must be held.
* Please see VLRU_BeginExclusive_r, VLRU_EndExclusive_r and VLRU_Wait_r
queue_Remove(&vp->vlru);
volume_LRU.q[vp->vlru.idx].len--;
-
+
/* put the volume back on the correct generational queue */
if (append) {
queue_Append(&volume_LRU.q[new_idx], &vp->vlru);
afs_uint32 now, min_delay, delay;
int i, min_idx, min_op, overdue, state;
- /* set t=0 for promotion cycle to be
+ /* set t=0 for promotion cycle to be
* fileserver startup */
now = FT_ApproxTime();
for (i=0; i < VLRU_GENERATIONS-1; i++) {
*
* @arg The volume has been accessed since the last promotion:
* @c (vp->stats.last_get >= vp->stats.last_promote)
- * @arg The last promotion occurred at least
+ * @arg The last promotion occurred at least
* @c volume_LRU.promotion_interval[idx] seconds ago
*
* As a performance optimization, promotions are "globbed". In other
* words, we promote arbitrarily large contiguous sublists of elements
- * as one operation.
+ * as one operation.
*
* @param[in] idx VLRU queue index to scan
*
* demotion passes */
if (salv_flag_vec &&
!(V_attachFlags(vp) & VOL_HDR_DONTSALV) &&
- demote &&
+ demote &&
(vp->updateTime < (now - SALVAGE_INTERVAL)) &&
(V_attachState(vp) == VOL_STATE_ATTACHED)) {
salv_flag_vec[salv_vec_offset++] = vp;
return ret;
}
-/* check whether volume should be made a
+/* check whether volume should be made a
* soft detach candidate */
static int
VCheckSoftDetachCandidate(Volume * vp, afs_uint32 thresh)
case VOL_STATE_GOING_OFFLINE:
case VOL_STATE_SHUTTING_DOWN:
case VOL_STATE_SALVAGING:
+ case VOL_STATE_DELETED:
volume_LRU.q[vp->vlru.idx].len--;
/* create and cancel a reservation to
/* vhold drops the glock, so now we should
* check to make sure we aren't racing against
* other threads. if we are racing, offlining vp
- * would be wasteful, and block the scanner for a while
+ * would be wasteful, and block the scanner for a while
*/
- if (vp->nWaiters ||
+ if (vp->nWaiters ||
(vp->nUsers > 1) ||
(vp->shuttingDown) ||
(vp->goingOffline) ||
/* Volume Header Cache routines */
/***************************************************/
-/**
+/**
* volume header cache.
*/
struct volume_hdr_LRU_t volume_hdr_LRU;
*
* @pre VOL_LOCK held. Function has never been called before.
*
- * @post howMany cache entries are allocated, initialized, and added
+ * @post howMany cache entries are allocated, initialized, and added
* to the LRU list. Header cache statistics are initialized.
*
* @note only applicable to fileServer program type. Should only be
static void
VInitVolumeHeaderCache(afs_uint32 howMany)
{
- register struct volHeader *hp;
+ struct volHeader *hp;
if (programType != fileServer)
return;
queue_Init(&volume_hdr_LRU);
*
* @pre VOL_LOCK held. For DAFS, lightweight ref must be held on volume object.
*
- * @post volume header attached to volume object. if necessary, header cache
+ * @post volume header attached to volume object. if necessary, header cache
* entry on LRU is synchronized to disk. Header is removed from LRU list.
*
* @note VOL_LOCK may be dropped
* @internal volume package internal use only.
*/
static int
-GetVolumeHeader(register Volume * vp)
+GetVolumeHeader(Volume * vp)
{
Error error;
- register struct volHeader *hd;
+ struct volHeader *hd;
int old;
static int everLogged = 0;
hd = queue_First(&volume_hdr_LRU, volHeader);
queue_Remove(hd);
} else {
- /* LRU is empty, so allocate a new volHeader
+ /* LRU is empty, so allocate a new volHeader
* this is probably indicative of a leak, so let the user know */
hd = (struct volHeader *)calloc(1, sizeof(struct volHeader));
assert(hd != NULL);
volume_hdr_LRU.stats.free++;
}
if (hd->back) {
- /* this header used to belong to someone else.
+ /* this header used to belong to someone else.
* we'll need to check if the header needs to
* be sync'd out to disk */
* @internal volume package internal use only.
*/
static void
-ReleaseVolumeHeader(register struct volHeader *hd)
+ReleaseVolumeHeader(struct volHeader *hd)
{
if (programType != fileServer)
return;
* @internal volume package internal use only.
*/
static void
-FreeVolumeHeader(register Volume * vp)
+FreeVolumeHeader(Volume * vp)
{
- register struct volHeader *hd = vp->header;
+ struct volHeader *hd = vp->header;
if (!hd)
return;
if (programType == fileServer) {
*
* @post Volume Hash Table will have 2^logsize buckets
*/
-int
+int
VSetVolHashSize(int logsize)
{
- /* 64 to 16384 hash buckets seems like a reasonable range */
- if ((logsize < 6 ) || (logsize > 14)) {
+ /* 64 to 268435456 hash buckets seems like a reasonable range */
+ if ((logsize < 6 ) || (logsize > 28)) {
return -1;
}
-
+
if (!VInit) {
VolumeHashTable.Size = 1 << logsize;
VolumeHashTable.Mask = VolumeHashTable.Size - 1;
static void
VInitVolumeHash(void)
{
- register int i;
+ int i;
- VolumeHashTable.Table = (VolumeHashChainHead *) calloc(VolumeHashTable.Size,
+ VolumeHashTable.Table = (VolumeHashChainHead *) calloc(VolumeHashTable.Size,
sizeof(VolumeHashChainHead));
assert(VolumeHashTable.Table != NULL);
-
+
for (i=0; i < VolumeHashTable.Size; i++) {
queue_Init(&VolumeHashTable.Table[i]);
#ifdef AFS_DEMAND_ATTACH_FS
* asynchronous hash chain reordering to finish.
*/
static void
-AddVolumeToHashTable(register Volume * vp, int hashid)
+AddVolumeToHashTable(Volume * vp, int hashid)
{
VolumeHashChainHead * head;
* asynchronous hash chain reordering to finish.
*/
static void
-DeleteVolumeFromHashTable(register Volume * vp)
+DeleteVolumeFromHashTable(Volume * vp)
{
VolumeHashChainHead * head;
*
* @param[out] ec error code return
* @param[in] volumeId volume id
- * @param[in] hint volume object which we believe could be the correct
+ * @param[in] hint volume object which we believe could be the correct
mapping
*
* @return volume object pointer
* @retval NULL no such volume id is registered with the hash table.
*
- * @pre VOL_LOCK is held. For DAFS, caller must hold a lightweight
+ * @pre VOL_LOCK is held. For DAFS, caller must hold a lightweight
ref on hint.
*
- * @post volume object with the given id is returned. volume object and
- * hash chain access statistics are updated. hash chain may have
+ * @post volume object with the given id is returned. volume object and
+ * hash chain access statistics are updated. hash chain may have
* been reordered.
*
- * @note For DAFS, VOL_LOCK may be dropped in order to wait for an
- * asynchronous hash chain reordering operation to finish, or
+ * @note For DAFS, VOL_LOCK may be dropped in order to wait for an
+ * asynchronous hash chain reordering operation to finish, or
* in order for us to perform an asynchronous chain reordering.
*
- * @note Hash chain reorderings occur when the access count for the
- * volume object being looked up exceeds the sum of the previous
- * node's (the node ahead of it in the hash chain linked list)
+ * @note Hash chain reorderings occur when the access count for the
+ * volume object being looked up exceeds the sum of the previous
+ * node's (the node ahead of it in the hash chain linked list)
* access count plus the constant VOLUME_HASH_REORDER_THRESHOLD.
*
- * @note For DAFS, the hint parameter allows us to short-circuit if the
- * cacheCheck fields match between the hash chain head and the
+ * @note For DAFS, the hint parameter allows us to short-circuit if the
+ * cacheCheck fields match between the hash chain head and the
* hint volume object.
*/
Volume *
VLookupVolume_r(Error * ec, VolId volumeId, Volume * hint)
{
- register int looks = 0;
+ int looks = 0;
Volume * vp, *np;
#ifdef AFS_DEMAND_ATTACH_FS
Volume *pp;
#endif /* AFS_DEMAND_ATTACH_FS */
/* someday we need to either do per-chain locks, RWlocks,
- * or both for volhash access.
+ * or both for volhash access.
* (and move to a data structure with better cache locality) */
/* search the chain for this volume id */
/* update the short-circuit cache check */
vp->chainCacheCheck = head->cacheCheck;
}
-#endif /* AFS_DEMAND_ATTACH_FS */
+#endif /* AFS_DEMAND_ATTACH_FS */
return vp;
}
* @note This interface should be called before any attempt to
* traverse the hash chain. It is permissible for a thread
* to gain exclusive access to the chain, and then perform
- * latent operations on the chain asynchronously wrt the
+ * latent operations on the chain asynchronously wrt the
* VOL_LOCK.
*
* @warning if waiting is necessary, VOL_LOCK is dropped
* @note This interface should be called before any attempt to
* traverse the VByPList. It is permissible for a thread
* to gain exclusive access to the list, and then perform
- * latent operations on the list asynchronously wrt the
+ * latent operations on the list asynchronously wrt the
* VOL_LOCK.
*
* @warning if waiting is necessary, VOL_LOCK is dropped
VPrintCacheStats_r(void)
{
afs_uint32 get_hi, get_lo, load_hi, load_lo;
- register struct VnodeClassInfo *vcp;
+ struct VnodeClassInfo *vcp;
vcp = &VnodeClassInfo[vLarge];
Log("Large vnode cache, %d entries, %d allocs, %d gets (%d reads), %d writes\n", vcp->cacheSize, vcp->allocs, vcp->gets, vcp->reads, vcp->writes);
vcp = &VnodeClassInfo[vSmall];
struct VLRUExtStatsEntry * vec;
};
-/**
+/**
* add a 256-entry fudge factor onto the vector in case state changes
* out from under us.
*/
reorders.sum += ch_reorders.sum;
len.sum += (double)head->len;
vol_sum += head->len;
-
+
if (i == 0) {
len.min = (double) head->len;
len.max = (double) head->len;
/* dump per-chain stats */
Log("Volume hash chain %d : len=%d, looks=%s, reorders=%s\n",
- i, head->len,
+ i, head->len,
DoubleToPrintable(ch_looks.sum, pr_buf[0], sizeof(pr_buf[0])),
DoubleToPrintable(ch_reorders.sum, pr_buf[1], sizeof(pr_buf[1])));
Log("\tVolume gets : min=%s, max=%s, avg=%s, total=%s\n",
} else if (flags & VOL_STATS_PER_CHAIN) {
/* dump simple per-chain stats */
Log("Volume hash chain %d : len=%d, looks=%s, gets=%s, reorders=%s\n",
- i, head->len,
+ i, head->len,
DoubleToPrintable(ch_looks.sum, pr_buf[0], sizeof(pr_buf[0])),
DoubleToPrintable(ch_gets.sum, pr_buf[1], sizeof(pr_buf[1])),
DoubleToPrintable(ch_reorders.sum, pr_buf[2], sizeof(pr_buf[2])));
* of the VGetPartitionById_r interface contract. */
diskP = VGetPartitionById_r(i, 0);
if (diskP) {
- Log("Partition %s has %d online volumes\n",
+ Log("Partition %s has %d online volumes\n",
VPartitionPath(diskP), diskP->vol_list.len);
}
}
{
return vol_opts.canUseSALVSYNC;
}
+
+afs_int32
+VCanUnsafeAttach(void)
+{
+ return vol_opts.unsafe_attach;
+}