#include <sys/file.h>
#endif
-#include <rx/xdr.h>
+#ifdef AFS_PTHREAD_ENV
+# include <opr/lock.h>
+#else
+# include <opr/lockstub.h>
+#endif
+#include <opr/ffs.h>
+#include <opr/jhash.h>
+
#include <afs/afsint.h>
+#include <rx/rx_queue.h>
+
#ifndef AFS_NT40_ENV
#if !defined(AFS_SGI_ENV)
-#ifdef AFS_OSF_ENV
-#include <ufs/fs.h>
-#else /* AFS_OSF_ENV */
#ifdef AFS_VFSINCL_ENV
#define VFS
#ifdef AFS_SUN5_ENV
#include <sys/fs.h>
#endif
#endif /* AFS_VFSINCL_ENV */
-#endif /* AFS_OSF_ENV */
#endif /* AFS_SGI_ENV */
#endif /* !AFS_NT40_ENV */
*/
static int vol_shutting_down = 0;
-#ifdef AFS_OSF_ENV
-extern void *calloc(), *realloc();
-#endif
-
/* Forward declarations */
-static Volume *attach2(Error * ec, VolId volumeId, char *path,
+static Volume *attach2(Error * ec, VolumeId volumeId, char *path,
struct DiskPartition64 *partp, Volume * vp,
int isbusy, int mode, int *acheckedOut);
static void ReallyFreeVolume(Volume * vp);
static int GetVolumeHeader(Volume * vp);
static void ReleaseVolumeHeader(struct volHeader *hd);
static void FreeVolumeHeader(Volume * vp);
-static void AddVolumeToHashTable(Volume * vp, int hashid);
+static void AddVolumeToHashTable(Volume * vp, VolumeId hashid);
static void DeleteVolumeFromHashTable(Volume * vp);
#if 0
static int VHold(Volume * vp);
static void LoadVolumeHeader(Error * ec, Volume * vp);
static int VCheckOffline(Volume * vp);
static int VCheckDetach(Volume * vp);
-static Volume * GetVolume(Error * ec, Error * client_ec, VolId volumeId,
+static Volume * GetVolume(Error * ec, Error * client_ec, VolumeId volumeId,
Volume * hint, const struct timespec *ts);
-int LogLevel; /* Vice loglevel--not defined as extern so that it will be
- * defined when not linked with vice, XXXX */
ProgramType programType; /* The type of program using the package */
static VolumePackageOptions vol_opts;
#endif
-#define VOLUME_BITMAP_GROWSIZE 16 /* bytes, => 128vnodes */
- /* Must be a multiple of 4 (1 word) !! */
-
/* this parameter needs to be tunable at runtime.
* 128 was really inadequate for largish servers -- at 16384 volumes this
* puts average chain length at 128, thus an average 65 deref's to find a volptr.
* an AVL or splay tree might work a lot better, but we'll just increase
* the default hash table size for now
*/
-#define DEFAULT_VOLUME_HASH_SIZE 256 /* Must be a power of 2!! */
-#define DEFAULT_VOLUME_HASH_MASK (DEFAULT_VOLUME_HASH_SIZE-1)
-#define VOLUME_HASH(volumeId) (volumeId&(VolumeHashTable.Mask))
+#define DEFAULT_VOLUME_HASH_BITS 10
+#define DEFAULT_VOLUME_HASH_SIZE opr_jhash_size(DEFAULT_VOLUME_HASH_BITS)
+#define DEFAULT_VOLUME_HASH_MASK opr_jhash_mask(DEFAULT_VOLUME_HASH_BITS)
+#define VOLUME_HASH(volumeId) \
+ (opr_jhash_int(volumeId, 0) & VolumeHashTable.Mask)
/*
* turn volume hash chains into partially ordered lists.
*/
#define VOLUME_HASH_REORDER_CHAIN_THRESH (VOLUME_HASH_REORDER_THRESHOLD / 2)
+/*
+ * The per volume uniquifier is bumped by 200 and written to disk
+ * every 200 file creates.
+ */
+#define VOLUME_UPDATE_UNIQUIFIER_BUMP 200
+
#include "rx/rx_queue.h"
static void VInitVolumeHash(void);
-#ifndef AFS_HAVE_FFS
-/* This macro is used where an ffs() call does not exist. Was in util/ffs.c */
-ffs(x)
-{
- afs_int32 ffs_i;
- afs_int32 ffs_tmp = x;
- if (ffs_tmp == 0)
- return (-1);
- else
- for (ffs_i = 1;; ffs_i++) {
- if (ffs_tmp & 1)
- return (ffs_i);
- else
- ffs_tmp >>= 1;
- }
-}
-#endif /* !AFS_HAVE_FFS */
-
#ifdef AFS_PTHREAD_ENV
/**
* disk partition queue element
static void *VInitVolumePackageThread(void *args);
static struct DiskPartition64 *VInitNextPartition(struct partition_queue *pq);
-static VolId VInitNextVolumeId(DIR *dirp);
+static VolumeId VInitNextVolumeId(DIR *dirp);
static int VInitPreAttachVolumes(int nthreads, struct volume_init_queue *vq);
#endif /* !AFS_DEMAND_ATTACH_FS */
};
struct VLRU_DiskEntry {
- afs_uint32 vid; /* volume ID */
+ VolumeId vid; /* volume ID */
afs_uint32 idx; /* generation */
afs_uint32 last_get; /* timestamp of last get */
};
static void VVByPListWait_r(struct DiskPartition64 * dp);
/* online salvager */
+/**
+ * Result codes for VCheckSalvage(), describing whether a salvage that
+ * the volume needs has been (or could be) scheduled with the salvager.
+ */
+typedef enum {
+    VCHECK_SALVAGE_OK = 0,         /**< no pending salvage */
+    VCHECK_SALVAGE_SCHEDULED = 1,  /**< salvage has been scheduled */
+    VCHECK_SALVAGE_ASYNC = 2,      /**< salvage being scheduled */
+    VCHECK_SALVAGE_DENIED = 3,     /**< salvage not scheduled; denied */
+    VCHECK_SALVAGE_FAIL = 4        /**< salvage not scheduled; failed */
+} vsalvage_check;
static int VCheckSalvage(Volume * vp);
#if defined(SALVSYNC_BUILD_CLIENT) || defined(FSSYNC_BUILD_CLIENT)
static int VScheduleSalvage_r(Volume * vp);
VSetVInit_r(int value)
{
VInit = value;
- CV_BROADCAST(&vol_vinit_cond);
+ opr_cv_broadcast(&vol_vinit_cond);
}
static_inline void
} else {
VLRU_SetOptions(VLRU_SET_ENABLED, 0);
}
- osi_Assert(pthread_key_create(&VThread_key, NULL) == 0);
+ opr_Verify(pthread_key_create(&VThread_key, NULL) == 0);
#endif
- MUTEX_INIT(&vol_glock_mutex, "vol glock", MUTEX_DEFAULT, 0);
- MUTEX_INIT(&vol_trans_mutex, "vol trans", MUTEX_DEFAULT, 0);
- CV_INIT(&vol_put_volume_cond, "vol put", CV_DEFAULT, 0);
- CV_INIT(&vol_sleep_cond, "vol sleep", CV_DEFAULT, 0);
- CV_INIT(&vol_init_attach_cond, "vol init attach", CV_DEFAULT, 0);
- CV_INIT(&vol_vinit_cond, "vol init", CV_DEFAULT, 0);
+ opr_mutex_init(&vol_glock_mutex);
+ opr_mutex_init(&vol_trans_mutex);
+ opr_cv_init(&vol_put_volume_cond);
+ opr_cv_init(&vol_sleep_cond);
+ opr_cv_init(&vol_init_attach_cond);
+ opr_cv_init(&vol_vinit_cond);
#ifndef AFS_PTHREAD_ENV
IOMGR_Initialize();
#endif /* AFS_PTHREAD_ENV */
srandom(time(0)); /* For VGetVolumeInfo */
#ifdef AFS_DEMAND_ATTACH_FS
- MUTEX_INIT(&vol_salvsync_mutex, "salvsync", MUTEX_DEFAULT, 0);
+ opr_mutex_init(&vol_salvsync_mutex);
#endif /* AFS_DEMAND_ATTACH_FS */
/* Ok, we have done enough initialization that fileserver can
#if defined(AFS_DEMAND_ATTACH_FS) && defined(SALVSYNC_BUILD_CLIENT)
if (VCanUseSALVSYNC()) {
/* establish a connection to the salvager at this point */
- osi_Assert(VConnectSALV() != 0);
+ opr_Verify(VConnectSALV() != 0);
}
#endif /* AFS_DEMAND_ATTACH_FS */
int
VInitAttachVolumes(ProgramType pt)
{
- osi_Assert(VInit==1);
+ opr_Assert(VInit==1);
if (pt == fileServer) {
struct DiskPartition64 *diskP;
/* Attach all the volumes in this partition */
for (diskP = DiskPartitionList; diskP; diskP = diskP->next) {
int nAttached = 0, nUnattached = 0;
- osi_Assert(VAttachVolumesByPartition(diskP, &nAttached, &nUnattached) == 0);
+ opr_Verify(VAttachVolumesByPartition(diskP,
+ &nAttached, &nUnattached)
+ == 0);
}
}
VOL_LOCK;
int
VInitAttachVolumes(ProgramType pt)
{
- osi_Assert(VInit==1);
+ opr_Assert(VInit==1);
if (pt == fileServer) {
struct DiskPartition64 *diskP;
struct vinitvolumepackage_thread_t params;
pthread_t tid;
pthread_attr_t attrs;
- CV_INIT(¶ms.thread_done_cv, "thread done", CV_DEFAULT, 0);
+ opr_cv_init(¶ms.thread_done_cv);
queue_Init(¶ms);
params.n_threads_complete = 0;
/* create partition work queue */
for (parts=0, diskP = DiskPartitionList; diskP; diskP = diskP->next, parts++) {
- dpq = (diskpartition_queue_t *) malloc(sizeof(struct diskpartition_queue_t));
- osi_Assert(dpq != NULL);
+ dpq = malloc(sizeof(struct diskpartition_queue_t));
+ opr_Assert(dpq != NULL);
dpq->diskP = diskP;
queue_Append(¶ms,dpq);
}
- threads = MIN(parts, vol_attach_threads);
+ threads = min(parts, vol_attach_threads);
if (threads > 1) {
/* spawn off a bunch of initialization threads */
- osi_Assert(pthread_attr_init(&attrs) == 0);
- osi_Assert(pthread_attr_setdetachstate(&attrs, PTHREAD_CREATE_DETACHED) == 0);
+ opr_Verify(pthread_attr_init(&attrs) == 0);
+ opr_Verify(pthread_attr_setdetachstate(&attrs,
+ PTHREAD_CREATE_DETACHED)
+ == 0);
Log("VInitVolumePackage: beginning parallel fileserver startup\n");
Log("VInitVolumePackage: using %d threads to attach volumes on %d partitions\n",
for (i=0; i < threads; i++) {
AFS_SIGSET_DECL;
AFS_SIGSET_CLEAR();
- osi_Assert(pthread_create
- (&tid, &attrs, &VInitVolumePackageThread,
- ¶ms) == 0);
+ opr_Verify(pthread_create(&tid, &attrs,
+ &VInitVolumePackageThread,
+ ¶ms) == 0);
AFS_SIGSET_RESTORE();
}
}
VOL_UNLOCK;
- osi_Assert(pthread_attr_destroy(&attrs) == 0);
+ opr_Verify(pthread_attr_destroy(&attrs) == 0);
} else {
/* if we're only going to run one init thread, don't bother creating
* another LWP */
VInitVolumePackageThread(¶ms);
}
- CV_DESTROY(¶ms.thread_done_cv);
+ opr_cv_destroy(¶ms.thread_done_cv);
}
VOL_LOCK;
VSetVInit_r(2); /* Initialized, and all volumes have been attached */
- CV_BROADCAST(&vol_init_attach_cond);
+ opr_cv_broadcast(&vol_init_attach_cond);
VOL_UNLOCK;
return 0;
}
diskP = dpq->diskP;
free(dpq);
- osi_Assert(VAttachVolumesByPartition(diskP, &nAttached, &nUnattached) == 0);
+ opr_Verify(VAttachVolumesByPartition(diskP, &nAttached,
+ &nUnattached) == 0);
VOL_LOCK;
}
done:
params->n_threads_complete++;
- CV_SIGNAL(¶ms->thread_done_cv);
+ opr_cv_signal(¶ms->thread_done_cv);
VOL_UNLOCK;
return NULL;
}
int
VInitAttachVolumes(ProgramType pt)
{
- osi_Assert(VInit==1);
+ opr_Assert(VInit==1);
if (pt == fileServer) {
struct DiskPartition64 *diskP;
/* create partition work queue */
queue_Init(&pq);
- CV_INIT(&(pq.cv), "partq", CV_DEFAULT, 0);
- MUTEX_INIT(&(pq.mutex), "partq", MUTEX_DEFAULT, 0);
+ opr_cv_init(&pq.cv);
+ opr_mutex_init(&pq.mutex);
for (parts = 0, diskP = DiskPartitionList; diskP; diskP = diskP->next, parts++) {
struct diskpartition_queue_t *dp;
- dp = (struct diskpartition_queue_t*)malloc(sizeof(struct diskpartition_queue_t));
- osi_Assert(dp != NULL);
+ dp = malloc(sizeof(struct diskpartition_queue_t));
+ opr_Assert(dp != NULL);
dp->diskP = diskP;
queue_Append(&pq, dp);
}
/* number of worker threads; at least one, not to exceed the number of partitions */
- threads = MIN(parts, vol_attach_threads);
+ threads = min(parts, vol_attach_threads);
/* create volume work queue */
queue_Init(&vq);
- CV_INIT(&(vq.cv), "volq", CV_DEFAULT, 0);
- MUTEX_INIT(&(vq.mutex), "volq", MUTEX_DEFAULT, 0);
+ opr_cv_init(&vq.cv);
+ opr_mutex_init(&vq.mutex);
- osi_Assert(pthread_attr_init(&attrs) == 0);
- osi_Assert(pthread_attr_setdetachstate(&attrs, PTHREAD_CREATE_DETACHED) == 0);
+ opr_Verify(pthread_attr_init(&attrs) == 0);
+ opr_Verify(pthread_attr_setdetachstate(&attrs,
+ PTHREAD_CREATE_DETACHED) == 0);
Log("VInitVolumePackage: beginning parallel fileserver startup\n");
Log("VInitVolumePackage: using %d threads to pre-attach volumes on %d partitions\n",
struct vinitvolumepackage_thread_param *params;
AFS_SIGSET_DECL;
- params = (struct vinitvolumepackage_thread_param *)malloc(sizeof(struct vinitvolumepackage_thread_param));
- osi_Assert(params);
+ params = malloc(sizeof(struct vinitvolumepackage_thread_param));
+ opr_Assert(params);
params->pq = &pq;
params->vq = &vq;
params->nthreads = threads;
params->thread = i+1;
AFS_SIGSET_CLEAR();
- osi_Assert(pthread_create (&tid, &attrs, &VInitVolumePackageThread, (void*)params) == 0);
+ opr_Verify(pthread_create(&tid, &attrs,
+ &VInitVolumePackageThread,
+ (void*)params) == 0);
AFS_SIGSET_RESTORE();
}
VInitPreAttachVolumes(threads, &vq);
- osi_Assert(pthread_attr_destroy(&attrs) == 0);
- CV_DESTROY(&pq.cv);
- MUTEX_DESTROY(&pq.mutex);
- CV_DESTROY(&vq.cv);
- MUTEX_DESTROY(&vq.mutex);
+ opr_Verify(pthread_attr_destroy(&attrs) == 0);
+ opr_cv_destroy(&pq.cv);
+ opr_mutex_destroy(&pq.mutex);
+ opr_cv_destroy(&vq.cv);
+ opr_mutex_destroy(&vq.mutex);
}
VOL_LOCK;
VSetVInit_r(2); /* Initialized, and all volumes have been attached */
- CV_BROADCAST(&vol_init_attach_cond);
+ opr_cv_broadcast(&vol_init_attach_cond);
VOL_UNLOCK;
return 0;
struct volume_init_queue *vq;
struct volume_init_batch *vb;
- osi_Assert(args);
+ opr_Assert(args);
params = (struct vinitvolumepackage_thread_param *)args;
pq = params->pq;
vq = params->vq;
- osi_Assert(pq);
- osi_Assert(vq);
+ opr_Assert(pq);
+ opr_Assert(vq);
- vb = (struct volume_init_batch*)malloc(sizeof(struct volume_init_batch));
- osi_Assert(vb);
+ vb = malloc(sizeof(struct volume_init_batch));
+ opr_Assert(vb);
vb->thread = params->thread;
vb->last = 0;
vb->size = 0;
Log("Scanning partitions on thread %d of %d\n", params->thread, params->nthreads);
while((partition = VInitNextPartition(pq))) {
DIR *dirp;
- VolId vid;
+ VolumeId vid;
Log("Partition %s: pre-attaching volumes\n", partition->name);
dirp = opendir(VPartitionPath(partition));
continue;
}
while ((vid = VInitNextVolumeId(dirp))) {
- Volume *vp = (Volume*)malloc(sizeof(Volume));
- osi_Assert(vp);
- memset(vp, 0, sizeof(Volume));
+ Volume *vp = calloc(1, sizeof(Volume));
+ opr_Assert(vp);
vp->device = partition->device;
vp->partition = partition;
vp->hashid = vid;
queue_Init(&vp->vnode_list);
queue_Init(&vp->rx_call_list);
- CV_INIT(&V_attachCV(vp), "partattach", CV_DEFAULT, 0);
+ opr_cv_init(&V_attachCV(vp));
vb->batch[vb->size++] = vp;
if (vb->size == VINIT_BATCH_MAX_SIZE) {
- MUTEX_ENTER(&vq->mutex);
+ opr_mutex_enter(&vq->mutex);
queue_Append(vq, vb);
- CV_BROADCAST(&vq->cv);
- MUTEX_EXIT(&vq->mutex);
+ opr_cv_broadcast(&vq->cv);
+ opr_mutex_exit(&vq->mutex);
- vb = (struct volume_init_batch*)malloc(sizeof(struct volume_init_batch));
- osi_Assert(vb);
+ vb = malloc(sizeof(struct volume_init_batch));
+ opr_Assert(vb);
vb->thread = params->thread;
vb->size = 0;
vb->last = 0;
}
vb->last = 1;
- MUTEX_ENTER(&vq->mutex);
+ opr_mutex_enter(&vq->mutex);
queue_Append(vq, vb);
- CV_BROADCAST(&vq->cv);
- MUTEX_EXIT(&vq->mutex);
+ opr_cv_broadcast(&vq->cv);
+ opr_mutex_exit(&vq->mutex);
Log("Partition scan thread %d of %d ended\n", params->thread, params->nthreads);
free(params);
}
/* get next partition to scan */
- MUTEX_ENTER(&pq->mutex);
+ opr_mutex_enter(&pq->mutex);
if (queue_IsEmpty(pq)) {
- MUTEX_EXIT(&pq->mutex);
+ opr_mutex_exit(&pq->mutex);
return NULL;
}
dp = queue_First(pq, diskpartition_queue_t);
queue_Remove(dp);
- MUTEX_EXIT(&pq->mutex);
+ opr_mutex_exit(&pq->mutex);
- osi_Assert(dp);
- osi_Assert(dp->diskP);
+ opr_Assert(dp);
+ opr_Assert(dp->diskP);
partition = dp->diskP;
free(dp);
/**
* Find next volume id on the partition.
*/
-static VolId
+static VolumeId
VInitNextVolumeId(DIR *dirp)
{
struct dirent *d;
- VolId vid = 0;
+ VolumeId vid = 0;
char *ext;
while((d = readdir(dirp))) {
while (nthreads) {
/* dequeue next volume */
- MUTEX_ENTER(&vq->mutex);
+ opr_mutex_enter(&vq->mutex);
if (queue_IsEmpty(vq)) {
- CV_WAIT(&vq->cv, &vq->mutex);
+ opr_cv_wait(&vq->cv, &vq->mutex);
}
vb = queue_First(vq, volume_init_batch);
queue_Remove(vb);
- MUTEX_EXIT(&vq->mutex);
+ opr_mutex_exit(&vq->mutex);
if (vb->size) {
VOL_LOCK;
Log("Error looking up volume, code=%d\n", ec);
}
else if (dup) {
- Log("Warning: Duplicate volume id %d detected.\n", vp->hashid);
+ Log("Warning: Duplicate volume id %" AFS_VOLID_FMT " detected.\n", afs_printable_VolumeId_lu(vp->hashid));
}
else {
/* put pre-attached volume onto the hash table
(*(vp ? nAttached : nUnattached))++;
if (error == VOFFLINE)
Log("Volume %d stays offline (/vice/offline/%s exists)\n", VolumeNumber(dp->d_name), dp->d_name);
- else if (LogLevel >= 5) {
+ else if (GetLogLevel() >= 5) {
Log("Partition %s: attached volume %d (%s)\n",
diskP->name, VolumeNumber(dp->d_name),
dp->d_name);
if (vol_attach_threads > 1) {
/* prepare for parallel shutdown */
params.n_threads = vol_attach_threads;
- MUTEX_INIT(¶ms.lock, "params", MUTEX_DEFAULT, 0);
- CV_INIT(¶ms.cv, "params", CV_DEFAULT, 0);
- CV_INIT(¶ms.master_cv, "params master", CV_DEFAULT, 0);
- osi_Assert(pthread_attr_init(&attrs) == 0);
- osi_Assert(pthread_attr_setdetachstate(&attrs, PTHREAD_CREATE_DETACHED) == 0);
+ opr_mutex_init(¶ms.lock);
+ opr_cv_init(¶ms.cv);
+ opr_cv_init(¶ms.master_cv);
+ opr_Verify(pthread_attr_init(&attrs) == 0);
+ opr_Verify(pthread_attr_setdetachstate(&attrs,
+ PTHREAD_CREATE_DETACHED) == 0);
queue_Init(¶ms);
/* setup the basic partition information structures for
/* build up the pass 0 shutdown work queue */
- dpq = (struct diskpartition_queue_t *) malloc(sizeof(struct diskpartition_queue_t));
- osi_Assert(dpq != NULL);
+ dpq = malloc(sizeof(struct diskpartition_queue_t));
+ opr_Assert(dpq != NULL);
dpq->diskP = diskP;
queue_Prepend(¶ms, dpq);
vol_attach_threads, params.n_parts, params.n_parts > 1 ? "s" : "" );
/* do pass 0 shutdown */
- MUTEX_ENTER(¶ms.lock);
+ opr_mutex_enter(¶ms.lock);
for (i=0; i < params.n_threads; i++) {
- osi_Assert(pthread_create
- (&tid, &attrs, &VShutdownThread,
- ¶ms) == 0);
+ opr_Verify(pthread_create(&tid, &attrs, &VShutdownThread,
+ ¶ms) == 0);
}
/* wait for all the pass 0 shutdowns to complete */
}
params.n_threads_complete = 0;
params.pass = 1;
- CV_BROADCAST(¶ms.cv);
- MUTEX_EXIT(¶ms.lock);
+ opr_cv_broadcast(¶ms.cv);
+ opr_mutex_exit(¶ms.lock);
Log("VShutdown: pass 0 completed using the 1 thread per partition algorithm\n");
Log("VShutdown: starting passes 1 through 3 using finely-granular mp-fast algorithm\n");
VOL_CV_WAIT(¶ms.cv);
}
- osi_Assert(pthread_attr_destroy(&attrs) == 0);
- CV_DESTROY(¶ms.cv);
- CV_DESTROY(¶ms.master_cv);
- MUTEX_DESTROY(¶ms.lock);
+ opr_Verify(pthread_attr_destroy(&attrs) == 0);
+ opr_cv_destroy(¶ms.cv);
+ opr_cv_destroy(¶ms.master_cv);
+ opr_mutex_destroy(¶ms.lock);
/* drop the VByPList exclusive reservations */
for (diskP = DiskPartitionList; diskP; diskP = diskP->next) {
for (queue_Scan(&VolumeHashTable.Table[i],vp,np,Volume)) {
code = VHold_r(vp);
if (code == 0) {
- if (LogLevel >= 5)
- Log("VShutdown: Attempting to take volume %u offline.\n",
- vp->hashid);
+ if (GetLogLevel() >= 5)
+ Log("VShutdown: Attempting to take volume %" AFS_VOLID_FMT " offline.\n",
+ afs_printable_VolumeId_lu(vp->hashid));
/* next, take the volume offline (drops reference count) */
VOffline_r(vp, "File server was shut down");
void
VShutdown(void)
{
- osi_Assert(VInit>0);
+ opr_Assert(VInit>0);
VOL_LOCK;
VShutdown_r();
VOL_UNLOCK;
params = (vshutdown_thread_t *) args;
/* acquire the shutdown pass 0 lock */
- MUTEX_ENTER(¶ms->lock);
+ opr_mutex_enter(¶ms->lock);
/* if there's still pass 0 work to be done,
* get a work entry, and do a pass 0 shutdown */
if (queue_IsNotEmpty(params)) {
dpq = queue_First(params, diskpartition_queue_t);
queue_Remove(dpq);
- MUTEX_EXIT(¶ms->lock);
+ opr_mutex_exit(¶ms->lock);
diskP = dpq->diskP;
free(dpq);
id = diskP->index;
while (ShutdownVolumeWalk_r(diskP, 0, ¶ms->part_pass_head[id]))
count++;
params->stats[0][diskP->index] = count;
- MUTEX_ENTER(¶ms->lock);
+ opr_mutex_enter(¶ms->lock);
}
params->n_threads_complete++;
if (params->n_threads_complete == params->n_threads) {
/* notify control thread that all workers have completed pass 0 */
- CV_SIGNAL(¶ms->master_cv);
+ opr_cv_signal(¶ms->master_cv);
}
while (params->pass == 0) {
- CV_WAIT(¶ms->cv, ¶ms->lock);
+ opr_cv_wait(¶ms->cv, ¶ms->lock);
}
/* switch locks */
- MUTEX_EXIT(¶ms->lock);
+ opr_mutex_exit(¶ms->lock);
VOL_LOCK;
pass = params->pass;
- osi_Assert(pass > 0);
+ opr_Assert(pass > 0);
/* now escalate through the more complicated shutdowns */
while (pass <= 3) {
ShutdownCreateSchedule(params);
/* wake up all the workers */
- CV_BROADCAST(¶ms->cv);
+ opr_cv_broadcast(¶ms->cv);
VOL_UNLOCK;
Log("VShutdown: pass %d completed using %d threads on %d partitions\n",
{
struct rx_queue * q = queue_First(&dp->vol_list, rx_queue);
int i = 0;
+ const char *pass_strs[4] = {"{un/pre}attached vols", "vols w/ vol header loaded", "vols w/o vol header loaded", "vols with exclusive state"};
- while (ShutdownVolumeWalk_r(dp, pass, &q))
+ while (ShutdownVolumeWalk_r(dp, pass, &q)) {
i++;
+ if (0 == i%100) {
+ Log("VShutdownByPartition: ... shut down %d volumes on %s in pass %d (%s)\n", i, VPartitionPath(dp), pass, pass_strs[pass]);
+ }
+ }
return i;
}
VCreateReservation_r(vp);
- if (LogLevel >= 5) {
- Log("VShutdownVolume_r: vid=%u, device=%d, state=%hu\n",
- vp->hashid, vp->partition->device, V_attachState(vp));
+ if (GetLogLevel() >= 5) {
+ Log("VShutdownVolume_r: vid=%" AFS_VOLID_FMT ", device=%d, state=%u\n",
+ afs_printable_VolumeId_lu(vp->hashid), vp->partition->device,
+ (unsigned int) V_attachState(vp));
}
/* wait for other blocking ops to finish */
VWaitExclusiveState_r(vp);
- osi_Assert(VIsValidState(V_attachState(vp)));
+ opr_Assert(VIsValidState(V_attachState(vp)));
switch(V_attachState(vp)) {
case VOL_STATE_SALVAGING:
case VOL_STATE_ATTACHED:
code = VHold_r(vp);
if (!code) {
- if (LogLevel >= 5)
- Log("VShutdown: Attempting to take volume %u offline.\n",
- vp->hashid);
+ if (GetLogLevel() >= 5)
+ Log("VShutdown: Attempting to take volume %" AFS_VOLID_FMT " offline.\n",
+ afs_printable_VolumeId_lu(vp->hashid));
/* take the volume offline (drops reference count) */
VOffline_r(vp, "File server was shut down");
/* Header I/O routines */
/***************************************************/
+/**
+ * Map an on-disk header magic number to a human-readable name.
+ *
+ * @param[in] magic  versionStamp magic value read from a header file
+ *
+ * @return a static string naming the header type, for use in log
+ *         messages; "unknown" if the magic is not recognized.
+ */
+static const char *
+HeaderName(bit32 magic)
+{
+    switch (magic) {
+    case VOLUMEINFOMAGIC:
+	return "volume info";
+    case SMALLINDEXMAGIC:
+	return "small index";
+    case LARGEINDEXMAGIC:
+	return "large index";
+    case LINKTABLEMAGIC:
+	return "link table";
+    }
+    return "unknown";
+}
+
/* open a descriptor for the inode (h),
* read in an on-disk structure into buffer (to) of size (size),
* verify versionstamp in structure has magic (magic) and
{
struct versionStamp *vsn;
FdHandle_t *fdP;
+ afs_sfsize_t nbytes;
+ afs_ino_str_t stmp;
*ec = 0;
if (h == NULL) {
+ Log("ReadHeader: Null inode handle argument for %s header file.\n",
+ HeaderName(magic));
*ec = VSALVAGE;
return;
}
fdP = IH_OPEN(h);
if (fdP == NULL) {
+ Log("ReadHeader: Failed to open %s header file "
+ "(volume=%" AFS_VOLID_FMT ", inode=%s); errno=%d\n", HeaderName(magic), afs_printable_VolumeId_lu(h->ih_vid),
+ PrintInode(stmp, h->ih_ino), errno);
*ec = VSALVAGE;
return;
}
vsn = (struct versionStamp *)to;
- if (FDH_PREAD(fdP, to, size, 0) != size || vsn->magic != magic) {
+ nbytes = FDH_PREAD(fdP, to, size, 0);
+ if (nbytes < 0) {
+ Log("ReadHeader: Failed to read %s header file "
+ "(volume=%" AFS_VOLID_FMT ", inode=%s); errno=%d\n", HeaderName(magic), afs_printable_VolumeId_lu(h->ih_vid),
+ PrintInode(stmp, h->ih_ino), errno);
*ec = VSALVAGE;
FDH_REALLYCLOSE(fdP);
return;
}
+ if (nbytes != size) {
+ Log("ReadHeader: Incorrect number of bytes read from %s header file "
+ "(volume=%" AFS_VOLID_FMT ", inode=%s); expected=%d, read=%d\n",
+ HeaderName(magic), afs_printable_VolumeId_lu(h->ih_vid),
+ PrintInode(stmp, h->ih_ino), size, (int)nbytes);
+ *ec = VSALVAGE;
+ FDH_REALLYCLOSE(fdP);
+ return;
+ }
+ if (vsn->magic != magic) {
+ Log("ReadHeader: Incorrect magic for %s header file "
+ "(volume=%" AFS_VOLID_FMT ", inode=%s); expected=0x%x, read=0x%x\n",
+ HeaderName(magic), afs_printable_VolumeId_lu(h->ih_vid),
+ PrintInode(stmp, h->ih_ino), magic, vsn->magic);
+ *ec = VSALVAGE;
+ FDH_REALLYCLOSE(fdP);
+ return;
+ }
+
FDH_CLOSE(fdP);
/* Check is conditional, in case caller wants to inspect version himself */
if (version && vsn->version != version) {
+ Log("ReadHeader: Incorrect version for %s header file "
+ "(volume=%" AFS_VOLID_FMT ", inode=%s); expected=%x, read=%x\n",
+ HeaderName(magic), afs_printable_VolumeId_lu(h->ih_vid), PrintInode(stmp, h->ih_ino),
+ version, vsn->version);
*ec = VSALVAGE;
}
}
Volume *
VPreAttachVolumeById_r(Error * ec,
char * partition,
- VolId volumeId)
+ VolumeId volumeId)
{
Volume *vp;
struct DiskPartition64 *partp;
*ec = 0;
- osi_Assert(programType == fileServer);
+ opr_Assert(programType == fileServer);
if (!(partp = VGetPartition_r(partition, 0))) {
*ec = VNOVOL;
return NULL;
}
+ /* ensure that any vp we pass to VPreAttachVolumeByVp_r
+ * is NOT in exclusive state.
+ */
+ retry:
vp = VLookupVolume_r(ec, volumeId, NULL);
+
if (*ec) {
return NULL;
}
+ if (vp && VIsExclusiveState(V_attachState(vp))) {
+ VCreateReservation_r(vp);
+ VWaitExclusiveState_r(vp);
+ VCancelReservation_r(vp);
+ vp = NULL;
+ goto retry; /* look up volume again */
+ }
+
+ /* vp == NULL or vp not exclusive both OK */
+
return VPreAttachVolumeByVp_r(ec, partp, vp, volumeId);
}
*
* @pre VOL_LOCK is held.
*
+ * @pre vp (if specified) must not be in exclusive state.
+ *
* @warning Returned volume object pointer does not have to
* equal the pointer passed in as argument vp. There
* are potential race conditions which can result in
VPreAttachVolumeByVp_r(Error * ec,
struct DiskPartition64 * partp,
Volume * vp,
- VolId vid)
+ VolumeId vid)
{
Volume *nvp = NULL;
*ec = 0;
+ /* don't proceed unless it's safe */
+ if (vp) {
+ opr_Assert(!VIsExclusiveState(V_attachState(vp)));
+ }
+
/* check to see if pre-attach already happened */
if (vp &&
(V_attachState(vp) != VOL_STATE_UNATTACHED) &&
* - volume is in an error state
* - volume is pre-attached
*/
- Log("VPreattachVolumeByVp_r: volume %u not in quiescent state\n", vid);
+ Log("VPreattachVolumeByVp_r: volume %" AFS_VOLID_FMT " not in quiescent state (state %u flags 0x%x)\n",
+ afs_printable_VolumeId_lu(vid), V_attachState(vp),
+ V_attachFlags(vp));
goto done;
} else if (vp) {
/* we're re-attaching a volume; clear out some old state */
VOL_UNLOCK;
/* allocate the volume structure */
- vp = nvp = (Volume *) malloc(sizeof(Volume));
- osi_Assert(vp != NULL);
- memset(vp, 0, sizeof(Volume));
+ vp = nvp = calloc(1, sizeof(Volume));
+ opr_Assert(vp != NULL);
queue_Init(&vp->vnode_list);
queue_Init(&vp->rx_call_list);
- CV_INIT(&V_attachCV(vp), "vp attach", CV_DEFAULT, 0);
+ opr_cv_init(&V_attachCV(vp));
}
/* link the volume with its associated vice partition */
VLRU_Init_Node_r(vp);
VChangeState_r(vp, VOL_STATE_PREATTACHED);
- if (LogLevel >= 5)
- Log("VPreAttachVolumeByVp_r: volume %u pre-attached\n", vp->hashid);
+ if (GetLogLevel() >= 5)
+ Log("VPreAttachVolumeByVp_r: volume %" AFS_VOLID_FMT " pre-attached\n", afs_printable_VolumeId_lu(vp->hashid));
done:
if (*ec)
struct DiskPartition64 *partp;
char path[64];
int isbusy = 0;
- VolId volumeId;
+ VolumeId volumeId;
int checkedOut;
#ifdef AFS_DEMAND_ATTACH_FS
VolumeStats stats_save;
}
if (VRequiresPartLock()) {
- osi_Assert(VInit == 3);
+ opr_Assert(VInit == 3);
VLockPartition_r(partition);
} else if (programType == fileServer) {
#ifdef AFS_DEMAND_ATTACH_FS
}
}
- osi_Assert(vp != NULL);
+ opr_Assert(vp != NULL);
/* handle pre-attach races
*
if (!vp) {
vp = (Volume *) calloc(1, sizeof(Volume));
- osi_Assert(vp != NULL);
+ opr_Assert(vp != NULL);
vp->hashid = volumeId;
vp->device = partp->device;
vp->partition = partp;
queue_Init(&vp->vnode_list);
queue_Init(&vp->rx_call_list);
#ifdef AFS_DEMAND_ATTACH_FS
- CV_INIT(&V_attachCV(vp), "vp attach", CV_DEFAULT, 0);
+ opr_cv_init(&V_attachCV(vp));
#endif /* AFS_DEMAND_ATTACH_FS */
}
goto done;
}
}
- if (LogLevel)
- Log("VOnline: volume %u (%s) attached and online\n", V_id(vp),
+ if (GetLogLevel() != 0)
+ Log("VOnline: volume %" AFS_VOLID_FMT " (%s) attached and online\n", afs_printable_VolumeId_lu(V_id(vp)),
V_name(vp));
}
struct DiskPartition64 *partp;
char path[64];
int isbusy = 0;
- VolId volumeId;
+ VolumeId volumeId;
Volume * nvp = NULL;
VolumeStats stats_save;
int checkedOut;
*ec = 0;
/* volume utility should never call AttachByVp */
- osi_Assert(programType == fileServer);
+ opr_Assert(programType == fileServer);
volumeId = vp->hashid;
partp = vp->partition;
}
}
- osi_Assert(vp != NULL);
+ opr_Assert(vp != NULL);
VChangeState_r(vp, VOL_STATE_ATTACHING);
/* restore monotonically increasing stats */
VUpdateVolume_r(ec, vp, 0);
if (*ec) {
- Log("VAttachVolume: Error updating volume %u\n", vp->hashid);
+ Log("VAttachVolume: Error updating volume %" AFS_VOLID_FMT "\n",
+ afs_printable_VolumeId_lu(vp->hashid));
VPutVolume_r(vp);
goto done;
}
#endif /* !AFS_DEMAND_ATTACH_FS */
VAddToVolumeUpdateList_r(ec, vp);
if (*ec) {
- Log("VAttachVolume: Error adding volume %u to update list\n", vp->hashid);
+ Log("VAttachVolume: Error adding volume %" AFS_VOLID_FMT " to update list\n",
+ afs_printable_VolumeId_lu(vp->hashid));
if (vp)
VPutVolume_r(vp);
goto done;
}
}
- if (LogLevel)
- Log("VOnline: volume %u (%s) attached and online\n", V_id(vp),
- V_name(vp));
+ if (GetLogLevel() != 0)
+ Log("VOnline: volume %" AFS_VOLID_FMT " (%s) attached and online\n",
+ afs_printable_VolumeId_lu(V_id(vp)), V_name(vp));
done:
if (reserve) {
VCancelReservation_r(nvp);
{
int code;
- osi_Assert(programType != fileServer || VIsExclusiveState(V_attachState(vp)));
- osi_Assert(!(V_attachFlags(vp) & VOL_LOCKED));
+ opr_Assert(programType != fileServer
+ || VIsExclusiveState(V_attachState(vp)));
+ opr_Assert(!(V_attachFlags(vp) & VOL_LOCKED));
code = VLockVolumeByIdNB(vp->hashid, vp->partition, locktype);
if (code == 0) {
static void
VUnlockVolume(Volume *vp)
{
- osi_Assert(programType != fileServer || VIsExclusiveState(V_attachState(vp)));
- osi_Assert((V_attachFlags(vp) & VOL_LOCKED));
+ opr_Assert(programType != fileServer
+ || VIsExclusiveState(V_attachState(vp)));
+ opr_Assert((V_attachFlags(vp) & VOL_LOCKED));
VUnlockVolumeById(vp->hashid, vp->partition);
if (VCanUseFSSYNC() && (mode == V_PEEK || peek)) {
SYNC_response res;
res.payload.len = sizeof(VolumeDiskData);
- res.payload.buf = &vp->header->diskstuff;
+ res.payload.buf = &(V_disk(vp));
if (FSYNC_VolOp(vp->hashid,
partp->name,
}
if (*ec) {
+ VOL_LOCK;
+ FreeVolumeHeader(vp);
+ VOL_UNLOCK;
return;
}
if (retry) {
switch (vp->pending_vol_op->vol_op_state) {
case FSSYNC_VolOpPending:
/* this should never happen */
- osi_Assert(vp->pending_vol_op->vol_op_state != FSSYNC_VolOpPending);
+ opr_Assert(vp->pending_vol_op->vol_op_state
+ != FSSYNC_VolOpPending);
break;
case FSSYNC_VolOpRunningUnknown:
/* this should never happen; we resolved 'unknown' above */
- osi_Assert(vp->pending_vol_op->vol_op_state != FSSYNC_VolOpRunningUnknown);
+ opr_Assert(vp->pending_vol_op->vol_op_state
+ != FSSYNC_VolOpRunningUnknown);
break;
case FSSYNC_VolOpRunningOffline:
* @post VOL_LOCK held
*/
static Volume *
-attach2(Error * ec, VolId volumeId, char *path, struct DiskPartition64 *partp,
+attach2(Error * ec, VolumeId volumeId, char *path, struct DiskPartition64 *partp,
Volume * vp, int isbusy, int mode, int *acheckedOut)
{
/* have we read in the header successfully? */
if (*ec == VNOVOL) {
/* if the volume doesn't exist, skip straight to 'error' so we don't
* request a salvage */
- VOL_LOCK;
- goto error_notbroken;
+ goto unlocked_error;
}
if (!*ec) {
if (!*ec) {
struct IndexFileHeader iHead;
-#if OPENAFS_VOL_STATS
/*
* We just read in the diskstuff part of the header. If the detailed
* volume stats area has not yet been initialized, we should bzero the
memset((V_stat_area(vp)), 0, VOL_STATS_BYTES);
V_stat_initialized(vp) = 1;
}
-#endif /* OPENAFS_VOL_STATS */
(void)ReadHeader(ec, vp->vnodeIndex[vSmall].handle,
(char *)&iHead, sizeof(iHead),
} else if (*ec) {
/* volume operation in progress */
VOL_LOCK;
+ /* we have already transitioned the vp away from ATTACHING state, so we
+ * can go right to the end of attach2, and we do not need to transition
+ * to ERROR. */
goto error_notbroken;
}
#else /* AFS_DEMAND_ATTACH_FS */
V_inUse(vp) = fileServer;
V_offlineMessage(vp)[0] = '\0';
}
+#ifdef AFS_DEMAND_ATTACH_FS
+ /* check if the volume is actually usable. only do this for DAFS; for
+ * non-DAFS, volumes that are not inService/blessed can still be
+ * attached, even if clients cannot access them. this is relevant
+ * because for non-DAFS, we try to attach the volume when e.g.
+ * volserver gives us back the vol when it's done with it, but
+ * volserver may give us back a volume that is not inService/blessed. */
+
if (!V_inUse(vp)) {
*ec = VNOVOL;
-#ifdef AFS_DEMAND_ATTACH_FS
/* Put the vol into PREATTACHED state, so if someone tries to
* access it again, we try to attach, see that we're not blessed,
* and give a VNOVOL error again. Putting it into UNATTACHED state
* would result in a VOFFLINE error instead. */
error_state = VOL_STATE_PREATTACHED;
-#endif /* AFS_DEMAND_ATTACH_FS */
/* mimic e.g. GetVolume errors */
if (!V_blessed(vp)) {
FreeVolumeHeader(vp);
} else if (!V_inService(vp)) {
Log("Volume %lu offline: not in service\n", afs_printable_uint32_lu(V_id(vp)));
+ /* the volume is offline and should be unattached */
+ *ec = VOFFLINE;
+ error_state = VOL_STATE_UNATTACHED;
FreeVolumeHeader(vp);
} else {
Log("Volume %lu offline: needs salvage\n", afs_printable_uint32_lu(V_id(vp)));
*ec = VSALVAGE;
-#ifdef AFS_DEMAND_ATTACH_FS
error_state = VOL_STATE_ERROR;
/* see if we can recover */
- VRequestSalvage_r(ec, vp, SALVSYNC_NEEDED, 0 /*flags*/);
-#endif
+ VRequestSalvage_r(ec, vp, SALVSYNC_NEEDED, VOL_SALVAGE_NO_OFFLINE);
}
-#ifdef AFS_DEMAND_ATTACH_FS
vp->nUsers = 0;
-#endif
goto locked_error;
}
+#endif /* AFS_DEMAND_ATTACH_FS */
} else {
#ifdef AFS_DEMAND_ATTACH_FS
- if ((mode != V_PEEK) && (mode != V_SECRETLY))
+ if ((mode != V_PEEK) && (mode != V_SECRETLY) && (mode != V_READONLY))
V_inUse(vp) = programType;
#endif /* AFS_DEMAND_ATTACH_FS */
V_checkoutMode(vp) = mode;
}
- AddVolumeToHashTable(vp, V_id(vp));
+ AddVolumeToHashTable(vp, vp->hashid);
#ifdef AFS_DEMAND_ATTACH_FS
if (VCanUnlockAttached() && (V_attachFlags(vp) & VOL_LOCKED)) {
VUnlockVolume(vp);
return vp;
-#ifndef AFS_DEMAND_ATTACH_FS
unlocked_error:
-#endif
-
VOL_LOCK;
locked_error:
#ifdef AFS_DEMAND_ATTACH_FS
if (!VIsErrorState(V_attachState(vp))) {
+ if (programType != fileServer && *ec == VNOVOL) {
+ /* do not log anything in this case; it is common for
+ * non-fileserver programs to fail here with VNOVOL, since that
+ * is what happens when they simply try to use a volume, but that
+ * volume doesn't exist. */
+
+ } else if (VIsErrorState(error_state)) {
+ Log("attach2: forcing vol %" AFS_VOLID_FMT " to error state (state %u flags 0x%x ec %d)\n",
+ afs_printable_VolumeId_lu(vp->hashid), V_attachState(vp),
+ V_attachFlags(vp), *ec);
+ }
VChangeState_r(vp, error_state);
}
#endif /* AFS_DEMAND_ATTACH_FS */
VReleaseVolumeHandles_r(vp);
}
- error_notbroken:
#ifdef AFS_DEMAND_ATTACH_FS
- VCheckSalvage(vp);
+ error_notbroken:
+ if (VCheckSalvage(vp) == VCHECK_SALVAGE_FAIL) {
+ /* The salvage could not be scheduled with the salvage server
+ * due to a hard error. Reset the error code to prevent retry loops by
+ * callers. */
+ if (*ec == VSALVAGING) {
+ *ec = VSALVAGE;
+ }
+ }
if (forcefree) {
FreeVolume(vp);
} else {
Error error;
vp = VGetVolume_r(&error, volumeId);
if (vp) {
- osi_Assert(V_inUse(vp) == 0);
+ opr_Assert(V_inUse(vp) == 0);
VDetachVolume_r(ec, vp);
}
return NULL;
VOfflineTimeout(struct timespec *ats)
{
if (vol_shutting_down) {
- osi_Assert(pthread_once(&shutdown_timeout_once, VShutdownTimeoutInit) == 0);
+ opr_Verify(pthread_once(&shutdown_timeout_once,
+ VShutdownTimeoutInit) == 0);
return shutdown_timeout;
} else {
return VCalcTimeout(ats, vol_opts.offline_timeout);
void
VPutVolume_r(Volume * vp)
{
- osi_Assert(--vp->nUsers >= 0);
+ opr_Verify(--vp->nUsers >= 0);
if (vp->nUsers == 0) {
VCheckOffline(vp);
ReleaseVolumeHeader(vp->header);
of whether or not the volume is in service or on/off line. An error
code, however, is returned with an indication of the volume's status */
Volume *
-VGetVolume(Error * ec, Error * client_ec, VolId volumeId)
+VGetVolume(Error * ec, Error * client_ec, VolumeId volumeId)
{
Volume *retVal;
VOL_LOCK;
* VPutVolumeWithCall
*/
Volume *
-VGetVolumeWithCall(Error * ec, Error * client_ec, VolId volumeId,
+VGetVolumeWithCall(Error * ec, Error * client_ec, VolumeId volumeId,
const struct timespec *ts, struct VCallByVol *cbv)
{
Volume *retVal;
}
Volume *
-VGetVolume_r(Error * ec, VolId volumeId)
+VGetVolume_r(Error * ec, VolumeId volumeId)
{
return GetVolume(ec, NULL, volumeId, NULL, NULL);
}
* @note for LWP builds, 'timeout' must be NULL
*/
static Volume *
-GetVolume(Error * ec, Error * client_ec, VolId volumeId, Volume * hint,
+GetVolume(Error * ec, Error * client_ec, VolumeId volumeId, Volume * hint,
const struct timespec *timeout)
{
Volume *vp = hint;
* - VOL_STATE_SHUTTING_DOWN
*/
if ((V_attachState(vp) == VOL_STATE_ERROR) ||
- (V_attachState(vp) == VOL_STATE_SHUTTING_DOWN) ||
- (V_attachState(vp) == VOL_STATE_GOING_OFFLINE)) {
+ (V_attachState(vp) == VOL_STATE_SHUTTING_DOWN)) {
*ec = VNOVOL;
vp = NULL;
break;
}
/*
- * short circuit with VOFFLINE for VOL_STATE_UNATTACHED and
+ * short circuit with VOFFLINE for VOL_STATE_UNATTACHED/GOING_OFFLINE and
* VNOVOL for VOL_STATE_DELETED
*/
if ((V_attachState(vp) == VOL_STATE_UNATTACHED) ||
+ (V_attachState(vp) == VOL_STATE_GOING_OFFLINE) ||
(V_attachState(vp) == VOL_STATE_DELETED)) {
if (vp->specialStatus) {
*ec = vp->specialStatus;
case VSALVAGING:
break;
case VOFFLINE:
- if (!vp->pending_vol_op) {
- endloop = 1;
+ endloop = 1;
+ if (vp->specialStatus) {
+ *ec = vp->specialStatus;
}
break;
+
default:
- *ec = VNOVOL;
+ if (vp->specialStatus) {
+ *ec = vp->specialStatus;
+ } else {
+ *ec = VNOVOL;
+ }
endloop = 1;
}
if (endloop) {
vp = NULL;
break;
}
-#endif
-#ifdef AFS_DEMAND_ATTACH_FS
+ if (VIsErrorState(V_attachState(vp))) {
+ /* make sure we don't take a vp in VOL_STATE_ERROR state and use
+ * it, or transition it out of that state */
+ if (!*ec) {
+ *ec = VNOVOL;
+ }
+ vp = NULL;
+ break;
+ }
+
/*
- * this test MUST happen after VAttachVolymeByVp, so vol_op_state is
- * not VolOpRunningUnknown (attach2 would have converted it to Online
- * or Offline)
+ * this test MUST happen after VAttachVolumeByVp, so we have no
+ * conflicting vol op. (attach2 would have errored out if we had one;
+ * specifically attach_check_vop must have detected a conflicting vop)
*/
+ opr_Assert(!vp->pending_vol_op || vp->pending_vol_op->vol_op_state == FSSYNC_VolOpRunningOnline);
- /* only valid before/during demand attachment */
- osi_Assert(!vp->pending_vol_op || vp->pending_vol_op->vol_op_state != FSSYNC_VolOpRunningUnknown);
-
- /* deny getvolume due to running mutually exclusive vol op */
- if (vp->pending_vol_op && vp->pending_vol_op->vol_op_state==FSSYNC_VolOpRunningOffline) {
- /*
- * volume cannot remain online during this volume operation.
- * notify client.
- */
- if (vp->specialStatus) {
- /*
- * special status codes outrank normal VOFFLINE code
- */
- *ec = vp->specialStatus;
- if (client_ec) {
- *client_ec = vp->specialStatus;
- }
- } else {
- if (client_ec) {
- /* see CheckVnode() in afsfileprocs.c for an explanation
- * of this error code logic */
- afs_uint32 now = FT_ApproxTime();
- if ((vp->stats.last_vol_op + (10 * 60)) >= now) {
- *client_ec = VBUSY;
- } else {
- *client_ec = VRESTARTING;
- }
- }
- *ec = VOFFLINE;
- }
- VChangeState_r(vp, VOL_STATE_UNATTACHED);
- FreeVolumeHeader(vp);
- vp = NULL;
- break;
- }
#endif /* AFS_DEMAND_ATTACH_FS */
LoadVolumeHeader(ec, vp);
VGET_CTR_INC(V6);
/* Only log the error if it was a totally unexpected error. Simply
* a missing inode is likely to be caused by the volume being deleted */
- if (errno != ENXIO || LogLevel)
- Log("Volume %u: couldn't reread volume header\n",
- vp->hashid);
+ if (errno != ENXIO || GetLogLevel() != 0)
+ Log("Volume %" AFS_VOLID_FMT ": couldn't reread volume header\n",
+ afs_printable_VolumeId_lu(vp->hashid));
#ifdef AFS_DEMAND_ATTACH_FS
if (VCanScheduleSalvage()) {
VRequestSalvage_r(ec, vp, SALVSYNC_ERROR, 0 /*flags*/);
#else /* AFS_PTHREAD_ENV */
/* LWP has no timed wait, so the caller better not be
* expecting one */
- osi_Assert(!timeout);
+ opr_Assert(!timeout);
LWP_WaitProcess(VPutVolume);
#endif /* AFS_PTHREAD_ENV */
continue;
#endif /* AFS_DEMAND_ATTACH_FS */
not_inited:
- osi_Assert(vp || *ec);
+ opr_Assert(vp || *ec);
return vp;
}
{
Error error;
- osi_Assert(vp->nUsers > 0);
- osi_Assert(programType == fileServer);
+ opr_Assert(vp->nUsers > 0);
+ opr_Assert(programType == fileServer);
VCreateReservation_r(vp);
VWaitExclusiveState_r(vp);
void
VTakeOffline_r(Volume * vp)
{
- osi_Assert(vp->nUsers > 0);
- osi_Assert(programType == fileServer);
+ opr_Assert(vp->nUsers > 0);
+ opr_Assert(programType == fileServer);
vp->goingOffline = 1;
V_needsSalvaged(vp) = 1;
strcpy(V_offlineMessage(vp),
"Forced offline due to internal error: volume needs to be salvaged");
- Log("Volume %u forced offline: it needs salvaging!\n", V_id(vp));
+ Log("Volume %" AFS_VOLID_FMT " forced offline: it needs salvaging!\n", afs_printable_VolumeId_lu(V_id(vp)));
V_inUse(vp) = 0;
vp->goingOffline = 0;
#endif /* AFS_DEMAND_ATTACH_FS */
#ifdef AFS_PTHREAD_ENV
- CV_BROADCAST(&vol_put_volume_cond);
+ opr_cv_broadcast(&vol_put_volume_cond);
#else /* AFS_PTHREAD_ENV */
LWP_NoYieldSignal(VPutVolume);
#endif /* AFS_PTHREAD_ENV */
#endif /* AFS_DEMAND_ATTACH_FS */
for(queue_Scan(&vp->rx_call_list, cbv, ncbv, VCallByVol)) {
- if (LogLevel > 0) {
+ if (GetLogLevel() != 0) {
struct rx_peer *peer;
char hoststr[16];
peer = rx_PeerOf(rx_ConnectionOf(cbv->call));
- Log("Offlining volume %lu while client %s:%u is trying to read "
+ Log("Offlining volume %" AFS_VOLID_FMT " while client %s:%u is trying to read "
"from it; kicking client off with error %ld\n",
- (long unsigned) vp->hashid,
+ afs_printable_VolumeId_lu(vp->hashid),
afs_inet_ntoa_r(rx_HostOf(peer), hoststr),
(unsigned) ntohs(rx_PortOf(peer)),
(long) err);
VolumeId vid = V_id(vp);
#endif
- osi_Assert(programType != volumeUtility && programType != volumeServer);
+ opr_Assert(programType != volumeUtility && programType != volumeServer);
if (!V_inUse(vp)) {
VPutVolume_r(vp);
return;
VOfflineForVolOp_r(Error *ec, Volume *vp, char *message)
{
int salvok = 1;
- osi_Assert(vp->pending_vol_op);
+ opr_Assert(vp->pending_vol_op);
if (!V_inUse(vp)) {
VPutVolume_r(vp);
*ec = 1;
IH_CONDSYNC(vp->vnodeIndex[vLarge].handle);
IH_CONDSYNC(vp->vnodeIndex[vSmall].handle);
IH_CONDSYNC(vp->diskDataHandle);
-#ifdef AFS_NT40_ENV
+#ifdef AFS_NAMEI_ENV
IH_CONDSYNC(vp->linkHandle);
-#endif /* AFS_NT40_ENV */
+#endif /* AFS_NAMEI_ENV */
}
IH_REALLYCLOSE(vp->vnodeIndex[vLarge].handle);
IH_CONDSYNC(vp->vnodeIndex[vLarge].handle);
IH_CONDSYNC(vp->vnodeIndex[vSmall].handle);
IH_CONDSYNC(vp->diskDataHandle);
-#ifdef AFS_NT40_ENV
+#ifdef AFS_NAMEI_ENV
IH_CONDSYNC(vp->linkHandle);
-#endif /* AFS_NT40_ENV */
+#endif /* AFS_NAMEI_ENV */
}
IH_RELEASE(vp->vnodeIndex[vLarge].handle);
#endif
*ec = 0;
- if (programType == fileServer)
- V_uniquifier(vp) =
- (V_inUse(vp) ? V_nextVnodeUnique(vp) +
- 200 : V_nextVnodeUnique(vp));
+ if (programType == fileServer) {
+ if (!V_inUse(vp)) {
+ V_uniquifier(vp) = V_nextVnodeUnique(vp);
+ } else {
+ V_uniquifier(vp) =
+ V_nextVnodeUnique(vp) + VOLUME_UPDATE_UNIQUIFIER_BUMP;
+ if (V_uniquifier(vp) < V_nextVnodeUnique(vp)) {
+ /* uniquifier rolled over; reset the counters */
+ V_nextVnodeUnique(vp) = 2; /* 1 is reserved for the root vnode */
+ V_uniquifier(vp) =
+ V_nextVnodeUnique(vp) + VOLUME_UPDATE_UNIQUIFIER_BUMP;
+ }
+ }
+ }
#ifdef AFS_DEMAND_ATTACH_FS
state_save = VChangeState_r(vp, VOL_STATE_UPDATING);
#endif
if (*ec) {
- Log("VUpdateVolume: error updating volume header, volume %u (%s)\n",
- V_id(vp), V_name(vp));
+ Log("VUpdateVolume: error updating volume header, volume %" AFS_VOLID_FMT " (%s)\n",
+ afs_printable_VolumeId_lu(V_id(vp)), V_name(vp));
/* try to update on-disk header,
* while preventing infinite recursion */
if (!(flags & VOL_UPDATE_NOFORCEOFF)) {
VOL_UNLOCK;
#endif
fdP = IH_OPEN(V_diskDataHandle(vp));
- osi_Assert(fdP != NULL);
+ opr_Assert(fdP != NULL);
code = FDH_SYNC(fdP);
- osi_Assert(code == 0);
+ opr_Assert(code == 0);
FDH_CLOSE(fdP);
#ifdef AFS_DEMAND_ATTACH_FS
VOL_LOCK;
V_inUse(vp) = 0;
VUpdateVolume_r(&ec, vp, VOL_UPDATE_NOFORCEOFF);
if (ec) {
- Log("VCheckDetach: volume header update for volume %u "
- "failed with errno %d\n", vp->hashid, errno);
+ Log("VCheckDetach: volume header update for volume %" AFS_VOLID_FMT " "
+ "failed with errno %d\n", afs_printable_VolumeId_lu(vp->hashid), errno);
}
}
VReleaseVolumeHandles_r(vp);
VCheckSalvage(vp);
ReallyFreeVolume(vp);
if (programType == fileServer) {
- CV_BROADCAST(&vol_put_volume_cond);
+ opr_cv_broadcast(&vol_put_volume_cond);
}
}
return ret;
V_inUse(vp) = 0;
VUpdateVolume_r(&ec, vp, VOL_UPDATE_NOFORCEOFF);
if (ec) {
- Log("VCheckDetach: volume header update for volume %u failed with errno %d\n",
- vp->hashid, errno);
+ Log("VCheckDetach: volume header update for volume %" AFS_VOLID_FMT " failed with errno %d\n",
+ afs_printable_VolumeId_lu(vp->hashid), errno);
}
}
VReleaseVolumeHandles_r(vp);
ReallyFreeVolume(vp);
if (programType == fileServer) {
#if defined(AFS_PTHREAD_ENV)
- CV_BROADCAST(&vol_put_volume_cond);
+ opr_cv_broadcast(&vol_put_volume_cond);
#else /* AFS_PTHREAD_ENV */
LWP_NoYieldSignal(VPutVolume);
#endif /* AFS_PTHREAD_ENV */
if (vp->goingOffline && !vp->nUsers) {
Error error;
- osi_Assert(programType == fileServer);
- osi_Assert((V_attachState(vp) != VOL_STATE_ATTACHED) &&
+ opr_Assert(programType == fileServer);
+ opr_Assert((V_attachState(vp) != VOL_STATE_ATTACHED) &&
(V_attachState(vp) != VOL_STATE_FREED) &&
(V_attachState(vp) != VOL_STATE_PREATTACHED) &&
(V_attachState(vp) != VOL_STATE_UNATTACHED) &&
VUpdateVolume_r(&error, vp, 0);
VCloseVolumeHandles_r(vp);
- if (LogLevel) {
+ if (GetLogLevel() != 0) {
if (V_offlineMessage(vp)[0]) {
Log("VOffline: Volume %lu (%s) is now offline (%s)\n",
afs_printable_uint32_lu(V_id(vp)), V_name(vp),
if (vp->goingOffline && !vp->nUsers) {
Error error;
- osi_Assert(programType == fileServer);
+ opr_Assert(programType == fileServer);
ret = 1;
vp->goingOffline = 0;
V_inUse(vp) = 0;
VUpdateVolume_r(&error, vp, 0);
VCloseVolumeHandles_r(vp);
- if (LogLevel) {
+ if (GetLogLevel() != 0) {
if (V_offlineMessage(vp)[0]) {
Log("VOffline: Volume %lu (%s) is now offline (%s)\n",
afs_printable_uint32_lu(V_id(vp)), V_name(vp),
}
FreeVolumeHeader(vp);
#ifdef AFS_PTHREAD_ENV
- CV_BROADCAST(&vol_put_volume_cond);
+ opr_cv_broadcast(&vol_put_volume_cond);
#else /* AFS_PTHREAD_ENV */
LWP_NoYieldSignal(VPutVolume);
#endif /* AFS_PTHREAD_ENV */
void
VCancelReservation_r(Volume * vp)
{
- osi_Assert(--vp->nWaiters >= 0);
+ opr_Verify(--vp->nWaiters >= 0);
if (vp->nWaiters == 0) {
VCheckOffline(vp);
if (!VCheckDetach(vp)) {
FSSYNC_VolOp_info * info;
/* attach a vol op info node to the volume struct */
- info = (FSSYNC_VolOp_info *) malloc(sizeof(FSSYNC_VolOp_info));
- osi_Assert(info != NULL);
+ info = malloc(sizeof(FSSYNC_VolOp_info));
+ opr_Assert(info != NULL);
memcpy(info, vopinfo, sizeof(FSSYNC_VolOp_info));
vp->pending_vol_op = info;
* @param[in] vp pointer to volume object
*
* @return status code
- * @retval 0 no salvage scheduled
- * @retval 1 a salvage has been scheduled with the salvageserver
+ * @retval VCHECK_SALVAGE_OK (0) no pending salvage
+ * @retval VCHECK_SALVAGE_SCHEDULED (1) salvage has been scheduled
+ * @retval VCHECK_SALVAGE_ASYNC (2) salvage being scheduled
+ * @retval VCHECK_SALVAGE_DENIED (3) salvage not scheduled; denied
+ * @retval VCHECK_SALVAGE_FAIL (4) salvage not scheduled; failed
*
* @pre VOL_LOCK is held
*
static int
VCheckSalvage(Volume * vp)
{
- int ret = 0;
+ int ret = VCHECK_SALVAGE_OK;
+
#if defined(SALVSYNC_BUILD_CLIENT) || defined(FSSYNC_BUILD_CLIENT)
- if (vp->nUsers)
- return ret;
if (!vp->salvage.requested) {
- return ret;
+ return VCHECK_SALVAGE_OK;
+ }
+ if (vp->nUsers) {
+ return VCHECK_SALVAGE_ASYNC;
}
/* prevent recursion; some of the code below creates and removes
* lightweight refs, which can call VCheckSalvage */
if (vp->salvage.scheduling) {
- return ret;
+ return VCHECK_SALVAGE_ASYNC;
}
vp->salvage.scheduling = 1;
if (V_attachState(vp) == VOL_STATE_SALVAGE_REQ) {
if (!VOfflineForSalvage_r(vp)) {
vp->salvage.scheduling = 0;
- return ret;
+ return VCHECK_SALVAGE_FAIL;
}
}
if (vp->salvage.requested) {
- VScheduleSalvage_r(vp);
- ret = 1;
+ ret = VScheduleSalvage_r(vp);
}
vp->salvage.scheduling = 0;
#endif /* SALVSYNC_BUILD_CLIENT || FSSYNC_BUILD_CLIENT */
*ec = VSALVAGING;
} else {
- Log("VRequestSalvage: volume %u online salvaged too many times; forced offline.\n", vp->hashid);
+ Log("VRequestSalvage: volume %" AFS_VOLID_FMT " online salvaged too many times; forced offline.\n", afs_printable_VolumeId_lu(vp->hashid));
/* make sure neither VScheduleSalvage_r nor
* VUpdateSalvagePriority_r try to schedule another salvage */
*ec = VSALVAGE;
code = 1;
}
+ if ((flags & VOL_SALVAGE_NO_OFFLINE)) {
+ /* Here, we free the header for the volume, but make sure to only
+ * do this if VOL_SALVAGE_NO_OFFLINE is specified. The reason for
+ * this requires a bit of explanation.
+ *
+ * Normally, the volume header will be freed when the volume goes
+ * offline. However, if VOL_SALVAGE_NO_OFFLINE has been
+ * specified, the volume was in the process of being attached when
+ * we discovered that it needed salvaging. Thus, the volume will
+ * never go offline, since it never went fully online in the first
+ * place. Specifically, we do not call VOfflineForSalvage_r above,
+ * and we never get rid of the volume via VPutVolume_r; the volume
+ * has not been initialized enough for those to work.
+ *
+ * So instead, explicitly free the volume header here. If we do not
+ * do this, we are wasting a header that some other volume could be
+ * using, since the header remains attached to the volume. Also if
+ * we do not free the header here, we end up with a volume where
+ * nUsers == 0, but the volume has a header that is not on the
+ * header LRU. Some code expects that all nUsers == 0 volumes have
+ * their header on the header LRU (or have no header).
+ *
+ * Also note that we must not free the volume header here if
+ * VOL_SALVAGE_NO_OFFLINE is not set. Since, if
+ * VOL_SALVAGE_NO_OFFLINE is not set, someone else may have a
+ * reference to this volume, and they assume they can use the
+ * volume's header. If we free the volume out from under them, they
+ * can easily segfault.
+ */
+ FreeVolumeHeader(vp);
+ }
}
return code;
}
try_SALVSYNC(Volume *vp, char *partName, int *code) {
#ifdef SALVSYNC_BUILD_CLIENT
if (VCanUseSALVSYNC()) {
- Log("Scheduling salvage for volume %lu on part %s over SALVSYNC\n",
- afs_printable_uint32_lu(vp->hashid), partName);
+ Log("Scheduling salvage for volume %" AFS_VOLID_FMT " on part %s over SALVSYNC\n",
+ afs_printable_VolumeId_lu(vp->hashid), partName);
/* can't use V_id() since there's no guarantee
* we have the disk data header at this point */
try_FSSYNC(Volume *vp, char *partName, int *code) {
#ifdef FSSYNC_BUILD_CLIENT
if (VCanUseFSSYNC()) {
- Log("Scheduling salvage for volume %lu on part %s over FSSYNC\n",
- afs_printable_uint32_lu(vp->hashid), partName);
+ Log("Scheduling salvage for volume %" AFS_VOLID_FMT " on part %s over FSSYNC\n",
+ afs_printable_VolumeId_lu(vp->hashid), partName);
/*
* If we aren't the fileserver, tell the fileserver the volume
* @param[in] vp pointer to volume object
*
* @return operation status
- * @retval 0 salvage scheduled successfully
- * @retval 1 salvage not scheduled, or SALVSYNC/FSSYNC com error
+ * @retval VCHECK_SALVAGE_OK (0) no pending salvage
+ * @retval VCHECK_SALVAGE_SCHEDULED (1) salvage has been scheduled
+ * @retval VCHECK_SALVAGE_ASYNC (2) salvage being scheduled
+ * @retval VCHECK_SALVAGE_DENIED (3) salvage not scheduled; denied
+ * @retval VCHECK_SALVAGE_FAIL (4) salvage not scheduled; failed
*
* @pre
* @arg VOL_LOCK is held.
static int
VScheduleSalvage_r(Volume * vp)
{
- int ret=0;
+ int ret = VCHECK_SALVAGE_SCHEDULED;
int code = 0;
VolState state_save;
VThreadOptions_t * thread_opts;
char partName[16];
- osi_Assert(VCanUseSALVSYNC() || VCanUseFSSYNC());
+ opr_Verify(VCanUseSALVSYNC() || VCanUseFSSYNC());
if (vp->nWaiters || vp->nUsers) {
- return 1;
+ return VCHECK_SALVAGE_ASYNC;
}
/* prevent endless salvage,attach,salvage,attach,... loops */
- if (vp->stats.salvages >= SALVAGE_COUNT_MAX)
- return 1;
+ if (vp->stats.salvages >= SALVAGE_COUNT_MAX) {
+ return VCHECK_SALVAGE_FAIL;
+ }
/*
* don't perform salvsync ops on certain threads
thread_opts = &VThread_defaults;
}
if (thread_opts->disallow_salvsync || vol_disallow_salvsync) {
- return 1;
+ return VCHECK_SALVAGE_ASYNC;
}
if (vp->salvage.scheduled) {
- return ret;
+ return VCHECK_SALVAGE_SCHEDULED;
}
VCreateReservation_r(vp);
* XXX the scheduling process should really be done asynchronously
* to avoid fssync deadlocks
*/
- if (!vp->salvage.scheduled) {
+ if (vp->salvage.scheduled) {
+ ret = VCHECK_SALVAGE_SCHEDULED;
+ } else {
/* if we haven't previously scheduled a salvage, do so now
*
* set the volume to an exclusive state and drop the lock
state_save = VChangeState_r(vp, VOL_STATE_SALVSYNC_REQ);
VOL_UNLOCK;
- osi_Assert(try_SALVSYNC(vp, partName, &code) ||
- try_FSSYNC(vp, partName, &code));
+ opr_Verify(try_SALVSYNC(vp, partName, &code)
+ || try_FSSYNC(vp, partName, &code));
VOL_LOCK;
VChangeState_r(vp, state_save);
if (code == SYNC_OK) {
+ ret = VCHECK_SALVAGE_SCHEDULED;
vp->salvage.scheduled = 1;
vp->stats.last_salvage_req = FT_ApproxTime();
if (VCanUseSALVSYNC()) {
IncUInt64(&VStats.salvages);
}
} else {
- ret = 1;
switch(code) {
case SYNC_BAD_COMMAND:
case SYNC_COM_ERROR:
+ ret = VCHECK_SALVAGE_FAIL;
break;
case SYNC_DENIED:
- Log("VScheduleSalvage_r: Salvage request for volume %lu "
- "denied\n", afs_printable_uint32_lu(vp->hashid));
+ ret = VCHECK_SALVAGE_DENIED;
+ Log("VScheduleSalvage_r: Salvage request for volume %" AFS_VOLID_FMT " "
+ "denied\n", afs_printable_VolumeId_lu(vp->hashid));
+ break;
+ case SYNC_FAILED:
+ ret = VCHECK_SALVAGE_FAIL;
+ Log("VScheduleSalvage_r: Salvage request for volume %" AFS_VOLID_FMT " "
+ "failed\n", afs_printable_VolumeId_lu(vp->hashid));
break;
default:
- Log("VScheduleSalvage_r: Salvage request for volume %lu "
+ ret = VCHECK_SALVAGE_FAIL;
+ Log("VScheduleSalvage_r: Salvage request for volume %" AFS_VOLID_FMT " "
"received unknown protocol error %d\n",
- afs_printable_uint32_lu(vp->hashid), code);
+ afs_printable_VolumeId_lu(vp->hashid), code);
break;
}
* this, as the caller may reference vp without any refs. Instead, it
* is the duty of the caller to inspect 'vp' after we return to see if
* needs to be freed. */
- osi_Assert(--vp->nWaiters >= 0);
+ opr_Verify(--vp->nWaiters >= 0);
return ret;
}
#endif /* SALVSYNC_BUILD_CLIENT || FSSYNC_BUILD_CLIENT */
VConnectFS_r(void)
{
int rc;
- osi_Assert((VInit == 2) &&
+ opr_Assert((VInit == 2) &&
(programType != fileServer) &&
(programType != salvager));
rc = FSYNC_clientInit();
void
VDisconnectFS_r(void)
{
- osi_Assert((programType != fileServer) &&
+ opr_Assert((programType != fileServer) &&
(programType != salvager));
FSYNC_clientFinis();
VSetVInit_r(2);
/* volume bitmap routines */
/***************************************************/
+/*
+ * Grow the bitmap by the defined increment
+ */
+void
+VGrowBitmap(struct vnodeIndex *index)
+{
+ byte *bp;
+
+ bp = realloc(index->bitmap, index->bitmapSize + VOLUME_BITMAP_GROWSIZE);
+ osi_Assert(bp != NULL);
+ index->bitmap = bp;
+ bp += index->bitmapSize;
+ memset(bp, 0, VOLUME_BITMAP_GROWSIZE);
+ index->bitmapOffset = index->bitmapSize;
+ index->bitmapSize += VOLUME_BITMAP_GROWSIZE;
+
+ return;
+}
+
/**
* allocate a vnode bitmap number for the vnode
*
index->bitmapOffset = (afs_uint32) (bp - index->bitmap);
while (*bp == 0xff)
bp++;
- o = ffs(~*bp) - 1; /* ffs is documented in BSTRING(3) */
+ o = opr_ffs(~*bp) - 1;
*bp |= (1 << o);
ret = ((bp - index->bitmap) * 8 + o);
#ifdef AFS_DEMAND_ATTACH_FS
bp += sizeof(bit32) /* i.e. 4 */ ;
}
/* No bit map entry--must grow bitmap */
- bp = (byte *)
- realloc(index->bitmap, index->bitmapSize + VOLUME_BITMAP_GROWSIZE);
- osi_Assert(bp != NULL);
- index->bitmap = bp;
- bp += index->bitmapSize;
- memset(bp, 0, VOLUME_BITMAP_GROWSIZE);
- index->bitmapOffset = index->bitmapSize;
- index->bitmapSize += VOLUME_BITMAP_GROWSIZE;
+ VGrowBitmap(index);
+ bp = index->bitmap + index->bitmapOffset;
*bp = 1;
ret = index->bitmapOffset * 8;
#ifdef AFS_DEMAND_ATTACH_FS
done:
#ifdef AFS_DEMAND_ATTACH_FS
- VCancelReservation_r(vp);
+ if (flags & VOL_FREE_BITMAP_WAIT) {
+ VCancelReservation_r(vp);
+ }
#endif
return; /* make the compiler happy for non-DAFS */
}
VOL_UNLOCK;
fdP = IH_OPEN(vip->handle);
- osi_Assert(fdP != NULL);
+ opr_Assert(fdP != NULL);
file = FDH_FDOPEN(fdP, "r");
- osi_Assert(file != NULL);
- vnode = (VnodeDiskObject *) malloc(vcp->diskSize);
- osi_Assert(vnode != NULL);
+ opr_Assert(file != NULL);
+ vnode = malloc(vcp->diskSize);
+ opr_Assert(vnode != NULL);
size = OS_SIZE(fdP->fd_fd);
- osi_Assert(size != -1);
+ opr_Assert(size != -1);
nVnodes = (size <= vcp->diskSize ? 0 : size - vcp->diskSize)
>> vcp->logSize;
vip->bitmapSize = ((nVnodes / 8) + 10) / 4 * 4; /* The 10 is a little extra so
* it that way */
#ifdef BITMAP_LATER
BitMap = (byte *) calloc(1, vip->bitmapSize);
- osi_Assert(BitMap != NULL);
+ opr_Assert(BitMap != NULL);
#else /* BITMAP_LATER */
vip->bitmap = (byte *) calloc(1, vip->bitmapSize);
- osi_Assert(vip->bitmap != NULL);
+ opr_Assert(vip->bitmap != NULL);
vip->bitmapOffset = 0;
#endif /* BITMAP_LATER */
if (STREAM_ASEEK(file, vcp->diskSize) != -1) {
vip->bitmap = BitMap;
vip->bitmapOffset = 0;
} else
- free((byte *) BitMap);
+ free(BitMap);
#endif /* BITMAP_LATER */
#ifdef AFS_DEMAND_ATTACH_FS
VChangeState_r(vp, state_save);
*
*/
void
-VGetVolumePath(Error * ec, VolId volumeId, char **partitionp, char **namep)
+VGetVolumePath(Error * ec, VolumeId volumeId, char **partitionp, char **namep)
{
static char partition[VMAXPATHLEN], name[VMAXPATHLEN];
char path[VMAXPATHLEN];
*ec = 0;
name[0] = OS_DIRSEPC;
snprintf(&name[1], (sizeof name) - 1, VFORMAT,
- afs_printable_uint32_lu(volumeId));
+ afs_printable_VolumeId_lu(volumeId));
for (dp = DiskPartitionList; dp; dp = dp->next) {
struct afs_stat_st status;
strcpy(path, VPartitionPath(dp));
VolumeExternalName(VolumeId volumeId)
{
static char name[VMAXPATHLEN];
- snprintf(name, sizeof name, VFORMAT, afs_printable_uint32_lu(volumeId));
+ snprintf(name, sizeof name, VFORMAT, afs_printable_VolumeId_lu(volumeId));
return name;
}
int
VolumeExternalName_r(VolumeId volumeId, char * name, size_t len)
{
- return snprintf(name, len, VFORMAT, afs_printable_uint32_lu(volumeId));
+ return snprintf(name, len, VFORMAT, afs_printable_VolumeId_lu(volumeId));
}
/* Volume Usage Statistics routines */
/***************************************************/
-#if OPENAFS_VOL_STATS
#define OneDay (86400) /* 24 hours' worth of seconds */
-#else
-#define OneDay (24*60*60) /* 24 hours */
-#endif /* OPENAFS_VOL_STATS */
static time_t
Midnight(time_t t) {
V_dayUse(vp) = 0;
V_dayUseDate(vp) = Midnight(now);
-#if OPENAFS_VOL_STATS
/*
* All we need to do is bzero the entire VOL_STATS_BYTES of
* the detailed volume statistics area.
*/
memset((V_stat_area(vp)), 0, VOL_STATS_BYTES);
-#endif /* OPENAFS_VOL_STATS */
- }
+ }
/*It's been more than a day of collection */
/*
return;
if (UpdateList == NULL) {
updateSize = UPDATE_LIST_SIZE;
- UpdateList = (VolumeId *) malloc(sizeof(VolumeId) * updateSize);
+ UpdateList = malloc(sizeof(VolumeId) * updateSize);
} else {
if (nUpdatedVolumes == updateSize) {
updateSize <<= 1;
Log("warning: there is likely a bug in the volume update scanner\n");
return;
}
- UpdateList =
- (VolumeId *) realloc(UpdateList,
- sizeof(VolumeId) * updateSize);
+ UpdateList = realloc(UpdateList,
+ sizeof(VolumeId) * updateSize);
}
}
- osi_Assert(UpdateList != NULL);
+ opr_Assert(UpdateList != NULL);
UpdateList[nUpdatedVolumes++] = V_id(vp);
#endif /* !AFS_DEMAND_ATTACH_FS */
}
queue_Init(&volume_LRU.q[i]);
volume_LRU.q[i].len = 0;
volume_LRU.q[i].busy = 0;
- CV_INIT(&volume_LRU.q[i].cv, "vol lru", CV_DEFAULT, 0);
+ opr_cv_init(&volume_LRU.q[i].cv);
}
/* setup the timing constants */
VLRU_ComputeConstants();
- /* XXX put inside LogLevel check? */
+ /* XXX put inside log level check? */
Log("VLRU: starting scanner with the following configuration parameters:\n");
Log("VLRU: offlining volumes after minimum of %d seconds of inactivity\n", VLRU_offline_thresh);
Log("VLRU: running VLRU soft detach pass every %d seconds\n", VLRU_offline_interval);
/* start up the VLRU scanner */
volume_LRU.scanner_state = VLRU_SCANNER_STATE_OFFLINE;
if (programType == fileServer) {
- CV_INIT(&volume_LRU.cv, "vol lru", CV_DEFAULT, 0);
- osi_Assert(pthread_attr_init(&attrs) == 0);
- osi_Assert(pthread_attr_setdetachstate(&attrs, PTHREAD_CREATE_DETACHED) == 0);
- osi_Assert(pthread_create(&tid, &attrs, &VLRU_ScannerThread, NULL) == 0);
+ opr_cv_init(&volume_LRU.cv);
+ opr_Verify(pthread_attr_init(&attrs) == 0);
+ opr_Verify(pthread_attr_setdetachstate(&attrs,
+ PTHREAD_CREATE_DETACHED) == 0);
+ opr_Verify(pthread_create(&tid, &attrs,
+ &VLRU_ScannerThread, NULL) == 0);
}
}
if (!VLRU_enabled)
return;
- osi_Assert(queue_IsNotOnQueue(&vp->vlru));
+ opr_Assert(queue_IsNotOnQueue(&vp->vlru));
vp->vlru.idx = VLRU_QUEUE_INVALID;
}
if (queue_IsNotOnQueue(&vp->vlru))
return;
- osi_Assert(V_attachFlags(vp) & VOL_ON_VLRU);
+ opr_Assert(V_attachFlags(vp) & VOL_ON_VLRU);
/* update the access timestamp */
vp->stats.last_get = FT_ApproxTime();
/* check to see if we've been asked to pause */
if (volume_LRU.scanner_state == VLRU_SCANNER_STATE_PAUSING) {
volume_LRU.scanner_state = VLRU_SCANNER_STATE_PAUSED;
- CV_BROADCAST(&volume_LRU.cv);
+ opr_cv_broadcast(&volume_LRU.cv);
do {
VOL_CV_WAIT(&volume_LRU.cv);
} while (volume_LRU.scanner_state == VLRU_SCANNER_STATE_PAUSED);
/* signal that scanner is down */
volume_LRU.scanner_state = VLRU_SCANNER_STATE_OFFLINE;
- CV_BROADCAST(&volume_LRU.cv);
+ opr_cv_broadcast(&volume_LRU.cv);
VOL_UNLOCK;
return NULL;
}
Volume ** salv_flag_vec = NULL;
int salv_vec_offset = 0;
- osi_Assert(idx == VLRU_QUEUE_MID || idx == VLRU_QUEUE_OLD);
+ opr_Assert(idx == VLRU_QUEUE_MID || idx == VLRU_QUEUE_OLD);
/* get exclusive access to two chains, and drop the glock */
VLRU_Wait_r(&volume_LRU.q[idx-1]);
/* no big deal if this allocation fails */
if (volume_LRU.q[idx].len) {
- salv_flag_vec = (Volume **) malloc(volume_LRU.q[idx].len * sizeof(Volume *));
+ salv_flag_vec = malloc(volume_LRU.q[idx].len * sizeof(Volume *));
}
now = FT_ApproxTime();
Volume * vp;
int i, locked = 1;
- osi_Assert(idx == VLRU_QUEUE_NEW || idx == VLRU_QUEUE_CANDIDATE);
+ opr_Assert(idx == VLRU_QUEUE_NEW || idx == VLRU_QUEUE_CANDIDATE);
/* gain exclusive access to the idx VLRU */
VLRU_Wait_r(&volume_LRU.q[idx]);
idx = vp->vlru.idx;
- osi_Assert(idx == VLRU_QUEUE_NEW);
+ opr_Assert(idx == VLRU_QUEUE_NEW);
if (vp->stats.last_get <= thresh) {
/* move to candidate pool */
static void
VLRU_BeginExclusive_r(struct VLRU_q * q)
{
- osi_Assert(q->busy == 0);
+ opr_Assert(q->busy == 0);
q->busy = 1;
}
static void
VLRU_EndExclusive_r(struct VLRU_q * q)
{
- osi_Assert(q->busy);
+ opr_Assert(q->busy);
q->busy = 0;
- CV_BROADCAST(&q->cv);
+ opr_cv_broadcast(&q->cv);
}
/* wait for another thread to end exclusive access on VLRU */
afs_uint32 ts_save;
int ret = 0;
- osi_Assert(vp->vlru.idx == VLRU_QUEUE_CANDIDATE);
+ opr_Assert(vp->vlru.idx == VLRU_QUEUE_CANDIDATE);
ts_save = vp->stats.last_get;
if (ts_save > thresh)
vp = NULL;
} else {
/* pull it off the VLRU */
- osi_Assert(vp->vlru.idx == VLRU_QUEUE_CANDIDATE);
+ opr_Assert(vp->vlru.idx == VLRU_QUEUE_CANDIDATE);
volume_LRU.q[VLRU_QUEUE_CANDIDATE].len--;
queue_Remove(&vp->vlru);
vp->vlru.idx = VLRU_QUEUE_INVALID;
volume_hdr_LRU.stats.used = howMany;
volume_hdr_LRU.stats.attached = 0;
hp = (struct volHeader *)(calloc(howMany, sizeof(struct volHeader)));
- osi_Assert(hp != NULL);
+ opr_Assert(hp != NULL);
while (howMany--)
/* We are using ReleaseVolumeHeader to initialize the values on the header list
if (programType != fileServer) {
/* for volume utilities, we allocate volHeaders as needed */
if (!vp->header) {
- hd = (struct volHeader *)calloc(1, sizeof(*vp->header));
- osi_Assert(hd != NULL);
+ hd = calloc(1, sizeof(*vp->header));
+ opr_Assert(hd != NULL);
vp->header = hd;
hd->back = vp;
#ifdef AFS_DEMAND_ATTACH_FS
* still available. pull it off the lru and return */
hd = vp->header;
queue_Remove(hd);
- osi_Assert(hd->back == vp);
+ opr_Assert(hd->back == vp);
#ifdef AFS_DEMAND_ATTACH_FS
V_attachFlags(vp) &= ~(VOL_HDR_IN_LRU);
#endif
if (!hd) {
/* LRU is empty, so allocate a new volHeader
* this is probably indicative of a leak, so let the user know */
- hd = (struct volHeader *)calloc(1, sizeof(struct volHeader));
- osi_Assert(hd != NULL);
+ hd = calloc(1, sizeof(struct volHeader));
+ opr_Assert(hd != NULL);
if (!everLogged) {
Log("****Allocated more volume headers, probably leak****\n");
everLogged = 1;
#ifdef AFS_DEMAND_ATTACH_FS
/* GetVolHeaderFromLRU had better not give us back a header
* with a volume in exclusive state... */
- osi_Assert(!VIsExclusiveState(V_attachState(hd->back)));
+ opr_Assert(!VIsExclusiveState(V_attachState(hd->back)));
#endif
if (hd->diskstuff.inUse) {
}
if (!VInit) {
- VolumeHashTable.Size = 1 << logsize;
- VolumeHashTable.Mask = VolumeHashTable.Size - 1;
+ VolumeHashTable.Size = opr_jhash_size(logsize);
+ VolumeHashTable.Mask = opr_jhash_mask(logsize);
} else {
/* we can't yet support runtime modification of this
* parameter. we'll need a configuration rwlock to
VolumeHashTable.Table = (VolumeHashChainHead *) calloc(VolumeHashTable.Size,
sizeof(VolumeHashChainHead));
- osi_Assert(VolumeHashTable.Table != NULL);
+ opr_Assert(VolumeHashTable.Table != NULL);
for (i=0; i < VolumeHashTable.Size; i++) {
queue_Init(&VolumeHashTable.Table[i]);
#ifdef AFS_DEMAND_ATTACH_FS
- CV_INIT(&VolumeHashTable.Table[i].chain_busy_cv, "vhash busy", CV_DEFAULT, 0);
+ opr_cv_init(&VolumeHashTable.Table[i].chain_busy_cv);
#endif /* AFS_DEMAND_ATTACH_FS */
}
}
* asynchronous hash chain reordering to finish.
*/
static void
-AddVolumeToHashTable(Volume * vp, int hashid)
+AddVolumeToHashTable(Volume * vp, VolumeId hashid)
{
VolumeHashChainHead * head;
head->len++;
vp->hashid = hashid;
queue_Append(head, vp);
- vp->vnodeHashOffset = VolumeHashOffset_r();
}
/**
* hint volume object.
*/
Volume *
-VLookupVolume_r(Error * ec, VolId volumeId, Volume * hint)
+VLookupVolume_r(Error * ec, VolumeId volumeId, Volume * hint)
{
int looks = 0;
Volume * vp, *np;
/* search the chain for this volume id */
for(queue_Scan(head, vp, np, Volume)) {
looks++;
- if ((vp->hashid == volumeId)) {
+ if (vp->hashid == volumeId) {
break;
}
}
static void
VHashBeginExclusive_r(VolumeHashChainHead * head)
{
- osi_Assert(head->busy == 0);
+ opr_Assert(head->busy == 0);
head->busy = 1;
}
static void
VHashEndExclusive_r(VolumeHashChainHead * head)
{
- osi_Assert(head->busy);
+ opr_Assert(head->busy);
head->busy = 0;
- CV_BROADCAST(&head->chain_busy_cv);
+ opr_cv_broadcast(&head->chain_busy_cv);
}
/**
static void
VVByPListBeginExclusive_r(struct DiskPartition64 * dp)
{
- osi_Assert(dp->vol_list.busy == 0);
+ opr_Assert(dp->vol_list.busy == 0);
dp->vol_list.busy = 1;
}
static void
VVByPListEndExclusive_r(struct DiskPartition64 * dp)
{
- osi_Assert(dp->vol_list.busy);
+ opr_Assert(dp->vol_list.busy);
dp->vol_list.busy = 0;
- CV_BROADCAST(&dp->vol_list.cv);
+ opr_cv_broadcast(&dp->vol_list.cv);
}
/**