code = ENOENT;
- code = cm_BeginDirOp(scp, userp, reqp, CM_DIRLOCK_READ, &dirop);
+ code = cm_BeginDirOp(scp, userp, reqp, CM_DIRLOCK_READ,
+ CM_DIROP_FLAG_NONE, &dirop);
if (code == 0) {
#ifdef USE_BPLUS
int usedBplus = 0;
#endif
- code = cm_BeginDirOp(dscp, userp, reqp, CM_DIRLOCK_READ, &dirop);
+ code = cm_BeginDirOp(dscp, userp, reqp, CM_DIRLOCK_READ,
+ CM_DIROP_FLAG_NONE, &dirop);
if (code == 0) {
#ifdef USE_BPLUS
code = cm_BPlusDirLookup(&dirop, nnamep, &rock.fid);
if (fnamep == NULL) {
code = -1;
#ifdef USE_BPLUS
- code = cm_BeginDirOp(dscp, userp, reqp, CM_DIRLOCK_READ, &dirop);
+ code = cm_BeginDirOp(dscp, userp, reqp, CM_DIRLOCK_READ,
+ CM_DIROP_FLAG_NONE, &dirop);
if (code == 0) {
code = cm_BPlusDirLookupOriginalName(&dirop, cnamep, &fnamep);
if (code == 0)
code = cm_Lookup(dscp, cnamep, CM_FLAG_NOMOUNTCHASE, userp, reqp, &scp);
/* make sure we don't screw up the dir status during the merge */
- code = cm_BeginDirOp(dscp, userp, reqp, CM_DIRLOCK_NONE, &dirop);
+ code = cm_BeginDirOp(dscp, userp, reqp, CM_DIRLOCK_NONE,
+ CM_DIROP_FLAG_NONE, &dirop);
lock_ObtainWrite(&dscp->rw);
sflags = CM_SCACHESYNC_STOREDATA;
StringCbCopyA((char *) tsp->data, sizeof(tsp->data), linkp+cm_mountRootLen+1);
else
tsp->data[0] = 0;
- *newRootScpp = cm_data.rootSCachep;
- cm_HoldSCache(cm_data.rootSCachep);
+ *newRootScpp = cm_RootSCachep(userp, reqp);
+ cm_HoldSCache(*newRootScpp);
} else if (linkp[0] == '\\' && linkp[1] == '\\') {
if (!strnicmp(&linkp[2], cm_NetbiosName, (len = (long)strlen(cm_NetbiosName))))
{
if (*p == '\\')
*p = '/';
}
- *newRootScpp = cm_data.rootSCachep;
- cm_HoldSCache(cm_data.rootSCachep);
+ *newRootScpp = cm_RootSCachep(userp, reqp);
+ cm_HoldSCache(*newRootScpp);
} else {
linkScp->fileType = CM_SCACHETYPE_DFSLINK;
StringCchCopyA(tsp->data,lengthof(tsp->data), linkp);
* but this seems to create problems. instead, we will just
* reject the link */
StringCchCopyA(tsp->data,lengthof(tsp->data), linkp+1);
- *newRootScpp = cm_data.rootSCachep;
- cm_HoldSCache(cm_data.rootSCachep);
+ *newRootScpp = cm_RootSCachep(userp, reqp);
+ cm_HoldSCache(*newRootScpp);
#else
/* we still copy the link data into the response so that
* the user can see what the link points to
cm_scache_t *scp;
cm_fid_t tfid;
struct rx_connection * rxconnp;
- int inlinebulk = 0; /* Did we use InlineBulkStatus RPC or not? */
-
+ int inlinebulk; /* Did we use InlineBulkStatus RPC or not? */
+
memset(&volSync, 0, sizeof(volSync));
/* otherwise, we may have one or more bulk stat's worth of stuff in bb;
callbackStruct.AFSCBs_val = &bbp->callbacks[filex];
cm_StartCallbackGrantingCall(NULL, &cbReq);
osi_Log1(afsd_logp, "CALL BulkStatus, %d entries", filesThisCall);
+
+ /*
+ * Whenever cm_Analyze is called for a RXAFS_ RPC there must
+ * be a FID provided. However, the error code from RXAFS_BulkStatus
+ * or RXAFS_InlineBulkStatus does not apply to any FID. Therefore,
+ * we generate an invalid FID to match with the RPC error.
+ */
+ cm_SetFid(&tfid, dscp->fid.cell, dscp->fid.volume, 0, 0);
+
do {
- code = cm_ConnFromFID(&dscp->fid, userp, reqp, &connp);
+ inlinebulk = 0;
+
+ code = cm_ConnFromFID(&tfid, userp, reqp, &connp);
if (code)
continue;
rxconnp = cm_GetRxConn(connp);
if (!(connp->serverp->flags & CM_SERVERFLAG_NOINLINEBULK)) {
code = RXAFS_InlineBulkStatus(rxconnp, &fidStruct,
- &statStruct, &callbackStruct, &volSync);
+ &statStruct, &callbackStruct, &volSync);
if (code == RXGEN_OPCODE) {
cm_SetServerNoInlineBulk(connp->serverp, 0);
} else {
}
rx_PutConnection(rxconnp);
- } while (cm_Analyze(connp, userp, reqp, &dscp->fid,
- &volSync, NULL, &cbReq, code));
+ /*
+ * If InlineBulk RPC was called and it succeeded,
+ * then pull out the return code from the status info
+ * and use it for cm_Analyze so that we can failover to other
+ * .readonly volume instances. But only do it for errors that
+ * are volume global.
+ */
+ if (inlinebulk && code == 0 && (&bbp->stats[0])->errorCode) {
+ osi_Log1(afsd_logp, "cm_TryBulkStat inline-bulk stat error: %d",
+ (&bbp->stats[0])->errorCode);
+ switch ((&bbp->stats[0])->errorCode) {
+ case VBUSY:
+ case VRESTARTING:
+ case VNOVOL:
+ case VMOVED:
+ case VOFFLINE:
+ case VSALVAGE:
+ case VNOSERVICE:
+ case VIO:
+ code = (&bbp->stats[0])->errorCode;
+ break;
+ default:
+ /* Rx and Rxkad errors are volume global */
+ if ( (&bbp->stats[0])->errorCode >= -64 && (&bbp->stats[0])->errorCode < 0 ||
+ (&bbp->stats[0])->errorCode >= ERROR_TABLE_BASE_RXK && (&bbp->stats[0])->errorCode < ERROR_TABLE_BASE_RXK + 256)
+ code = (&bbp->stats[0])->errorCode;
+ }
+ }
+ } while (cm_Analyze(connp, userp, reqp, &tfid, &volSync, NULL, &cbReq, code));
code = cm_MapRPCError(code, reqp);
- /* may as well quit on an error, since we're not going to do
+ /*
+ * might as well quit on an error, since we're not going to do
* much better on the next immediate call, either.
*/
if (code) {
inlinebulk ? "Inline" : "", code);
cm_EndCallbackGrantingCall(NULL, &cbReq, NULL, NULL, 0);
break;
- } else {
- osi_Log1(afsd_logp, "CALL %sBulkStatus SUCCESS", inlinebulk ? "Inline" : "");
}
- /* otherwise, we should do the merges */
+ /*
+ * The bulk RPC has succeeded or at least not failed with a
+ * volume global error result. For items that have inlineBulk
+ * errors we must call cm_Analyze in order to perform required
+ * logging of errors.
+ *
+ * If the RPC was not inline bulk, or the entry has no error,
+ * the status must be merged.
+ */
+ osi_Log1(afsd_logp, "CALL %sBulkStatus SUCCESS", inlinebulk ? "Inline" : "");
+
for (i = 0; i<filesThisCall; i++) {
j = filex + i;
cm_SetFid(&tfid, dscp->fid.cell, bbp->fids[j].Volume, bbp->fids[j].Vnode, bbp->fids[j].Unique);
- code = cm_GetSCache(&tfid, &scp, userp, reqp);
- if (code != 0)
- continue;
- /* otherwise, if this entry has no callback info,
- * merge in this.
- */
- lock_ObtainWrite(&scp->rw);
- /* now, we have to be extra paranoid on merging in this
- * information, since we didn't use cm_SyncOp before
- * starting the fetch to make sure that no bad races
- * were occurring. Specifically, we need to make sure
- * we don't obliterate any newer information in the
- * vnode than have here.
- *
- * Right now, be pretty conservative: if there's a
- * callback or a pending call, skip it.
- * However, if the prior attempt to obtain status
- * was refused access or the volume is .readonly,
- * take the data in any case since we have nothing
- * better for the in flight directory enumeration that
- * resulted in this function being called.
- */
- if ((scp->cbServerp == NULL &&
- !(scp->flags & (CM_SCACHEFLAG_FETCHING | CM_SCACHEFLAG_STORING | CM_SCACHEFLAG_SIZESTORING))) ||
- (scp->flags & CM_SCACHEFLAG_PURERO) ||
- (scp->flags & CM_SCACHEFLAG_EACCESS)) {
- cm_EndCallbackGrantingCall(scp, &cbReq,
- &bbp->callbacks[j],
- &volSync,
- CM_CALLBACK_MAINTAINCOUNT);
- cm_MergeStatus(dscp, scp, &bbp->stats[j], &volSync, userp, reqp, 0);
- }
- lock_ReleaseWrite(&scp->rw);
- cm_ReleaseSCache(scp);
+ if (inlinebulk && (&bbp->stats[j])->errorCode) {
+ cm_req_t treq = *reqp;
+ cm_Analyze(NULL, userp, &treq, &tfid, &volSync, NULL, &cbReq, (&bbp->stats[j])->errorCode);
+ } else {
+ code = cm_GetSCache(&tfid, &scp, userp, reqp);
+ if (code != 0)
+ continue;
+
+ /*
+ * otherwise, if this entry has no callback info,
+ * merge in this. If there is existing callback info
+ * we skip the merge because the existing data must be
+ * current (we have a callback) and the response from
+ * a non-inline bulk rpc might actually be wrong.
+ *
+ * now, we have to be extra paranoid on merging in this
+ * information, since we didn't use cm_SyncOp before
+ * starting the fetch to make sure that no bad races
+ * were occurring. Specifically, we need to make sure
+ * we don't obliterate any newer information in the
+ * vnode than have here.
+ *
+ * Right now, be pretty conservative: if there's a
+ * callback or a pending call, skip it.
+ * However, if the prior attempt to obtain status
+ * was refused access or the volume is .readonly,
+ * take the data in any case since we have nothing
+ * better for the in flight directory enumeration that
+ * resulted in this function being called.
+ */
+ lock_ObtainRead(&scp->rw);
+ if ((scp->cbServerp == NULL &&
+ !(scp->flags & (CM_SCACHEFLAG_FETCHING | CM_SCACHEFLAG_STORING | CM_SCACHEFLAG_SIZESTORING))) ||
+ (scp->flags & CM_SCACHEFLAG_PURERO) ||
+ (scp->flags & CM_SCACHEFLAG_EACCESS))
+ {
+ lock_ConvertRToW(&scp->rw);
+ cm_EndCallbackGrantingCall(scp, &cbReq,
+ &bbp->callbacks[j],
+ &volSync,
+ CM_CALLBACK_MAINTAINCOUNT);
+ cm_MergeStatus(dscp, scp, &bbp->stats[j], &volSync, userp, reqp, 0);
+ lock_ReleaseWrite(&scp->rw);
+ } else {
+ lock_ReleaseRead(&scp->rw);
+ }
+ cm_ReleaseSCache(scp);
+ }
} /* all files in the response */
/* now tell it to drop the count,
* after doing the vnode processing above */
cm_EndCallbackGrantingCall(NULL, &cbReq, NULL, NULL, 0);
} /* while there are still more files to process */
- /* If we did the InlineBulk RPC pull out the return code and log it */
- if (inlinebulk) {
- if ((&bbp->stats[0])->errorCode) {
- osi_Log1(afsd_logp, "cm_TryBulkStat bulk stat error: %d",
- (&bbp->stats[0])->errorCode);
- code = (&bbp->stats[0])->errorCode;
- }
- }
-
return code;
}
* that someone who does a chmod will know to wait until our call
* completes.
*/
- cm_BeginDirOp(dscp, userp, reqp, CM_DIRLOCK_NONE, &dirop);
+ cm_BeginDirOp(dscp, userp, reqp, CM_DIRLOCK_NONE, CM_DIROP_FLAG_NONE,
+ &dirop);
lock_ObtainWrite(&dscp->rw);
code = cm_SyncOp(dscp, NULL, userp, reqp, 0, CM_SCACHESYNC_STOREDATA);
lock_ReleaseWrite(&dscp->rw);
* data, so that someone who does a chmod on the dir will wait until
* our call completes.
*/
- cm_BeginDirOp(dscp, userp, reqp, CM_DIRLOCK_NONE, &dirop);
+ cm_BeginDirOp(dscp, userp, reqp, CM_DIRLOCK_NONE, CM_DIROP_FLAG_NONE,
+ &dirop);
lock_ObtainWrite(&dscp->rw);
code = cm_SyncOp(dscp, NULL, userp, reqp, 0, CM_SCACHESYNC_STOREDATA);
lock_ReleaseWrite(&dscp->rw);
return CM_ERROR_CROSSDEVLINK;
}
- cm_BeginDirOp(dscp, userp, reqp, CM_DIRLOCK_NONE, &dirop);
+ cm_BeginDirOp(dscp, userp, reqp, CM_DIRLOCK_NONE, CM_DIROP_FLAG_NONE,
+ &dirop);
lock_ObtainWrite(&dscp->rw);
code = cm_SyncOp(dscp, NULL, userp, reqp, 0, CM_SCACHESYNC_STOREDATA);
lock_ReleaseWrite(&dscp->rw);
* so that someone who does a chmod on the dir will wait until our
* call completes.
*/
- cm_BeginDirOp(dscp, userp, reqp, CM_DIRLOCK_NONE, &dirop);
+ cm_BeginDirOp(dscp, userp, reqp, CM_DIRLOCK_NONE, CM_DIROP_FLAG_NONE,
+ &dirop);
lock_ObtainWrite(&dscp->rw);
code = cm_SyncOp(dscp, NULL, userp, reqp, 0, CM_SCACHESYNC_STOREDATA);
lock_ReleaseWrite(&dscp->rw);
if (fnamep == NULL) {
code = -1;
#ifdef USE_BPLUS
- code = cm_BeginDirOp(dscp, userp, reqp, CM_DIRLOCK_READ, &dirop);
+ code = cm_BeginDirOp(dscp, userp, reqp, CM_DIRLOCK_READ,
+ CM_DIROP_FLAG_NONE, &dirop);
if (code == 0) {
code = cm_BPlusDirLookupOriginalName(&dirop, cnamep, &fnamep);
if (code == 0)
* so that someone who does a chmod on the dir will wait until our
* call completes.
*/
- cm_BeginDirOp(dscp, userp, reqp, CM_DIRLOCK_NONE, &dirop);
+ cm_BeginDirOp(dscp, userp, reqp, CM_DIRLOCK_NONE, CM_DIROP_FLAG_NONE,
+ &dirop);
lock_ObtainWrite(&dscp->rw);
code = cm_SyncOp(dscp, NULL, userp, reqp, 0, CM_SCACHESYNC_STOREDATA);
lock_ReleaseWrite(&dscp->rw);
if (oldNamep == NULL) {
code = -1;
#ifdef USE_BPLUS
- code = cm_BeginDirOp(oldDscp, userp, reqp, CM_DIRLOCK_READ, &oldDirOp);
+ code = cm_BeginDirOp(oldDscp, userp, reqp, CM_DIRLOCK_READ,
+ CM_DIROP_FLAG_NONE, &oldDirOp);
if (code == 0) {
code = cm_BPlusDirLookupOriginalName(&oldDirOp, cOldNamep, &oldNamep);
if (code == 0)
}
oneDir = 1;
- cm_BeginDirOp(oldDscp, userp, reqp, CM_DIRLOCK_NONE, &oldDirOp);
+ cm_BeginDirOp(oldDscp, userp, reqp, CM_DIRLOCK_NONE,
+ CM_DIROP_FLAG_NONE, &oldDirOp);
lock_ObtainWrite(&oldDscp->rw);
cm_dnlcRemove(oldDscp, cOldNamep);
cm_dnlcRemove(oldDscp, cNewNamep);
}
if (oldDscp->fid.vnode < newDscp->fid.vnode) {
- cm_BeginDirOp(oldDscp, userp, reqp, CM_DIRLOCK_NONE, &oldDirOp);
+ cm_BeginDirOp(oldDscp, userp, reqp, CM_DIRLOCK_NONE,
+ CM_DIROP_FLAG_NONE, &oldDirOp);
lock_ObtainWrite(&oldDscp->rw);
cm_dnlcRemove(oldDscp, cOldNamep);
code = cm_SyncOp(oldDscp, NULL, userp, reqp, 0,
if (code != 0)
cm_EndDirOp(&oldDirOp);
if (code == 0) {
- cm_BeginDirOp(newDscp, userp, reqp, CM_DIRLOCK_NONE, &newDirOp);
+ cm_BeginDirOp(newDscp, userp, reqp, CM_DIRLOCK_NONE,
+ CM_DIROP_FLAG_NONE, &newDirOp);
lock_ObtainWrite(&newDscp->rw);
cm_dnlcRemove(newDscp, cNewNamep);
code = cm_SyncOp(newDscp, NULL, userp, reqp, 0,
}
else {
/* lock the new vnode entry first */
- cm_BeginDirOp(newDscp, userp, reqp, CM_DIRLOCK_NONE, &newDirOp);
+ cm_BeginDirOp(newDscp, userp, reqp, CM_DIRLOCK_NONE,
+ CM_DIROP_FLAG_NONE, &newDirOp);
lock_ObtainWrite(&newDscp->rw);
cm_dnlcRemove(newDscp, cNewNamep);
code = cm_SyncOp(newDscp, NULL, userp, reqp, 0,
if (code != 0)
cm_EndDirOp(&newDirOp);
if (code == 0) {
- cm_BeginDirOp(oldDscp, userp, reqp, CM_DIRLOCK_NONE, &oldDirOp);
+ cm_BeginDirOp(oldDscp, userp, reqp, CM_DIRLOCK_NONE,
+ CM_DIROP_FLAG_NONE, &oldDirOp);
lock_ObtainWrite(&oldDscp->rw);
cm_dnlcRemove(oldDscp, cOldNamep);
code = cm_SyncOp(oldDscp, NULL, userp, reqp, 0,