}
long
+cm_BPlusDirEnumBulkStat(cm_scache_t *dscp, cm_direnum_t *enump, cm_user_t *userp, cm_req_t *reqp)
+{
+    cm_bulkStat_t *bsp;
+    afs_uint32 count;
+    afs_uint32 code = 0;
+
+    /* The fake root cell is generated locally; there is no file server
+     * to issue a BulkStatus RPC to.
+     */
+    if ( dscp->fid.cell == AFS_FAKE_ROOT_CELL_ID )
+        return 0;
+
+    /* cm_bulkStat_t is large (AFSCBMAX-sized fid, status and callback
+     * arrays) so allocate it from the heap rather than the stack.
+     */
+    bsp = malloc(sizeof(cm_bulkStat_t));
+    if (bsp == NULL)
+        return ENOMEM;
+    memset(bsp, 0, sizeof(cm_bulkStat_t));
+
+    for ( count = 0; count < enump->count; count++ ) {
+        cm_scache_t *tscp = cm_FindSCache(&enump->entry[count].fid);
+        int i;
+
+        if (tscp) {
+            if (lock_TryWrite(&tscp->rw)) {
+                /* we have an entry that we can look at */
+                if (!(tscp->flags & CM_SCACHEFLAG_EACCESS) && cm_HaveCallback(tscp)) {
+                    /* we have a callback on it.  Don't bother
+                     * fetching this stat entry, since we're happy
+                     * with the info we have.
+                     */
+                    lock_ReleaseWrite(&tscp->rw);
+                    cm_ReleaseSCache(tscp);
+                    continue;
+                }
+                lock_ReleaseWrite(&tscp->rw);
+            }   /* got lock */
+            cm_ReleaseSCache(tscp);
+        }       /* found entry */
+
+        i = bsp->counter++;
+        bsp->fids[i].Volume = enump->entry[count].fid.volume;
+        bsp->fids[i].Vnode = enump->entry[count].fid.vnode;
+        bsp->fids[i].Unique = enump->entry[count].fid.unique;
+
+        /* issue a full batch as soon as one is assembled and reset
+         * the rock for the next batch.
+         */
+        if (bsp->counter == AFSCBMAX) {
+            code = cm_TryBulkStatRPC(dscp, bsp, userp, reqp);
+            memset(bsp, 0, sizeof(cm_bulkStat_t));
+        }
+    }
+
+    /* flush any partially filled batch */
+    if (bsp->counter > 0)
+        code = cm_TryBulkStatRPC(dscp, bsp, userp, reqp);
+
+    free(bsp);
+
+    /* Bulk stat is an optimization; an RPC failure is logged but not
+     * propagated because each entry can still be stat'd on demand.
+     */
+    if (code)
+        osi_Log1(afsd_logp, "cm_BPlusDirEnumBulkStat failed code 0x%x", code);
+    return 0;
+}
+
+long
cm_BPlusDirNextEnumEntry(cm_direnum_t *enump, cm_direnum_entry_t **entrypp)
{
if (enump == NULL || entrypp == NULL || enump->next > enump->count) {
return code;
}
-/* make this big enough so that one buffer of dir pages won't overflow. We'll
- * check anyway, but we want to minimize the chance that we have to leave stuff
- * unstat'd.
- */
-#define CM_BULKMAX (3 * AFSCBMAX)
-
-/* rock for bulk stat calls */
-typedef struct cm_bulkStat {
- osi_hyper_t bufOffset; /* only do it for things in this buffer page */
-
- /* info for the actual call */
- int counter; /* next free slot */
- AFSFid fids[CM_BULKMAX];
- AFSFetchStatus stats[CM_BULKMAX];
- AFSCallBack callbacks[CM_BULKMAX];
-} cm_bulkStat_t;
-
/* for a given entry, make sure that it isn't in the stat cache, and then
* add it to the list of file IDs to be obtained.
*
return 0;
}
-/* called with a write locked scp and a pointer to a buffer. Make bulk stat
- * calls on all undeleted files in the page of the directory specified.
- */
afs_int32
-cm_TryBulkStat(cm_scache_t *dscp, osi_hyper_t *offsetp, cm_user_t *userp,
- cm_req_t *reqp)
+cm_TryBulkStatRPC(cm_scache_t *dscp, cm_bulkStat_t *bbp, cm_user_t *userp, cm_req_t *reqp)
{
- long code;
- cm_bulkStat_t bb; /* this is *BIG*, probably 16K or so;
- * watch for stack problems */
+ afs_int32 code = 0;
AFSCBFids fidStruct;
AFSBulkStats statStruct;
cm_conn_t *connp;
cm_fid_t tfid;
struct rx_connection * callp;
int inlinebulk = 0; /* Did we use InlineBulkStatus RPC or not? */
-
- osi_Log1(afsd_logp, "cm_TryBulkStat dir 0x%p", dscp);
-
- /* should be on a buffer boundary */
- osi_assertx((offsetp->LowPart & (cm_data.buf_blockSize - 1)) == 0, "invalid offset");
-
- memset(&bb, 0, sizeof(bb));
- bb.bufOffset = *offsetp;
-
- lock_ReleaseWrite(&dscp->rw);
- /* first, assemble the file IDs we need to stat */
- code = cm_ApplyDir(dscp, cm_TryBulkProc, (void *) &bb, offsetp, userp, reqp, NULL);
-
- /* if we failed, bail out early */
- if (code && code != CM_ERROR_STOPNOW) {
- lock_ObtainWrite(&dscp->rw);
- return code;
- }
-
+
/* otherwise, we may have one or more bulk stat's worth of stuff in bb;
* make the calls to create the entries. Handle AFSCBMAX files at a
* time.
*/
- filex = 0;
- while (filex < bb.counter) {
- filesThisCall = bb.counter - filex;
+ for (filex = 0; filex < bbp->counter; filex += filesThisCall) {
+ filesThisCall = bbp->counter - filex;
if (filesThisCall > AFSCBMAX)
filesThisCall = AFSCBMAX;
fidStruct.AFSCBFids_len = filesThisCall;
- fidStruct.AFSCBFids_val = &bb.fids[filex];
+ fidStruct.AFSCBFids_val = &bbp->fids[filex];
statStruct.AFSBulkStats_len = filesThisCall;
- statStruct.AFSBulkStats_val = &bb.stats[filex];
+ statStruct.AFSBulkStats_val = &bbp->stats[filex];
callbackStruct.AFSCBs_len = filesThisCall;
- callbackStruct.AFSCBs_val = &bb.callbacks[filex];
+ callbackStruct.AFSCBs_val = &bbp->callbacks[filex];
cm_StartCallbackGrantingCall(NULL, &cbReq);
osi_Log1(afsd_logp, "CALL BulkStatus, %d entries", filesThisCall);
do {
/* otherwise, we should do the merges */
for (i = 0; i<filesThisCall; i++) {
j = filex + i;
- cm_SetFid(&tfid, dscp->fid.cell, bb.fids[j].Volume, bb.fids[j].Vnode, bb.fids[j].Unique);
+ cm_SetFid(&tfid, dscp->fid.cell, bbp->fids[j].Volume, bbp->fids[j].Vnode, bbp->fids[j].Unique);
code = cm_GetSCache(&tfid, &scp, userp, reqp);
if (code != 0)
continue;
| CM_SCACHEFLAG_STORING
| CM_SCACHEFLAG_SIZESTORING))) {
cm_EndCallbackGrantingCall(scp, &cbReq,
- &bb.callbacks[j],
+ &bbp->callbacks[j],
CM_CALLBACK_MAINTAINCOUNT);
- cm_MergeStatus(dscp, scp, &bb.stats[j], &volSync, userp, 0);
+ cm_MergeStatus(dscp, scp, &bbp->stats[j], &volSync, userp, 0);
}
lock_ReleaseWrite(&scp->rw);
cm_ReleaseSCache(scp);
/* now tell it to drop the count,
* after doing the vnode processing above */
cm_EndCallbackGrantingCall(NULL, &cbReq, NULL, 0);
-
- filex += filesThisCall;
} /* while there are still more files to process */
- lock_ObtainWrite(&dscp->rw);
/* If we did the InlineBulk RPC pull out the return code and log it */
if (inlinebulk) {
- if ((&bb.stats[0])->errorCode) {
+ if ((&bbp->stats[0])->errorCode) {
osi_Log1(afsd_logp, "cm_TryBulkStat bulk stat error: %d",
- (&bb.stats[0])->errorCode);
+ (&bbp->stats[0])->errorCode);
+ code = (&bbp->stats[0])->errorCode;
}
}
- osi_Log0(afsd_logp, "END cm_TryBulkStat");
+ return code;
+}
+
+/* called with a write locked scp and a pointer to a buffer.  Make bulk stat
+ * calls on all undeleted files in the page of the directory specified.
+ * Returns with dscp->rw write-locked again on all paths.
+ */
+afs_int32
+cm_TryBulkStat(cm_scache_t *dscp, osi_hyper_t *offsetp, cm_user_t *userp,
+               cm_req_t *reqp)
+{
+    afs_int32 code;
+    cm_bulkStat_t *bbp;
+
+    osi_Log1(afsd_logp, "cm_TryBulkStat dir 0x%p", dscp);
+
+    /* should be on a buffer boundary */
+    osi_assertx((offsetp->LowPart & (cm_data.buf_blockSize - 1)) == 0, "invalid offset");
+
+    /* The bulk stat rock is large; allocate it from the heap rather
+     * than the stack.
+     */
+    bbp = malloc(sizeof(cm_bulkStat_t));
+    if (bbp == NULL) {
+        /* Bulk stat is only an optimization; without memory for the
+         * rock just skip it and let entries be stat'd individually.
+         * dscp->rw has not been dropped yet, so return as called.
+         */
+        return 0;
+    }
+    memset(bbp, 0, sizeof(cm_bulkStat_t));
+    bbp->bufOffset = *offsetp;
+
+    lock_ReleaseWrite(&dscp->rw);
+    /* first, assemble the file IDs we need to stat */
+    code = cm_ApplyDir(dscp, cm_TryBulkProc, (void *) bbp, offsetp, userp, reqp, NULL);
+
+    /* if we failed, bail out early */
+    if (code && code != CM_ERROR_STOPNOW) {
+        free(bbp);
+        lock_ObtainWrite(&dscp->rw);
+        return code;
+    }
+
+    /* issue the assembled batches; errors here are advisory only */
+    code = cm_TryBulkStatRPC(dscp, bbp, userp, reqp);
+    osi_Log1(afsd_logp, "END cm_TryBulkStat code 0x%x", code);
+
+    lock_ObtainWrite(&dscp->rw);
+    free(bbp);
+    return 0;
}