From: Jeffrey Altman
Date: Thu, 7 Feb 2013 21:53:45 +0000 (-0500)
Subject: Windows: RXAFS_BulkStat failures
X-Git-Tag: openafs-stable-1_8_0pre1~1543
X-Git-Url: https://git.openafs.org/?p=openafs.git;a=commitdiff_plain;h=cb414f6899212f314313a781b63486661e9d1394

Windows: RXAFS_BulkStat failures

The RXAFS_BulkStat RPC is quite brain dead.  The client requests status
information on up to AFSCBMAX FIDs.  The file server replies with success
only if the client's credentials provide access to all of the requested
FIDs.  If status info cannot be provided for any one of the FIDs, the error
code of the failure is returned with no context as to which FID failed.

To simplify the logic within the cache manager, a new local error code,
CM_ERROR_BULKSTAT_FAILURE, is introduced to replace whatever error was
received from the file server.  This error is returned by cm_TryBulkStat
and cm_TryBulkStatRPC.  The caller of either of those functions should
interpret the error to mean that the current user context cannot be used
to perform a bulk stat operation against the provided cm_scache directory.
Instead, individual RXAFS_FetchStatus operations must be performed.

This patchset implements such error handling for both the SMB and RDR
interfaces.  The change permits the Windows cache manager to properly
enumerate a directory for which the user has only list permission and
cannot read the status info for files and symlinks.

Change-Id: I8cc47a5cedfd4e7bf0db55efffc5e95be5172e85
Reviewed-on: http://gerrit.openafs.org/9080
Tested-by: BuildBot
Reviewed-by: Jeffrey Altman
Tested-by: Jeffrey Altman
---
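For review purposes, a condensed sketch of the fallback pattern the call
sites below adopt.  The helper name cm_StatDirEntriesFallback() and the
simplified loop are illustrative only and are not part of this patch; the
cm_* calls mirror the ones used at the real call sites in cm_btree.c.

    /*
     * Illustrative only, not part of this patch.  Try one bulk RPC for the
     * whole batch; if the file server refuses bulk stat for this user
     * context, stat each directory entry individually instead.
     */
    static afs_uint32
    cm_StatDirEntriesFallback(cm_scache_t *dscp, cm_direnum_t *enump,
                              cm_bulkStat_t *bsp, cm_user_t *userp,
                              cm_req_t *reqp)
    {
        afs_uint32 code;
        afs_uint32 count;
        cm_scache_t *tscp;

        code = cm_TryBulkStatRPC(dscp, bsp, userp, reqp);
        if (code != CM_ERROR_BULKSTAT_FAILURE)
            return code;    /* success, or an error other than a bulk stat refusal */

        /*
         * RXAFS_BulkStatus failed without indicating which FID was at fault,
         * so none of the returned data can be trusted.  Issue one
         * RXAFS_FetchStatus (via cm_SyncOp) per directory entry instead.
         */
        for (count = 0; count < enump->count; count++) {
            code = cm_GetSCache(&enump->entry[count].fid, NULL, &tscp,
                                userp, reqp);
            if (code) {
                enump->entry[count].errorCode = code;
                continue;
            }

            lock_ObtainWrite(&tscp->rw);
            code = cm_SyncOp(tscp, NULL, userp, reqp, 0,
                             CM_SCACHESYNC_NEEDCALLBACK | CM_SCACHESYNC_GETSTATUS);
            lock_ReleaseWrite(&tscp->rw);
            cm_ReleaseSCache(tscp);

            enump->entry[count].errorCode = code;
            enump->entry[count].flags |= CM_DIRENUM_FLAG_GOT_STATUS;
        }
        return 0;
    }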
diff --git a/src/WINNT/afsd/cm_btree.c b/src/WINNT/afsd/cm_btree.c
index 3550eb1..f360c52 100644
--- a/src/WINNT/afsd/cm_btree.c
+++ b/src/WINNT/afsd/cm_btree.c
@@ -2348,12 +2348,15 @@ cm_BPlusDirEnumBulkStat(cm_direnum_t *enump)
     cm_user_t *userp = enump->userp;
     cm_bulkStat_t *bsp = NULL;
     afs_uint32 ** bs_errorCodep = NULL;
+    afs_uint32 ** bs_flagsp = NULL;
     afs_uint32 dscp_errorCode = 0;
+    afs_uint32 dscp_flags = 0;
     afs_uint32 count;
     afs_uint32 code = 0;
     cm_req_t req;
     int i;
     cm_scache_t *tscp;
+    afs_int32 nobulkstat = 0;
 
     cm_InitReq(&req);
     req.flags = enump->reqFlags;
@@ -2369,12 +2372,18 @@ cm_BPlusDirEnumBulkStat(cm_direnum_t *enump)
     memset(bsp, 0, sizeof(cm_bulkStat_t));
     bsp->userp = userp;
 
-    bs_errorCodep = malloc(sizeof(DWORD *) * AFSCBMAX);
+    bs_errorCodep = malloc(sizeof(afs_uint32 *) * AFSCBMAX);
     if (!bs_errorCodep) {
         code = ENOMEM;
         goto done;
     }
 
+    bs_flagsp = malloc(sizeof(afs_uint32 *) * AFSCBMAX);
+    if (!bs_flagsp) {
+        code = ENOMEM;
+        goto done;
+    }
+
     /*
      * In order to prevent the directory callback from expiring
      * on really large directories with many symlinks to mount
@@ -2385,8 +2394,10 @@ cm_BPlusDirEnumBulkStat(cm_direnum_t *enump)
     bsp->fids[0].Vnode = dscp->fid.vnode;
     bsp->fids[0].Unique = dscp->fid.unique;
     bs_errorCodep[0] = &dscp_errorCode;
+    bs_flagsp[0] = &dscp_flags;
     bsp->counter++;
+  restart_stat:
     for ( count = 0; count < enump->count; count++ ) {
         if ( !wcscmp(L".", enump->entry[count].name) || !wcscmp(L"..", enump->entry[count].name) ) {
             continue;
         }
@@ -2407,6 +2418,17 @@ cm_BPlusDirEnumBulkStat(cm_direnum_t *enump)
                     enump->entry[count].errorCode = 0;
                     continue;
                 }
+
+                if (nobulkstat) {
+                    code = cm_SyncOp(tscp, NULL, userp, &req, 0,
+                                     CM_SCACHESYNC_NEEDCALLBACK | CM_SCACHESYNC_GETSTATUS);
+                    lock_ReleaseWrite(&tscp->rw);
+                    cm_ReleaseSCache(tscp);
+                    enump->entry[count].flags |= CM_DIRENUM_FLAG_GOT_STATUS;
+                    enump->entry[count].errorCode = code;
+                    continue;
+                }
+
                 lock_ReleaseWrite(&tscp->rw);
             } /* got lock */
             cm_ReleaseSCache(tscp);
@@ -2416,14 +2438,30 @@ cm_BPlusDirEnumBulkStat(cm_direnum_t *enump)
         bsp->fids[i].Volume = enump->entry[count].fid.volume;
         bsp->fids[i].Vnode = enump->entry[count].fid.vnode;
         bsp->fids[i].Unique = enump->entry[count].fid.unique;
-        enump->entry[count].flags |= CM_DIRENUM_FLAG_GOT_STATUS;
         bs_errorCodep[i] = &enump->entry[count].errorCode;
+        bs_flagsp[i] = &enump->entry[count].flags;
+        enump->entry[count].flags |= CM_DIRENUM_FLAG_GOT_STATUS;
 
         if (bsp->counter == AFSCBMAX) {
             code = cm_TryBulkStatRPC(dscp, bsp, userp, &req);
-            if (code)
-                goto done;
+            if (code == CM_ERROR_BULKSTAT_FAILURE) {
+                /*
+                 * If bulk stat cannot be used for this directory
+                 * we must perform individual fetch status calls.
+                 * Restart from the beginning of the enumeration.
+                 */
+                nobulkstat = 1;
+                for (i=0; i<bsp->counter; i++) {
+                    *(bs_flagsp[i]) &= ~CM_DIRENUM_FLAG_GOT_STATUS;
+                }
+                goto restart_stat;
+            }
+
+            if (code) {
+                /* on any other error, exit */
+                goto done;
+            }
 
             for ( i=0; i<bsp->counter; i++) {
                 *(bs_errorCodep[i]) = cm_MapRPCError(bsp->stats[i].errorCode, &req);
             }
@@ -2451,6 +2489,20 @@ cm_BPlusDirEnumBulkStat(cm_direnum_t *enump)
     if (bsp->counter > 0) {
         code = cm_TryBulkStatRPC(dscp, bsp, userp, &req);
 
+        if (code == CM_ERROR_BULKSTAT_FAILURE) {
+            /*
+             * If bulk stat cannot be used for this directory
+             * we must perform individual fetch status calls.
+             * Restart from the beginning of the enumeration.
+             */
+            nobulkstat = 1;
+
+            for (i=0; i<bsp->counter; i++) {
+                *(bs_flagsp[i]) &= ~CM_DIRENUM_FLAG_GOT_STATUS;
+            }
+            goto restart_stat;
+        }
+
         if (code)
             goto done;
 
@@ -2469,6 +2521,8 @@ cm_BPlusDirEnumBulkStat(cm_direnum_t *enump)
         free(bsp);
     if (bs_errorCodep)
         free(bs_errorCodep);
+    if (bs_flagsp)
+        free(bs_flagsp);
 
     return code;
 }
@@ -2486,19 +2540,22 @@ cm_BPlusDirEnumBulkStatOne(cm_direnum_t *enump, cm_scache_t *scp)
     cm_user_t *userp = enump->userp;
     cm_bulkStat_t *bsp = NULL;
     afs_uint32 ** bs_errorCodep = NULL;
+    afs_uint32 ** bs_flagsp = NULL;
     afs_uint32 dscp_errorCode = 0;
+    afs_uint32 dscp_flags = 0;
     afs_uint32 scp_errorCode = 0;
+    afs_uint32 scp_flags = 0;
     afs_uint32 code = 0;
     afs_uint32 i;
     cm_req_t req;
     cm_scache_t *tscp;
 
-    cm_InitReq(&req);
-    req.flags = enump->reqFlags;
-
     if ( dscp->fid.cell == AFS_FAKE_ROOT_CELL_ID )
         return 0;
 
+    cm_InitReq(&req);
+    req.flags = enump->reqFlags;
+
     bsp = malloc(sizeof(cm_bulkStat_t));
     if (!bsp) {
         code = ENOMEM;
@@ -2507,12 +2564,18 @@ cm_BPlusDirEnumBulkStatOne(cm_direnum_t *enump, cm_scache_t *scp)
     memset(bsp, 0, sizeof(cm_bulkStat_t));
     bsp->userp = userp;
 
-    bs_errorCodep = malloc(sizeof(DWORD *) * AFSCBMAX);
+    bs_errorCodep = malloc(sizeof(afs_uint32 *) * AFSCBMAX);
    if (!bs_errorCodep) {
         code = ENOMEM;
         goto done;
     }
 
+    bs_flagsp = malloc(sizeof(afs_uint32 *) * AFSCBMAX);
+    if (!bs_flagsp) {
+        code = ENOMEM;
+        goto done;
+    }
+
     /*
      * In order to prevent the directory callback from expiring
      * on really large directories with many symlinks to mount
@@ -2523,6 +2586,7 @@ cm_BPlusDirEnumBulkStatOne(cm_direnum_t *enump, cm_scache_t *scp)
     bsp->fids[0].Vnode = dscp->fid.vnode;
     bsp->fids[0].Unique = dscp->fid.unique;
     bs_errorCodep[0] = &dscp_errorCode;
+    bs_flagsp[0] = &dscp_flags;
     bsp->counter++;
 
     /*
@@ -2537,6 +2601,7 @@ cm_BPlusDirEnumBulkStatOne(cm_direnum_t *enump, cm_scache_t *scp)
     bsp->fids[1].Vnode = scp->fid.vnode;
     bsp->fids[1].Unique = scp->fid.unique;
     bs_errorCodep[1] = &scp_errorCode;
+    bs_flagsp[1] = &scp_flags;
     bsp->counter++;
 
     if (enump->count <= AFSCBMAX - 1) {
@@ -2581,14 +2646,27 @@ cm_BPlusDirEnumBulkStatOne(cm_direnum_t *enump, cm_scache_t *scp)
             bsp->fids[bsp->counter].Volume = enump->entry[i].fid.volume;
             bsp->fids[bsp->counter].Vnode = enump->entry[i].fid.vnode;
             bsp->fids[bsp->counter].Unique = enump->entry[i].fid.unique;
-            enump->entry[i].flags |= CM_DIRENUM_FLAG_GOT_STATUS;
             bs_errorCodep[bsp->counter] = &enump->entry[i].errorCode;
+            bs_flagsp[bsp->counter] = &enump->entry[i].flags;
+            enump->entry[i].flags |= CM_DIRENUM_FLAG_GOT_STATUS;
             bsp->counter++;
         }
 
     if (bsp->counter > 0) {
         code = cm_TryBulkStatRPC(dscp, bsp, userp, &req);
 
         /* Now process any errors that might have occurred */
+        if (code == CM_ERROR_BULKSTAT_FAILURE) {
+            for (i=2; i<bsp->counter; i++) {
+                *(bs_flagsp[i]) &= ~CM_DIRENUM_FLAG_GOT_STATUS;
+            }
+
+            lock_ObtainWrite(&scp->rw);
+            code = cm_SyncOp(scp, NULL, userp, &req, 0,
+                             CM_SCACHESYNC_NEEDCALLBACK | CM_SCACHESYNC_GETSTATUS);
+            lock_ReleaseWrite(&scp->rw);
+            goto done;
+        }
+
         if (code)
             goto done;
@@ -2608,6 +2686,8 @@ cm_BPlusDirEnumBulkStatOne(cm_direnum_t *enump, cm_scache_t *scp)
         free(bsp);
     if (bs_errorCodep)
         free(bs_errorCodep);
+    if (bs_flagsp)
+        free(bs_flagsp);
 
     return code;
 }
@@ -2619,19 +2699,22 @@ cm_BPlusDirEnumBulkStatNext(cm_direnum_t *enump)
     cm_user_t *userp = enump->userp;
     cm_bulkStat_t *bsp = NULL;
     afs_uint32 ** bs_errorCodep = NULL;
+    afs_uint32 ** bs_flagsp = NULL;
     afs_uint32 dscp_errorCode = 0;
+    afs_uint32 dscp_flags = 0;
     afs_uint32 count;
     afs_uint32 code = 0;
     cm_req_t req;
     cm_scache_t *tscp;
+    afs_int32 next = -1;
     int i;
 
-    cm_InitReq(&req);
-    req.flags = enump->reqFlags;
-
     if ( dscp->fid.cell == AFS_FAKE_ROOT_CELL_ID )
         return 0;
 
+    cm_InitReq(&req);
+    req.flags = enump->reqFlags;
+
     bsp = malloc(sizeof(cm_bulkStat_t));
     if (!bsp) {
         code = ENOMEM;
@@ -2640,12 +2723,18 @@ cm_BPlusDirEnumBulkStatNext(cm_direnum_t *enump)
     memset(bsp, 0, sizeof(cm_bulkStat_t));
     bsp->userp = userp;
 
-    bs_errorCodep = malloc(sizeof(DWORD *) * AFSCBMAX);
+    bs_errorCodep = malloc(sizeof(afs_uint32 *) * AFSCBMAX);
     if (!bs_errorCodep) {
         code = ENOMEM;
         goto done;
     }
 
+    bs_flagsp = malloc(sizeof(afs_uint32 *) * AFSCBMAX);
+    if (!bs_flagsp) {
+        code = ENOMEM;
+        goto done;
+    }
+
     /*
      * In order to prevent the directory callback from expiring
      * on really large directories with many symlinks to mount
@@ -2656,6 +2745,7 @@ cm_BPlusDirEnumBulkStatNext(cm_direnum_t *enump)
     bsp->fids[0].Vnode = dscp->fid.vnode;
     bsp->fids[0].Unique = dscp->fid.unique;
     bs_errorCodep[0] = &dscp_errorCode;
+    bs_flagsp[0] = &dscp_flags;
     bsp->counter++;
 
     for ( count = enump->next; count < enump->count && bsp->counter < AFSCBMAX; count++ ) {
@@ -2663,6 +2753,8 @@ cm_BPlusDirEnumBulkStatNext(cm_direnum_t *enump)
             continue;
         }
 
+        if (next == -1)
+            next = count;
         tscp = cm_FindSCache(&enump->entry[count].fid);
         if (tscp) {
             if (lock_TryWrite(&tscp->rw)) {
@@ -2686,14 +2778,37 @@ cm_BPlusDirEnumBulkStatNext(cm_direnum_t *enump)
         bsp->fids[bsp->counter].Volume = enump->entry[count].fid.volume;
         bsp->fids[bsp->counter].Vnode = enump->entry[count].fid.vnode;
         bsp->fids[bsp->counter].Unique = enump->entry[count].fid.unique;
-        enump->entry[count].flags |= CM_DIRENUM_FLAG_GOT_STATUS;
         bs_errorCodep[bsp->counter] = &enump->entry[count].errorCode;
+        bs_flagsp[bsp->counter] = &enump->entry[count].flags;
+        enump->entry[count].flags |= CM_DIRENUM_FLAG_GOT_STATUS;
         bsp->counter++;
     }
 
     if (bsp->counter > 0) {
         code = cm_TryBulkStatRPC(dscp, bsp, userp, &req);
 
         /* Now process any errors that might have occurred */
+        if (code == CM_ERROR_BULKSTAT_FAILURE) {
+            for (i=0; i<bsp->counter; i++) {
+                *(bs_flagsp[i]) &= ~CM_DIRENUM_FLAG_GOT_STATUS;
+            }
+
+            code = cm_GetSCache(&enump->entry[next].fid, NULL, &tscp,
+                                userp, &req);
+            if (code == 0) {
+                if (lock_TryWrite(&tscp->rw)) {
+                    code = cm_SyncOp(tscp, NULL, userp, &req, 0,
+                                     CM_SCACHESYNC_NEEDCALLBACK | CM_SCACHESYNC_GETSTATUS);
+                    lock_ReleaseWrite(&tscp->rw);
+                    *(bs_errorCodep[1]) = code;
+                    *(bs_flagsp[1]) |= CM_DIRENUM_FLAG_GOT_STATUS;
+                }
+                cm_ReleaseSCache(tscp);
+            } else {
+                *(bs_errorCodep[1]) = code;
+                *(bs_flagsp[1]) |= CM_DIRENUM_FLAG_GOT_STATUS;
+            }
+            goto done;
+        }
         if (code)
             goto done;
@@ -2712,6 +2827,8 @@ cm_BPlusDirEnumBulkStatNext(cm_direnum_t *enump)
         free(bsp);
     if (bs_errorCodep)
         free(bs_errorCodep);
+    if (bs_flagsp)
+        free(bs_flagsp);
 
     return code;
 }
diff --git a/src/WINNT/afsd/cm_error.h b/src/WINNT/afsd/cm_error.h
index e13a2d2..779111a 100644
--- a/src/WINNT/afsd/cm_error.h
+++ b/src/WINNT/afsd/cm_error.h
@@ -78,6 +78,7 @@
 #define CM_ERROR_BUFFER_OVERFLOW        (CM_ERROR_BASE+64)
 #define CM_ERROR_EMPTY                  (CM_ERROR_BASE+65)
 #define CM_ERROR_INVAL_NET_RESP         (CM_ERROR_BASE+66)
+#define CM_ERROR_BULKSTAT_FAILURE       (CM_ERROR_BASE+67)
 
 #endif /*  OPENAFS_WINNT_AFSD_CM_ERROR_H */
diff --git a/src/WINNT/afsd/cm_vnodeops.c b/src/WINNT/afsd/cm_vnodeops.c
index b813532..84446ee 100644
--- a/src/WINNT/afsd/cm_vnodeops.c
+++ b/src/WINNT/afsd/cm_vnodeops.c
@@ -2505,6 +2505,17 @@ cm_TryBulkStatRPC(cm_scache_t *dscp, cm_bulkStat_t *bbp, cm_user_t *userp, cm_re
             }
         }
         if (!inlinebulk) {
+            /*
+             * It is important to note that RXAFS_BulkStatus is quite braindead.
+             * The AFS 3.6 file server implementation returns arrays that are
+             * sized to hold responses for all of the requested FIDs but it only
+             * populates their contents up to the point where it detects an error.
+             * Unfortunately, it does not inform the caller which entries were
+             * filled and which were not.  The caller has no ability to determine
+             * which FID the RPC return code applies to or for which of the FIDs
+             * valid status info and callbacks have been issued.  As a result,
+             * when an error is returned, none of the data received can be trusted.
+             */
             code = RXAFS_BulkStatus(rxconnp, &fidStruct, &statStruct,
                                     &callbackStruct, &volSync);
         }
@@ -2548,6 +2559,17 @@ cm_TryBulkStatRPC(cm_scache_t *dscp, cm_bulkStat_t *bbp, cm_user_t *userp, cm_re
     if (code) {
         osi_Log2(afsd_logp, "CALL %sBulkStatus FAILURE code 0x%x",
                  inlinebulk ? "Inline" : "", code);
+        if (!inlinebulk) {
+            /*
+             * Since an error occurred and it is impossible to determine
+             * the context in which the returned error code should be
+             * interpreted, we return the CM_ERROR_BULKSTAT_FAILURE error
+             * which indicates that Bulk Stat cannot be used for the
+             * current request.  The caller should fall back to using
+             * individual RXAFS_FetchStatus calls.
+             */
+            code = CM_ERROR_BULKSTAT_FAILURE;
+        }
         cm_EndCallbackGrantingCall(NULL, &cbReq, NULL, NULL, 0);
         break;
     }
diff --git a/src/WINNT/afsd/smb.c b/src/WINNT/afsd/smb.c
index 99c3bc6..31a49a8 100644
--- a/src/WINNT/afsd/smb.c
+++ b/src/WINNT/afsd/smb.c
@@ -4655,6 +4655,7 @@ smb_ApplyDirListPatches(cm_scache_t * dscp, smb_dirListPatch_t **dirPatchespp,
     clientchar_t path[AFSPATHMAX];
     afs_uint32 rights;
     afs_int32 mustFake = 0;
+    afs_int32 nobulkstat = 0;
 
     lock_ObtainWrite(&dscp->rw);
     code = cm_FindACLCache(dscp, userp, &rights);
@@ -4677,6 +4678,7 @@ smb_ApplyDirListPatches(cm_scache_t * dscp, smb_dirListPatch_t **dirPatchespp,
     memset(bsp, 0, sizeof(cm_bulkStat_t));
     bsp->userp = userp;
 
+  restart_patchset:
     for (patchp = *dirPatchespp, count=0;
          patchp;
          patchp = (smb_dirListPatch_t *) osi_QNext(&patchp->q)) {
@@ -4695,6 +4697,15 @@ smb_ApplyDirListPatches(cm_scache_t * dscp, smb_dirListPatch_t **dirPatchespp,
                     cm_ReleaseSCache(tscp);
                     continue;
                 }
+
+                if (nobulkstat) {
+                    code = cm_SyncOp(tscp, NULL, userp, reqp, 0,
+                                     CM_SCACHESYNC_NEEDCALLBACK | CM_SCACHESYNC_GETSTATUS);
+                    lock_ReleaseWrite(&tscp->rw);
+                    cm_ReleaseSCache(tscp);
+                    continue;
+                }
+
                 lock_ReleaseWrite(&tscp->rw);
             } /* got lock */
             cm_ReleaseSCache(tscp);
@@ -4709,6 +4720,16 @@ smb_ApplyDirListPatches(cm_scache_t * dscp, smb_dirListPatch_t **dirPatchespp,
                 code = cm_TryBulkStatRPC(dscp, bsp, userp, reqp);
                 memset(bsp, 0, sizeof(cm_bulkStat_t));
                 bsp->userp = userp;
+
+                if (code == CM_ERROR_BULKSTAT_FAILURE) {
+                    /*
+                     * If bulk stat cannot be used for this directory
+                     * we must perform individual fetch status calls.
+                     * Restart from the beginning of the patch series.
+                     */
+                    nobulkstat = 1;
+                    goto restart_patchset;
+                }
             }
         }
diff --git a/src/WINNT/afsd/smb3.c b/src/WINNT/afsd/smb3.c
index 2811419..d8a956b 100644
--- a/src/WINNT/afsd/smb3.c
+++ b/src/WINNT/afsd/smb3.c
@@ -4666,6 +4666,7 @@ smb_ApplyV3DirListPatches(cm_scache_t *dscp, smb_dirListPatch_t **dirPatchespp,
     smb_dirListPatch_t *npatchp;
     afs_uint32 rights;
     afs_int32 mustFake = 0;
+    afs_int32 nobulkstat = 0;
     clientchar_t path[AFSPATHMAX];
 
     lock_ObtainWrite(&dscp->rw);
@@ -4691,6 +4692,7 @@ smb_ApplyV3DirListPatches(cm_scache_t *dscp, smb_dirListPatch_t **dirPatchespp,
     memset(bsp, 0, sizeof(cm_bulkStat_t));
     bsp->userp = userp;
 
+  restart_patchset:
     for (patchp = *dirPatchespp, count=0;
          patchp;
          patchp = (smb_dirListPatch_t *) osi_QNext(&patchp->q)) {
@@ -4726,6 +4728,15 @@ smb_ApplyV3DirListPatches(cm_scache_t *dscp, smb_dirListPatch_t **dirPatchespp,
                     cm_ReleaseSCache(tscp);
                     continue;
                 }
+
+                if (nobulkstat) {
+                    code = cm_SyncOp(tscp, NULL, userp, reqp, 0,
+                                     CM_SCACHESYNC_NEEDCALLBACK | CM_SCACHESYNC_GETSTATUS);
+                    lock_ReleaseWrite(&tscp->rw);
+                    cm_ReleaseSCache(tscp);
+                    continue;
+                }
+
                 lock_ReleaseWrite(&tscp->rw);
             } /* got lock */
             cm_ReleaseSCache(tscp);
@@ -4740,6 +4751,16 @@ smb_ApplyV3DirListPatches(cm_scache_t *dscp, smb_dirListPatch_t **dirPatchespp,
                 code = cm_TryBulkStatRPC(dscp, bsp, userp, reqp);
                 memset(bsp, 0, sizeof(cm_bulkStat_t));
                 bsp->userp = userp;
+
+                if (code == CM_ERROR_BULKSTAT_FAILURE) {
+                    /*
+                     * If bulk stat cannot be used for this directory
+                     * we must perform individual fetch status calls.
+                     * Restart from the beginning of the patch series.
+                     */
+                    nobulkstat = 1;
+                    goto restart_patchset;
+                }
             }
         }
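
The same recovery idiom, reduced to its control flow as used in
smb_ApplyDirListPatches() and smb_ApplyV3DirListPatches() above.  This is a
sketch, not the patch itself: smb_PatchGetSCache() stands in for the existing
per-patch scache lookup and the batching details are elided; the remaining
identifiers appear in the diff.

    afs_int32 nobulkstat = 0;
    afs_uint32 code = 0;

  restart_patchset:
    for (patchp = *dirPatchespp, count = 0;
         patchp;
         patchp = (smb_dirListPatch_t *) osi_QNext(&patchp->q)) {

        tscp = smb_PatchGetSCache(patchp);      /* illustrative stand-in */
        if (!tscp)
            continue;

        if (nobulkstat) {
            /* bulk stat was refused earlier; stat this entry by itself */
            lock_ObtainWrite(&tscp->rw);
            code = cm_SyncOp(tscp, NULL, userp, reqp, 0,
                             CM_SCACHESYNC_NEEDCALLBACK | CM_SCACHESYNC_GETSTATUS);
            lock_ReleaseWrite(&tscp->rw);
            cm_ReleaseSCache(tscp);
            continue;
        }

        cm_ReleaseSCache(tscp);
        /* ... append patchp's FID to bsp ... */

        if (bsp->counter == AFSCBMAX) {
            code = cm_TryBulkStatRPC(dscp, bsp, userp, reqp);
            memset(bsp, 0, sizeof(cm_bulkStat_t));
            bsp->userp = userp;

            if (code == CM_ERROR_BULKSTAT_FAILURE) {
                /* none of the bulk data can be trusted; redo everything singly */
                nobulkstat = 1;
                goto restart_patchset;
            }
        }
    }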