/* Map the legacy LockPage()/UnlockPage() names onto the kernel's
 * lock_page()/unlock_page() primitives.  These are guarded by the
 * enclosing #if (closed just below), so they only exist on kernels
 * where the lock_page/unlock_page interface is available. */
#define LockPage(pp) lock_page(pp)
#define UnlockPage(pp) unlock_page(pp)
#endif
-#define AFS_KMAP_ATOMIC
#ifndef afs_min
/* Return the smaller of A and B.  The whole expansion is parenthesized
 * so the macro binds correctly when used inside a larger expression
 * (e.g. afs_min(a,b) + c); without the outer parentheses the ?: would
 * swallow trailing operators into its false branch.  Note A or B is
 * still evaluated twice -- do not pass expressions with side effects. */
#define afs_min(A,B) (((A) < (B)) ? (A) : (B))
if (!avc)
return;
- if (avc->f.states & FCSBypass)
- osi_Panic("afs_TransitionToBypass: illegal transition to bypass--already FCSBypass\n");
-
if (aflags & TRANSChangeDesiredBit)
setDesire = 1;
if (aflags & TRANSSetManualBit)
#else
AFS_GLOCK();
#endif
+
ObtainWriteLock(&avc->lock, 925);
+ /*
+ * Someone may have beat us to doing the transition - we had no lock
+ * when we checked the flag earlier. No cause to panic, just return.
+ */
+ if (avc->f.states & FCSBypass)
+ goto done;
/* If we never cached this, just change state */
if (setDesire && (!(avc->cachingStates & FCSBypass))) {
if (!avc)
return;
- if (!(avc->f.states & FCSBypass))
- osi_Panic("afs_TransitionToCaching: illegal transition to caching--already caching\n");
-
if (aflags & TRANSChangeDesiredBit)
resetDesire = 1;
if (aflags & TRANSSetManualBit)
AFS_GLOCK();
#endif
ObtainWriteLock(&avc->lock, 926);
+ /*
+ * Someone may have beat us to doing the transition - we had no lock
+ * when we checked the flag earlier. No cause to panic, just return.
+ */
+ if (!(avc->f.states & FCSBypass))
+ goto done;
/* Ok, we actually do need to flush */
ObtainWriteLock(&afs_xcbhash, 957);
avc->cachingStates |= FCSManuallySet;
avc->cachingTransitions++;
+done:
ReleaseWriteLock(&avc->lock);
#ifdef AFS_BOZONLOCK_ENV
afs_BozonUnlock(&avc->pvnLock, avc);
ciov = auio->uio_iov; \
iovmax = auio->uio_iovcnt - 1; \
pp = (struct page*) ciov->iov_base; \
- afs_warn("BYPASS: Unlocking pages..."); \
while(1) { \
- if(pp != NULL && PageLocked(pp)) \
- UnlockPage(pp); \
- put_page(pp); /* decrement refcount */ \
+ if (pp) { \
+ if (PageLocked(pp)) \
+ UnlockPage(pp); \
+ put_page(pp); /* decrement refcount */ \
+ } \
iovno++; \
if(iovno > iovmax) \
break; \
ciov = (auio->uio_iov + iovno); \
pp = (struct page*) ciov->iov_base; \
} \
- afs_warn("Pages Unlocked.\n"); \
} while(0)
#else
#ifdef UKERNEL
struct iovec *ciov;
struct page *pp;
char *address;
-#ifdef AFS_KMAP_ATOMIC
char *page_buffer = osi_Alloc(PAGE_SIZE);
-#else
- char *page_buffer = NULL;
-#endif
ciov = auio->uio_iov;
pp = (struct page*) ciov->iov_base;
goto done;
}
+ /* If we get a 0 length reply, time to cleanup and return */
+ if (length == 0) {
+ unlock_and_release_pages(auio);
+ result = 0;
+ goto done;
+ }
+
/*
* The fetch protocol is extended for the AFS/DFS translator
* to allow multiple blocks of data, each with its own length,
clen = ciov->iov_len - iovoff;
tlen = afs_min(length, clen);
#ifdef AFS_LINUX24_ENV
-#ifndef AFS_KMAP_ATOMIC
- if(pp)
- address = kmap(pp);
- else {
- /* rx doesn't provide an interface to simply advance
- or consume n bytes. for now, allocate a PAGE_SIZE
- region of memory to receive bytes in the case that
- there were holes in readpages */
- if(page_buffer == NULL)
- page_buffer = osi_Alloc(PAGE_SIZE);
- address = page_buffer;
- }
-#else
address = page_buffer;
-#endif
#else
#ifndef UKERNEL
#error AFS_CACHE_BYPASS not implemented on this platform
address += code;
} else {
#ifdef AFS_LINUX24_ENV
-#ifdef AFS_KMAP_ATOMIC
if(pp) {
address = kmap_atomic(pp, KM_USER0);
memcpy(address, page_buffer, PAGE_SIZE);
kunmap_atomic(address, KM_USER0);
}
-#endif
#else
#ifndef UKERNEL
#error AFS_CACHE_BYPASS not implemented on this platform
else
afs_warn("afs_NoCacheFetchProc: page not locked at iovno %d!\n", iovno);
put_page(pp); /* decrement refcount */
-#ifndef AFS_KMAP_ATOMIC
- kunmap(pp);
-#endif
#else
#ifndef UKERNEL
#error AFS_CACHE_BYPASS not implemented on this platform