/*
 * THE REGENTS OF THE UNIVERSITY OF MICHIGAN
 *
 * Permission is granted to use, copy, create derivative works
 * and redistribute this software and such derivative works
 * for any purpose, so long as the name of The University of
 * Michigan is not used in any advertising or publicity
 * pertaining to the use or distribution of this software
 * without specific, written prior authorization. If the
 * above copyright notice or any other identification of the
 * University of Michigan is included in any copy of any
 * portion of this software, then the disclaimer below must
 * also be included.
 *
 * THIS SOFTWARE IS PROVIDED AS IS, WITHOUT REPRESENTATION
 * FROM THE UNIVERSITY OF MICHIGAN AS TO ITS FITNESS FOR ANY
 * PURPOSE, AND WITHOUT WARRANTY BY THE UNIVERSITY OF
 * MICHIGAN OF ANY KIND, EITHER EXPRESS OR IMPLIED, INCLUDING
 * WITHOUT LIMITATION THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE
 * REGENTS OF THE UNIVERSITY OF MICHIGAN SHALL NOT BE LIABLE
 * FOR ANY DAMAGES, INCLUDING SPECIAL, INDIRECT, INCIDENTAL, OR
 * CONSEQUENTIAL DAMAGES, WITH RESPECT TO ANY CLAIM ARISING
 * OUT OF OR IN CONNECTION WITH THE USE OF THE SOFTWARE, EVEN
 * IF IT HAS BEEN OR IS HEREAFTER ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGES.
 */
/*
 * Portions Copyright (c) 2008
 * The Linux Box Corporation
 *
 * Permission is granted to use, copy, create derivative works
 * and redistribute this software and such derivative works
 * for any purpose, so long as the name of the Linux Box
 * Corporation is not used in any advertising or publicity
 * pertaining to the use or distribution of this software
 * without specific, written prior authorization. If the
 * above copyright notice or any other identification of the
 * Linux Box Corporation is included in any copy of any
 * portion of this software, then the disclaimer below must
 * also be included.
 *
 * This software is provided as is, without representation
 * from the Linux Box Corporation as to its fitness for any
 * purpose, and without warranty by the Linux Box Corporation
 * of any kind, either express or implied, including
 * without limitation the implied warranties of
 * merchantability and fitness for a particular purpose. The
 * Linux Box Corporation shall not be liable for any damages,
 * including special, indirect, incidental, or consequential
 * damages, with respect to any claim arising out of or in
 * connection with the use of the software, even if it has been
 * or is hereafter advised of the possibility of such damages.
 */
#include <afsconfig.h>
#include "afs/param.h"

#if defined(AFS_CACHE_BYPASS) || defined(UKERNEL)
/* the matching #endif is at the end of this file */
#include "afs/afs_bypasscache.h"

#include "afs/sysincludes.h"	/* Standard vendor system headers */
#include "afs/afsincludes.h"	/* Afs-based standard headers */
#include "afs/afs_stats.h"	/* statistics */
#include "afs/nfsclient.h"
#include "rx/rx_globals.h"
#define afs_min(A, B) (((A) < (B)) ? (A) : (B))
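/*
 * Note that afs_min evaluates its winning argument twice, so
 * arguments with side effects (e.g. afs_min(i++, j)) are unsafe.
 */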
/* conditional GLOCK macros: take the AFS global lock only if not
 * already held, and remember the prior state in var */
#define COND_GLOCK(var) \
    do { var = ISAFS_GLOCK(); if (!var) RX_AFS_GLOCK(); } while (0)

/* restore: drop the lock again only if COND_GLOCK acquired it */
#define COND_RE_GUNLOCK(var) \
    do { if (!var) RX_AFS_GUNLOCK(); } while (0)

/* conditional GUNLOCK macros: drop the AFS global lock only if held,
 * and remember the prior state in var */
#define COND_GUNLOCK(var) \
    do { var = ISAFS_GLOCK(); if (var) RX_AFS_GUNLOCK(); } while (0)

/* restore: re-acquire the lock only if COND_GUNLOCK dropped it */
#define COND_RE_GLOCK(var) \
    do { if (var) RX_AFS_GLOCK(); } while (0)
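/*
 * Typical usage, as in afs_NoCacheFetchProc below: drop the global
 * lock around a blocking Rx call, then restore the original state.
 *
 *	COND_GUNLOCK(locked);
 *	code = rx_Read(acall, (char *)&length, sizeof(afs_int32));
 *	COND_RE_GLOCK(locked);
 */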
int cache_bypass_strategy = NEVER_BYPASS_CACHE;
int cache_bypass_threshold = AFS_CACHE_BYPASS_DISABLED;	/* file size > threshold triggers bypass */
int cache_bypass_prefetch = 1;	/* Should we do prefetching? */
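/*
 * A minimal sketch of how a read path might consult these knobs
 * before switching modes; the strategy constants are assumed to be
 * the ones declared in afs_bypasscache.h, and the exact policy
 * check in the callers may differ:
 *
 *	if (cache_bypass_strategy == ALWAYS_BYPASS_CACHE
 *	    || (cache_bypass_strategy == LARGE_FILES_BYPASS_CACHE
 *		&& cache_bypass_threshold != AFS_CACHE_BYPASS_DISABLED
 *		&& filesize > cache_bypass_threshold))
 *		afs_TransitionToBypass(avc, acred, TRANSChangeDesiredBit);
 */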
extern afs_rwlock_t afs_xcbhash;
/*
 * This is almost exactly like the PFlush() routine in afs_pioctl.c,
 * but that routine is static. We are about to change a file from
 * normal caching to bypassing its caching. Therefore, we want to
 * free up any cache space in use by the file, and throw out any
 * existing VM pages for the file. We keep track of the number of
 * times we go back and forth from caching to bypass.
 */
void
afs_TransitionToBypass(struct vcache *avc,
                       afs_ucred_t *acred, int aflags)
{
    afs_int32 code;
    struct vrequest treq;
    int setDesire = 0;
    int setManual = 0;

    if (!avc)
        return;

    if (aflags & TRANSChangeDesiredBit)
        setDesire = 1;
    if (aflags & TRANSSetManualBit)
        setManual = 1;

#ifdef AFS_BOZONLOCK_ENV
    afs_BozonLock(&avc->pvnLock, avc);	/* Since afs_TryToSmush will do a pvn_vptrunc */
#else
    AFS_GLOCK();
#endif

    ObtainWriteLock(&avc->lock, 925);
    /*
     * Someone may have beat us to doing the transition - we had no lock
     * when we checked the flag earlier. No cause to panic, just return.
     */
    if (avc->cachingStates & FCSBypass)
        goto done;

    /* If we never cached this, just change state */
    if (setDesire && (!(avc->cachingStates & FCSBypass))) {
        avc->cachingStates |= FCSBypass;
        goto done;
    }
    /* cg2v, try to store any chunks not written 20071204 */
    if (avc->execsOrWriters > 0) {
        code = afs_InitReq(&treq, acred);
        if (!code)
            code = afs_StoreAllSegments(avc, &treq, AFS_SYNC | AFS_LASTSTORE);
    }
    /* also cg2v, don't dequeue the callback */
    ObtainWriteLock(&afs_xcbhash, 956);
    afs_DequeueCallback(avc);
    ReleaseWriteLock(&afs_xcbhash);
    avc->f.states &= ~(CStatd | CDirty);	/* next reference will re-stat */
    /* now find the disk cache entries */
    afs_TryToSmush(avc, acred, 1);
    osi_dnlc_purgedp(avc);
    if (avc->linkData && !(avc->f.states & CCore)) {
        afs_osi_Free(avc->linkData, strlen(avc->linkData) + 1);
        avc->linkData = NULL;
    }
    avc->cachingStates |= FCSBypass;	/* Set the bypass flag */
    if (setDesire)
        avc->cachingStates |= FCSDesireBypass;
    if (setManual)
        avc->cachingStates |= FCSManuallySet;
    avc->cachingTransitions++;
done:
    ReleaseWriteLock(&avc->lock);
#ifdef AFS_BOZONLOCK_ENV
    afs_BozonUnlock(&avc->pvnLock, avc);
#else
    AFS_GUNLOCK();
#endif
}
/*
 * This is almost exactly like the PFlush() routine in afs_pioctl.c,
 * but that routine is static. We are about to change a file from
 * bypassing caching to normal caching. Therefore, we want to
 * throw out any existing VM pages for the file. We keep track of
 * the number of times we go back and forth from caching to bypass.
 */
void
afs_TransitionToCaching(struct vcache *avc,
                        afs_ucred_t *acred, int aflags)
{
    int resetDesire = 0;
    int setManual = 0;

    if (!avc)
        return;

    if (aflags & TRANSChangeDesiredBit)
        resetDesire = 1;
    if (aflags & TRANSSetManualBit)
        setManual = 1;

#ifdef AFS_BOZONLOCK_ENV
    afs_BozonLock(&avc->pvnLock, avc);	/* Since afs_TryToSmush will do a pvn_vptrunc */
#else
    AFS_GLOCK();
#endif

    ObtainWriteLock(&avc->lock, 926);
    /*
     * Someone may have beat us to doing the transition - we had no lock
     * when we checked the flag earlier. No cause to panic, just return.
     */
    if (!(avc->cachingStates & FCSBypass))
        goto done;

    /* Ok, we actually do need to flush */
    ObtainWriteLock(&afs_xcbhash, 957);
    afs_DequeueCallback(avc);
    avc->f.states &= ~(CStatd | CDirty);	/* next reference will re-stat cache entry */
    ReleaseWriteLock(&afs_xcbhash);
    /* now find the disk cache entries */
    afs_TryToSmush(avc, acred, 1);
    osi_dnlc_purgedp(avc);
    if (avc->linkData && !(avc->f.states & CCore)) {
        afs_osi_Free(avc->linkData, strlen(avc->linkData) + 1);
        avc->linkData = NULL;
    }
    avc->cachingStates &= ~(FCSBypass);	/* Reset the bypass flag */
    if (resetDesire)
        avc->cachingStates &= ~(FCSDesireBypass);
    if (setManual)
        avc->cachingStates |= FCSManuallySet;
    avc->cachingTransitions++;
done:
    ReleaseWriteLock(&avc->lock);
#ifdef AFS_BOZONLOCK_ENV
    afs_BozonUnlock(&avc->pvnLock, avc);
#else
    AFS_GUNLOCK();
#endif
}
/* In the case where there's an error in afs_NoCacheFetchProc or
 * afs_PrefetchNoCache, all of the pages they've been passed need
 * to be unlocked.
 */
#ifdef UKERNEL
typedef void * bypass_page_t;
#define copy_page(pp, pageoff, rxiov, iovno, iovoff, auio, curiov) \
    do { \
        int dolen = auio->uio_iov[curiov].iov_len - pageoff; \
        memcpy(((char *)pp) + pageoff, \
               ((char *)rxiov[iovno].iov_base) + iovoff, dolen); \
        auio->uio_resid -= dolen; \
    } while (0)
#define copy_pages(pp, pageoff, rxiov, iovno, iovoff, auio, curiov) \
    do { \
        int dolen = rxiov[iovno].iov_len - iovoff; \
        memcpy(((char *)pp) + pageoff, \
               ((char *)rxiov[iovno].iov_base) + iovoff, dolen); \
        auio->uio_resid -= dolen; \
    } while (0)
#define unlock_and_release_pages(auio)
#define release_full_page(pp, pageoff)
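/*
 * In UKERNEL the "pages" are plain buffers handed in by the caller
 * (see the end of afs_PrefetchNoCache), so there is nothing to
 * unlock or release; both macros intentionally expand to nothing.
 */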
#else
typedef struct page * bypass_page_t;
#define copy_page(pp, pageoff, rxiov, iovno, iovoff, auio, curiov) \
    do { \
        char *address; \
        int dolen = auio->uio_iov[curiov].iov_len - pageoff; \
        address = kmap_atomic(pp, KM_USER0); \
        memcpy(address + pageoff, \
               (char *)(rxiov[iovno].iov_base) + iovoff, dolen); \
        kunmap_atomic(address, KM_USER0); \
    } while (0)
#define copy_pages(pp, pageoff, rxiov, iovno, iovoff, auio, curiov) \
    do { \
        char *address; \
        int dolen = rxiov[iovno].iov_len - iovoff; \
        address = kmap_atomic(pp, KM_USER0); \
        memcpy(address + pageoff, \
               (char *)(rxiov[iovno].iov_base) + iovoff, dolen); \
        kunmap_atomic(address, KM_USER0); \
    } while (0)
#define unlock_and_release_pages(auio) \
    do { \
        struct iovec *ciov; \
        bypass_page_t pp; \
        afs_int32 iovmax; \
        afs_int32 iovno = 0; \
        ciov = auio->uio_iov; \
        iovmax = auio->uio_iovcnt - 1; \
        pp = (bypass_page_t) ciov->iov_base; \
        while (1) { \
            if (pp) { \
                if (PageLocked(pp)) \
                    unlock_page(pp); \
                put_page(pp); /* decrement refcount */ \
            } \
            iovno++; \
            if (iovno > iovmax) \
                break; \
            ciov = (auio->uio_iov + iovno); \
            pp = (bypass_page_t) ciov->iov_base; \
        } \
    } while (0)
#define release_full_page(pp, pageoff) \
    do { \
        /* this is appropriate when no caller intends to unlock \
         * and release the page */ \
        SetPageUptodate(pp); \
        if (PageLocked(pp)) \
            unlock_page(pp); \
        else \
            afs_warn("afs_NoCacheFetchProc: page not locked!\n"); \
        put_page(pp); /* decrement refcount */ \
    } while (0)

#endif /* UKERNEL */
/* no-cache prefetch routine */
static afs_int32
afs_NoCacheFetchProc(struct rx_call *acall,
                     struct vcache *avc,
                     struct uio *auio,
                     afs_int32 release_pages,
                     afs_int32 size)
{
    afs_int32 length;
    afs_int32 code;
    int moredata, iovno, iovoff, iovmax, result, locked;
    struct iovec *ciov;
    struct iovec *rxiov;
    int nio = 0;
    bypass_page_t pp;
    int curpage, bytes;
    int pageoff;

    rxiov = osi_AllocSmallSpace(sizeof(struct iovec) * RX_MAXIOVECS);
    ciov = auio->uio_iov;
    pp = (bypass_page_t) ciov->iov_base;
    iovmax = auio->uio_iovcnt - 1;
    iovno = iovoff = result = 0;
    do {
        COND_GUNLOCK(locked);
        code = rx_Read(acall, (char *)&length, sizeof(afs_int32));
        COND_RE_GLOCK(locked);
        if (code != sizeof(afs_int32)) {
            result = EIO;
            afs_warn("Preread error. code: %d instead of %d\n",
                     code, (int)sizeof(afs_int32));
            unlock_and_release_pages(auio);
            goto done;
        } else
            length = ntohl(length);
393 afs_warn("Preread error. Got length %d, which is greater than size %d\n",
395 unlock_and_release_pages(auio);
399 /* If we get a 0 length reply, time to cleanup and return */
401 unlock_and_release_pages(auio);
        /*
         * The fetch protocol is extended for the AFS/DFS translator
         * to allow multiple blocks of data, each with its own length,
         * to be returned. As long as the top bit is set, there are more
         * blocks expected.
         *
         * We do not do this for AFS file servers because they sometimes
         * return large negative numbers as the transfer size.
         */
        if (avc->f.states & CForeign) {
            moredata = length & 0x80000000;
            length &= ~0x80000000;
        } else
            moredata = 0;
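        /*
         * Two cursors drive the copy loop below: (curpage, pageoff)
         * track our position in the caller's pages, while (iovno,
         * iovoff) track our position in the iovecs most recently
         * filled by rx_Readv. Whichever side runs out first triggers
         * either a fresh rx_Readv or a move to the next page.
         */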
        for (curpage = 0; curpage <= iovmax; curpage++) {
            pageoff = 0;
            /* properly, this should track uio_resid, not a fixed page size! */
            while (pageoff < auio->uio_iov[curpage].iov_len) {
                /* If no more iovs, issue new read. */
                if (iovno >= nio) {
                    COND_GUNLOCK(locked);
                    bytes = rx_Readv(acall, rxiov, &nio, RX_MAXIOVECS, length);
                    COND_RE_GLOCK(locked);
                    if (bytes < 0) {
                        afs_warn("afs_NoCacheFetchProc: rx_Read error. Return code was %d\n", bytes);
                        result = bytes;
                        unlock_and_release_pages(auio);
                        goto done;
                    } else if (bytes == 0) {
                        /* we failed to read the full length */
                        result = EIO;
                        afs_warn("afs_NoCacheFetchProc: rx_Read returned zero. Aborting.\n");
                        unlock_and_release_pages(auio);
                        goto done;
                    }
                    size -= bytes;
                    auio->uio_resid -= bytes;
                    iovno = 0;
                }
                pp = (bypass_page_t) auio->uio_iov[curpage].iov_base;
                if (pageoff + (rxiov[iovno].iov_len - iovoff) <= auio->uio_iov[curpage].iov_len) {
                    /* Copy entire (or rest of) current iovec into current page */
                    if (pp)
                        copy_pages(pp, pageoff, rxiov, iovno, iovoff, auio, curpage);
                    length -= (rxiov[iovno].iov_len - iovoff);
                    pageoff += rxiov[iovno].iov_len - iovoff;
                    iovno++;
                    iovoff = 0;
                } else {
                    /* Copy only what's needed to fill current page */
                    if (pp)
                        copy_page(pp, pageoff, rxiov, iovno, iovoff, auio, curpage);
                    length -= (auio->uio_iov[curpage].iov_len - pageoff);
                    iovoff += auio->uio_iov[curpage].iov_len - pageoff;
                    pageoff = auio->uio_iov[curpage].iov_len;
                }
                /* we filled a page, or this is the last page; conditionally release it */
                if (pp && ((pageoff == auio->uio_iov[curpage].iov_len
                            && release_pages)
                           || (length == 0 && iovno >= nio)))
                    release_full_page(pp, pageoff);

                if (length == 0 && iovno >= nio)
                    goto done;
            }
        }
    } while (moredata);

done:
    osi_FreeSmallSpace(rxiov);
    return result;
}
/* dispatch a no-cache read request */
afs_int32
afs_ReadNoCache(struct vcache *avc,
                struct nocache_read_request *bparms,
                afs_ucred_t *acred)
{
    afs_int32 code;
    afs_int32 bcnt = 1;
    struct brequest *breq;
    struct vrequest *areq;

    /* the receiver will free this */
    areq = osi_Alloc(sizeof(struct vrequest));

    if (avc->vc_error) {
        code = EIO;
        afs_warn("afs_ReadNoCache VCache Error!\n");
        goto cleanup;
    }
    if ((code = afs_InitReq(areq, acred))) {
        afs_warn("afs_ReadNoCache afs_InitReq error!\n");
        goto cleanup;
    }
    AFS_GLOCK();
    code = afs_VerifyVCache(avc, areq);
    AFS_GUNLOCK();

    if (code) {
        code = afs_CheckCode(code, areq, 11);	/* failed to get it */
        afs_warn("afs_ReadNoCache Failed to verify VCache!\n");
        goto cleanup;
    }

    bparms->areq = areq;

    /* and queue this one */
    AFS_GLOCK();
    while (bcnt < 20) {
        breq = afs_BQueue(BOP_FETCH_NOCACHE, avc, B_DONTWAIT, 0, acred, 1, 1,
                          bparms, (void *)0, (void *)0);
        if (breq != 0) {
            code = 0;
            break;
        }
        afs_osi_Wait(10 * bcnt, 0, 0);
        bcnt++;
    }
    AFS_GUNLOCK();

    if (!breq) {
        code = EBUSY;
        goto cleanup;
    }

    return code;
cleanup:
    /* If there's a problem before we queue the request, we need to
     * do everything that would normally happen when the request was
     * processed, like unlocking the pages and freeing memory.
     */
    unlock_and_release_pages(bparms->auio);
    osi_Free(areq, sizeof(struct vrequest));
    osi_Free(bparms->auio->uio_iov,
             bparms->auio->uio_iovcnt * sizeof(struct iovec));
    osi_Free(bparms->auio, sizeof(struct uio));
    osi_Free(bparms, sizeof(struct nocache_read_request));
    return code;
}
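/*
 * A minimal caller sketch, assuming the Linux/UKERNEL layout of
 * struct nocache_read_request (auio, areq, offset, length members);
 * real callers live in the per-OS read paths and must pin the
 * destination pages into auio->uio_iov before queueing.
 */
#if 0
    bparms = osi_Alloc(sizeof(struct nocache_read_request));
    bparms->auio = auio;	/* iovecs point at held, locked pages */
    bparms->offset = offset;
    bparms->length = length;
    code = afs_ReadNoCache(avc, bparms, acred);	/* consumes bparms on success */
#endif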
/* Cannot have static linkage--called from BPrefetch (afs_daemons) */
afs_int32
afs_PrefetchNoCache(struct vcache *avc,
                    afs_ucred_t *acred,
                    struct nocache_read_request *bparms)
{
    struct uio *auio;
#ifndef UKERNEL
    struct iovec *iovecp;
#endif
    struct vrequest *areq;
    afs_int32 code = 0;
    struct rx_connection *rxconn;
#ifdef AFS_64BIT_CLIENT
    afs_int32 length_hi, bytes, locked;
#endif

    struct afs_conn *tc;
    struct rx_call *tcall;
    struct tlocal1 {
        struct AFSVolSync tsync;
        struct AFSFetchStatus OutStatus;
        struct AFSCallBack CallBack;
    };
    struct tlocal1 *tcallspec;

    auio = bparms->auio;
    areq = bparms->areq;
#ifndef UKERNEL
    iovecp = auio->uio_iov;
#endif

    tcallspec = osi_Alloc(sizeof(struct tlocal1));
    do {
        tc = afs_Conn(&avc->f.fid, areq, SHARED_LOCK /* ignored */, &rxconn);
        if (tc) {
            avc->callback = tc->parent->srvr->server;
            tcall = rx_NewCall(rxconn);
#ifdef AFS_64BIT_CLIENT
            if (!afs_serverHasNo64Bit(tc)) {
                code = StartRXAFS_FetchData64(tcall,
                                              (struct AFSFid *) &avc->f.fid.Fid,
                                              auio->uio_offset, bparms->length);
                if (code == 0) {
                    COND_GUNLOCK(locked);
                    bytes = rx_Read(tcall, (char *)&length_hi,
                                    sizeof(afs_int32));
                    COND_RE_GLOCK(locked);

                    if (bytes != sizeof(afs_int32)) {
                        length_hi = 0;
                        code = rx_Error(tcall);
                        COND_GUNLOCK(locked);
                        code = rx_EndCall(tcall, code);
                        COND_RE_GLOCK(locked);
                        tcall = NULL;
                    }
                }
            } /* afs_serverHasNo64Bit */
            if (code == RXGEN_OPCODE || afs_serverHasNo64Bit(tc)) {
                if (auio->uio_offset > 0x7FFFFFFF) {
                    code = EFBIG;
                } else {
                    afs_int32 pos;
                    pos = auio->uio_offset;
                    COND_GUNLOCK(locked);
                    if (!tcall)
                        tcall = rx_NewCall(rxconn);
                    code = StartRXAFS_FetchData(tcall,
                                                (struct AFSFid *) &avc->f.fid.Fid,
                                                pos, bparms->length);
                    COND_RE_GLOCK(locked);
                }
                afs_serverSetNo64Bit(tc);
            }
#else
            code = StartRXAFS_FetchData(tcall,
                                        (struct AFSFid *) &avc->f.fid.Fid,
                                        auio->uio_offset, bparms->length);
#endif
            if (code == 0) {
                code = afs_NoCacheFetchProc(tcall, avc, auio,
                                            1 /* release_pages */,
                                            bparms->length);
            } else {
                afs_warn("BYPASS: StartRXAFS_FetchData failed: %d\n", code);
                unlock_and_release_pages(auio);
                goto done;
            }
            if (code == 0) {
                code = EndRXAFS_FetchData(tcall, &tcallspec->OutStatus,
                                          &tcallspec->CallBack,
                                          &tcallspec->tsync);
            } else {
                afs_warn("BYPASS: NoCacheFetchProc failed: %d\n", code);
            }
            code = rx_EndCall(tcall, code);
        } else {
            afs_warn("BYPASS: No connection.\n");
            code = -1;
            unlock_and_release_pages(auio);
            goto done;
        }
    } while (afs_Analyze(tc, rxconn, code, &avc->f.fid, areq,
                         AFS_STATS_FS_RPCIDX_FETCHDATA,
                         SHARED_LOCK, 0));
done:
    /*
     * Copy appropriate fields into vcache
     */
    if (!code)
        afs_ProcessFS(avc, &tcallspec->OutStatus, areq);

    osi_Free(areq, sizeof(struct vrequest));
    osi_Free(tcallspec, sizeof(struct tlocal1));
    osi_Free(bparms, sizeof(struct nocache_read_request));
#ifndef UKERNEL
    /* in UKERNEL, the "pages" are passed in */
    osi_Free(iovecp, auio->uio_iovcnt * sizeof(struct iovec));
    osi_Free(auio, sizeof(struct uio));
#endif
    return code;
}

#endif /* AFS_CACHE_BYPASS || UKERNEL */