2 * Copyright 2000, International Business Machines Corporation and others.
5 * This software has been released under the terms of the IBM Public
6 * License. For details, see the LICENSE file in the top-level source
7 * directory or online at http://www.openafs.org/dl/license10.html
11 * Linux specific vnodeops. Also includes the glue routines required to call
14 * So far the only truly scary part is that Linux relies on the inode cache
15 * to be up to date. Don't you dare break a callback and expect an fstat
16 * to give you meaningful information. This appears to be fixed in the 2.1
17 * development kernels. As it is we can fix this now by intercepting the
21 #include <afsconfig.h>
22 #include "afs/param.h"
25 #include "afs/sysincludes.h"
26 #include "afsincludes.h"
27 #include "afs/afs_stats.h"
29 #ifdef HAVE_MM_INLINE_H
30 #include "h/mm_inline.h"
32 #include "h/pagemap.h"
33 #include "h/smp_lock.h"
34 #include "h/writeback.h"
35 #include "h/pagevec.h"
36 #if defined(AFS_CACHE_BYPASS)
38 #include "afs/afs_bypasscache.h"
41 #include "osi_compat.h"
42 #include "osi_pagecopy.h"
45 #define pageoff(pp) pgoff2loff((pp)->index)
47 #define pageoff(pp) pp->offset
50 #ifndef HAVE_PAGEVEC_LRU_ADD_FILE
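/* Kernels without the split file/anon LRU lists provide only
 * __pagevec_lru_add, so fall back to that. */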
51 #define __pagevec_lru_add_file __pagevec_lru_add
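/* Presumably a fallback (guarded by an elided #ifndef): a conservative
 * cap on the errno values that the ERR_PTR mechanism can encode. */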
55 #define MAX_ERRNO 1000L
58 #define LockPage(pp) lock_page(pp)
59 #define UnlockPage(pp) unlock_page(pp)
60 extern struct backing_dev_info afs_backing_dev_info;
62 extern struct vcache *afs_globalVp;
63 extern int afs_notify_change(struct dentry *dp, struct iattr *iattrp);
64 /* Some uses of BKL are perhaps not needed for bypass or memcache--
65 * why don't we try it out? */
66 extern struct afs_cacheOps afs_UfsCacheOps;
69 afs_maybe_lock_kernel(void) {
70 if(afs_cacheType == &afs_UfsCacheOps)
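	lock_kernel();	/* a sketch of the elided body: the BKL is only needed on the UFS cache path */
}

static inline void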
75 afs_maybe_unlock_kernel(void) {
76 if(afs_cacheType == &afs_UfsCacheOps)
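	unlock_kernel();
}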
80 /* This function converts a positive error code from AFS into a negative
81 * code suitable for passing into the Linux VFS layer. It checks that the
82 * error code is within the permissible bounds for the ERR_PTR mechanism.
84 * _All_ error codes which come from the AFS layer should be passed through
85 * this function before being returned to the kernel.
89 afs_convert_code(int code) {
90 if ((code >= 0) && (code <= MAX_ERRNO))
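	/* a sketch of the elided remainder: in-range codes are simply
	 * negated for the VFS; anything else is collapsed to -EIO */
	return -code;
    else
	return -EIO;
}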
96 /* Linux doesn't require a credp for many functions, and crref is an expensive
97 * operation. This helper function avoids obtaining it for VerifyVCache calls
101 afs_linux_VerifyVCache(struct vcache *avc, cred_t **retcred) {
102 cred_t *credp = NULL;
103 struct vrequest treq;
106 if (avc->f.states & CStatd) {
114 code = afs_InitReq(&treq, credp);
116 code = afs_VerifyVCache2(avc, &treq);
123 return afs_convert_code(code);
127 afs_linux_read(struct file *fp, char *buf, size_t count, loff_t * offp)
130 struct vcache *vcp = VTOAFS(fp->f_dentry->d_inode);
133 afs_Trace4(afs_iclSetp, CM_TRACE_READOP, ICL_TYPE_POINTER, vcp,
134 ICL_TYPE_OFFSET, offp, ICL_TYPE_INT32, count, ICL_TYPE_INT32,
136 code = afs_linux_VerifyVCache(vcp, NULL);
139 /* Linux's FlushPages implementation doesn't ever use credp,
140 * so we optimise by not using it */
141 osi_FlushPages(vcp, NULL); /* ensure stale pages are gone */
143 code = do_sync_read(fp, buf, count, offp);
147 afs_Trace4(afs_iclSetp, CM_TRACE_READOP, ICL_TYPE_POINTER, vcp,
148 ICL_TYPE_OFFSET, offp, ICL_TYPE_INT32, count, ICL_TYPE_INT32,
155 /* Now we have integrated VM for writes as well as reads. generic_file_write
156 * also takes care of re-positioning the pointer if the file is open in append
157 * mode. Call fake open/close to ensure we do writes of core dumps.
160 afs_linux_write(struct file *fp, const char *buf, size_t count, loff_t * offp)
163 struct vcache *vcp = VTOAFS(fp->f_dentry->d_inode);
168 afs_Trace4(afs_iclSetp, CM_TRACE_WRITEOP, ICL_TYPE_POINTER, vcp,
169 ICL_TYPE_OFFSET, offp, ICL_TYPE_INT32, count, ICL_TYPE_INT32,
170 (fp->f_flags & O_APPEND) ? 99998 : 99999);
172 code = afs_linux_VerifyVCache(vcp, &credp);
174 ObtainWriteLock(&vcp->lock, 529);
176 ReleaseWriteLock(&vcp->lock);
179 code = do_sync_write(fp, buf, count, offp);
183 ObtainWriteLock(&vcp->lock, 530);
185 if (vcp->execsOrWriters == 1 && !credp)
188 afs_FakeClose(vcp, credp);
189 ReleaseWriteLock(&vcp->lock);
191 afs_Trace4(afs_iclSetp, CM_TRACE_WRITEOP, ICL_TYPE_POINTER, vcp,
192 ICL_TYPE_OFFSET, offp, ICL_TYPE_INT32, count, ICL_TYPE_INT32,
201 extern int BlobScan(struct dcache * afile, afs_int32 ablob);
203 /* This is a complete rewrite of afs_readdir, since we can make use of
204 * filldir instead of afs_readdir_move. Note that changes to vcache/dcache
205 * handling and use of bulkstats will need to be reflected here as well.
208 afs_linux_readdir(struct file *fp, void *dirbuf, filldir_t filldir)
210 struct vcache *avc = VTOAFS(FILE_INODE(fp));
211 struct vrequest treq;
212 register struct dcache *tdc;
219 afs_size_t origOffset, tlen;
220 cred_t *credp = crref();
221 struct afs_fakestat_state fakestat;
223 afs_maybe_lock_kernel();
225 AFS_STATCNT(afs_readdir);
227 code = afs_convert_code(afs_InitReq(&treq, credp));
232 afs_InitFakeStat(&fakestat);
233 code = afs_convert_code(afs_EvalFakeStat(&avc, &fakestat, &treq));
237 /* update the cache entry */
239 code = afs_convert_code(afs_VerifyVCache2(avc, &treq));
243 /* get a reference to the entire directory */
244 tdc = afs_GetDCache(avc, (afs_size_t) 0, &treq, &origOffset, &tlen, 1);
250 ObtainSharedLock(&avc->lock, 810);
251 UpgradeSToWLock(&avc->lock, 811);
252 ObtainReadLock(&tdc->lock);
254 * Make sure that the data in the cache is current. There are two
255 * cases we need to worry about:
256 * 1. The cache data is being fetched by another process.
257 * 2. The cache data is no longer valid
259 while ((avc->f.states & CStatd)
260 && (tdc->dflags & DFFetching)
261 && hsame(avc->f.m.DataVersion, tdc->f.versionNo)) {
262 ReleaseReadLock(&tdc->lock);
263 ReleaseSharedLock(&avc->lock);
264 afs_osi_Sleep(&tdc->validPos);
265 ObtainSharedLock(&avc->lock, 812);
266 ObtainReadLock(&tdc->lock);
268 if (!(avc->f.states & CStatd)
269 || !hsame(avc->f.m.DataVersion, tdc->f.versionNo)) {
270 ReleaseReadLock(&tdc->lock);
271 ReleaseSharedLock(&avc->lock);
276 /* Set the readdir-in-progress flag, and downgrade the lock
277 * to shared so others will be able to acquire a read lock.
279 avc->f.states |= CReadDir;
280 avc->dcreaddir = tdc;
281 avc->readdir_pid = MyPidxx2Pid(MyPidxx);
282 ConvertWToSLock(&avc->lock);
284 /* Fill in until we get an error or we're done. This implementation
285 * takes an offset in units of blobs, rather than bytes.
288 offset = (int) fp->f_pos;
290 dirpos = BlobScan(tdc, offset);
294 de = afs_dir_GetBlob(tdc, dirpos);
298 ino = afs_calc_inum (avc->f.fid.Fid.Volume, ntohl(de->fid.vnode));
301 len = strlen(de->name);
303 printf("afs_linux_readdir: afs_dir_GetBlob failed, null name (inode %lx, dirpos %d)\n",
304 (unsigned long)&tdc->f.inode, dirpos);
306 ReleaseSharedLock(&avc->lock);
312 /* filldir returns -EINVAL when the buffer is full. */
314 unsigned int type = DT_UNKNOWN;
315 struct VenusFid afid;
318 afid.Cell = avc->f.fid.Cell;
319 afid.Fid.Volume = avc->f.fid.Fid.Volume;
320 afid.Fid.Vnode = ntohl(de->fid.vnode);
321 afid.Fid.Unique = ntohl(de->fid.vunique);
322 if ((avc->f.states & CForeign) == 0 && (ntohl(de->fid.vnode) & 1)) {
324 } else if ((tvc = afs_FindVCache(&afid, 0, 0))) {
327 } else if (((tvc->f.states) & (CStatd | CTruth))) {
328 /* CTruth will be set if the object has
333 else if (vtype == VREG)
335 /* Don't do this until we're sure it can't be a mtpt */
336 /* else if (vtype == VLNK)
338 /* what other types does AFS support? */
340 /* clean up from afs_FindVCache */
344 * If this is NFS readdirplus, then the filler is going to
345 * call getattr on this inode, which will deadlock if we're
349 code = (*filldir) (dirbuf, de->name, len, offset, ino, type);
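	/* Advance by the number of 32-byte blobs the entry occupied: one for
	 * the header plus the first bytes of the name, and (len + 16) >> 5
	 * more for whatever part of the name (and its NUL) spilled over --
	 * a reading of the AFS directory format, not spelled out here. */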
355 offset = dirpos + 1 + ((len + 16) >> 5);
357 /* If filldir didn't fill in the last one this is still pointing to that
360 fp->f_pos = (loff_t) offset;
362 ReleaseReadLock(&tdc->lock);
364 UpgradeSToWLock(&avc->lock, 813);
365 avc->f.states &= ~CReadDir;
367 avc->readdir_pid = 0;
368 ReleaseSharedLock(&avc->lock);
372 afs_PutFakeStat(&fakestat);
375 afs_maybe_unlock_kernel();
380 /* in afs_pioctl.c */
381 extern int afs_xioctl(struct inode *ip, struct file *fp, unsigned int com,
384 #if defined(HAVE_UNLOCKED_IOCTL) || defined(HAVE_COMPAT_IOCTL)
385 static long afs_unlocked_xioctl(struct file *fp, unsigned int com,
387 return afs_xioctl(FILE_INODE(fp), fp, com, arg);
394 afs_linux_mmap(struct file *fp, struct vm_area_struct *vmap)
396 struct vcache *vcp = VTOAFS(FILE_INODE(fp));
400 afs_Trace3(afs_iclSetp, CM_TRACE_GMAP, ICL_TYPE_POINTER, vcp,
401 ICL_TYPE_POINTER, vmap->vm_start, ICL_TYPE_INT32,
402 vmap->vm_end - vmap->vm_start);
404 /* get a validated vcache entry */
405 code = afs_linux_VerifyVCache(vcp, NULL);
407 /* Linux's Flushpage implementation doesn't use credp, so optimise
408 * our code to not need to crref() it */
409 osi_FlushPages(vcp, NULL); /* ensure stale pages are gone */
411 code = generic_file_mmap(fp, vmap);
414 vcp->f.states |= CMAPPED;
421 afs_linux_open(struct inode *ip, struct file *fp)
423 struct vcache *vcp = VTOAFS(ip);
424 cred_t *credp = crref();
427 afs_maybe_lock_kernel();
429 code = afs_open(&vcp, fp->f_flags, credp);
431 afs_maybe_unlock_kernel();
434 return afs_convert_code(code);
438 afs_linux_release(struct inode *ip, struct file *fp)
440 struct vcache *vcp = VTOAFS(ip);
441 cred_t *credp = crref();
444 afs_maybe_lock_kernel();
446 code = afs_close(vcp, fp->f_flags, credp);
448 afs_maybe_unlock_kernel();
451 return afs_convert_code(code);
455 afs_linux_fsync(struct file *fp, struct dentry *dp, int datasync)
458 struct inode *ip = FILE_INODE(fp);
459 cred_t *credp = crref();
461 afs_maybe_lock_kernel();
463 code = afs_fsync(VTOAFS(ip), credp);
465 afs_maybe_unlock_kernel();
467 return afs_convert_code(code);
473 afs_linux_lock(struct file *fp, int cmd, struct file_lock *flp)
476 struct vcache *vcp = VTOAFS(FILE_INODE(fp));
477 cred_t *credp = crref();
478 struct AFS_FLOCK flock;
480 /* Convert to a lock format afs_lockctl understands. */
481 memset((char *)&flock, 0, sizeof(flock));
482 flock.l_type = flp->fl_type;
483 flock.l_pid = flp->fl_pid;
485 flock.l_start = flp->fl_start;
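    /* fl_end is inclusive, hence the +1; the conversion back below
     * subtracts it again. */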
486 flock.l_len = flp->fl_end - flp->fl_start + 1;
488 /* Safe because there are no large files, yet */
489 #if defined(F_GETLK64) && (F_GETLK != F_GETLK64)
490 if (cmd == F_GETLK64)
492 else if (cmd == F_SETLK64)
494 else if (cmd == F_SETLKW64)
496 #endif /* F_GETLK64 && F_GETLK != F_GETLK64 */
499 code = afs_lockctl(vcp, &flock, cmd, credp);
502 if ((code == 0 || flp->fl_type == F_UNLCK) &&
503 (cmd == F_SETLK || cmd == F_SETLKW)) {
504 code = afs_posix_lock_file(fp, flp);
505 if (code && flp->fl_type != F_UNLCK) {
506 struct AFS_FLOCK flock2;
508 flock2.l_type = F_UNLCK;
510 afs_lockctl(vcp, &flock2, F_SETLK, credp);
514 /* If lockctl says there are no conflicting locks, then also check with the
515 * kernel, as lockctl knows nothing about byte range locks
517 if (code == 0 && cmd == F_GETLK && flock.l_type == F_UNLCK) {
518 afs_posix_test_lock(fp, flp);
519 /* If we found a lock in the kernel's structure, return it */
520 if (flp->fl_type != F_UNLCK) {
526 /* Convert flock back to Linux's file_lock */
527 flp->fl_type = flock.l_type;
528 flp->fl_pid = flock.l_pid;
529 flp->fl_start = flock.l_start;
530 flp->fl_end = flock.l_start + flock.l_len - 1;
533 return afs_convert_code(code);
536 #ifdef STRUCT_FILE_OPERATIONS_HAS_FLOCK
538 afs_linux_flock(struct file *fp, int cmd, struct file_lock *flp) {
540 struct vcache *vcp = VTOAFS(FILE_INODE(fp));
541 cred_t *credp = crref();
542 struct AFS_FLOCK flock;
543 /* Convert to a lock format afs_lockctl understands. */
544 memset((char *)&flock, 0, sizeof(flock));
545 flock.l_type = flp->fl_type;
546 flock.l_pid = flp->fl_pid;
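    /* BSD-style flock() locks always cover the whole file, hence the
     * maximal length. */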
549 flock.l_len = OFFSET_MAX;
551 /* Safe because there are no large files, yet */
552 #if defined(F_GETLK64) && (F_GETLK != F_GETLK64)
553 if (cmd == F_GETLK64)
555 else if (cmd == F_SETLK64)
557 else if (cmd == F_SETLKW64)
559 #endif /* F_GETLK64 && F_GETLK != F_GETLK64 */
562 code = afs_lockctl(vcp, &flock, cmd, credp);
565 if ((code == 0 || flp->fl_type == F_UNLCK) &&
566 (cmd == F_SETLK || cmd == F_SETLKW)) {
567 flp->fl_flags &=~ FL_SLEEP;
568 code = flock_lock_file_wait(fp, flp);
569 if (code && flp->fl_type != F_UNLCK) {
570 struct AFS_FLOCK flock2;
572 flock2.l_type = F_UNLCK;
574 afs_lockctl(vcp, &flock2, F_SETLK, credp);
578 /* Convert flock back to Linux's file_lock */
579 flp->fl_type = flock.l_type;
580 flp->fl_pid = flock.l_pid;
583 return afs_convert_code(code);
588 * essentially the same as afs_fsync() but we need to get the return
589 * code for the sys_close() here, not afs_linux_release(), so call
590 * afs_StoreAllSegments() with AFS_LASTSTORE
593 #if defined(FOP_FLUSH_TAKES_FL_OWNER_T)
594 afs_linux_flush(struct file *fp, fl_owner_t id)
596 afs_linux_flush(struct file *fp)
599 struct vrequest treq;
603 #if defined(AFS_CACHE_BYPASS)
609 if ((fp->f_flags & O_ACCMODE) == O_RDONLY) { /* readers don't flush */
617 vcp = VTOAFS(FILE_INODE(fp));
619 code = afs_InitReq(&treq, credp);
622 #if defined(AFS_CACHE_BYPASS)
623 /* If caching is bypassed for this file, or globally, just return 0 */
624 if(cache_bypass_strategy == ALWAYS_BYPASS_CACHE)
627 ObtainReadLock(&vcp->lock);
628 if(vcp->cachingStates & FCSBypass)
630 ReleaseReadLock(&vcp->lock);
633 /* future proof: don't rely on 0 return from afs_InitReq */
638 ObtainSharedLock(&vcp->lock, 535);
639 if ((vcp->execsOrWriters > 0) && (file_count(fp) == 1)) {
640 UpgradeSToWLock(&vcp->lock, 536);
641 if (!AFS_IS_DISCONNECTED) {
642 code = afs_StoreAllSegments(vcp,
644 AFS_SYNC | AFS_LASTSTORE);
646 afs_DisconAddDirty(vcp, VDisconWriteOsiFlush, 1);
648 ConvertWToSLock(&vcp->lock);
650 code = afs_CheckCode(code, &treq, 54);
651 ReleaseSharedLock(&vcp->lock);
658 return afs_convert_code(code);
661 struct file_operations afs_dir_fops = {
662 .read = generic_read_dir,
663 .readdir = afs_linux_readdir,
664 #ifdef HAVE_UNLOCKED_IOCTL
665 .unlocked_ioctl = afs_unlocked_xioctl,
669 #ifdef HAVE_COMPAT_IOCTL
670 .compat_ioctl = afs_unlocked_xioctl,
672 .open = afs_linux_open,
673 .release = afs_linux_release,
676 struct file_operations afs_file_fops = {
677 .read = afs_linux_read,
678 .write = afs_linux_write,
679 #ifdef GENERIC_FILE_AIO_READ
680 .aio_read = generic_file_aio_read,
681 .aio_write = generic_file_aio_write,
683 #ifdef HAVE_UNLOCKED_IOCTL
684 .unlocked_ioctl = afs_unlocked_xioctl,
688 #ifdef HAVE_COMPAT_IOCTL
689 .compat_ioctl = afs_unlocked_xioctl,
691 .mmap = afs_linux_mmap,
692 .open = afs_linux_open,
693 .flush = afs_linux_flush,
694 #if defined(STRUCT_FILE_OPERATIONS_HAS_SENDFILE)
695 .sendfile = generic_file_sendfile,
697 #if defined(STRUCT_FILE_OPERATIONS_HAS_SPLICE)
698 .splice_write = generic_file_splice_write,
699 .splice_read = generic_file_splice_read,
701 .release = afs_linux_release,
702 .fsync = afs_linux_fsync,
703 .lock = afs_linux_lock,
704 #ifdef STRUCT_FILE_OPERATIONS_HAS_FLOCK
705 .flock = afs_linux_flock,
710 /**********************************************************************
711 * AFS Linux dentry operations
712 **********************************************************************/
714 /* check_bad_parent() : Checks if this dentry's vcache is a root vcache
715 * that has its mvid (parent dir's fid) pointer set to the wrong directory
716 * due to being mounted at multiple points at once. If so, check_bad_parent()
717 * calls afs_lookup() to correct the vcache's mvid, as well as the volume's
718 * dotdotfid and mtpoint fid members.
720 * dp - dentry to be checked.
724 * This dentry's vcache's mvid will be set to the correct parent directory's
726 * This root vnode's volume will have its dotdotfid and mtpoint fids set
727 * to the correct parent and mountpoint fids.
731 check_bad_parent(struct dentry *dp)
734 struct vcache *vcp = VTOAFS(dp->d_inode), *avc = NULL;
735 struct vcache *pvc = VTOAFS(dp->d_parent->d_inode);
737 if (vcp->mvid->Fid.Volume != pvc->f.fid.Fid.Volume) { /* bad parent */
740 /* force a lookup, so vcp->mvid is fixed up */
741 afs_lookup(pvc, (char *)dp->d_name.name, &avc, credp);
742 if (!avc || vcp != avc) { /* bad, very bad.. */
743 afs_Trace4(afs_iclSetp, CM_TRACE_TMP_1S3L, ICL_TYPE_STRING,
744 "check_bad_parent: bad pointer returned from afs_lookup origvc newvc dentry",
745 ICL_TYPE_POINTER, vcp, ICL_TYPE_POINTER, avc,
746 ICL_TYPE_POINTER, dp);
749 AFS_RELE(AFSTOV(avc));
756 /* afs_linux_revalidate
757 * Ensure vcache is stat'd before use. Return 0 if entry is valid.
760 afs_linux_revalidate(struct dentry *dp)
763 struct vcache *vcp = VTOAFS(dp->d_inode);
767 if (afs_shuttingdown)
770 afs_maybe_lock_kernel();
774 /* Make this a fast path (no crref), since it's called so often. */
775 if (vcp->f.states & CStatd) {
777 if (*dp->d_name.name != '/' && vcp->mvstat == 2) /* root vnode */
778 check_bad_parent(dp); /* check and correct mvid */
786 /* This avoids the crref when we don't have to do it. Watch for
787 * changes in afs_getattr that don't get replicated here!
789 if (vcp->f.states & CStatd &&
790 (!afs_fakestat_enable || vcp->mvstat != 1) &&
792 code = afs_CopyOutAttrs(vcp, &vattr);
795 code = afs_getattr(vcp, &vattr, credp);
799 afs_fill_inode(AFSTOV(vcp), &vattr);
802 afs_maybe_unlock_kernel();
804 return afs_convert_code(code);
808 afs_linux_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat)
810 int err = afs_linux_revalidate(dentry);
812 generic_fillattr(dentry->d_inode, stat);
817 /* Validate a dentry. Return 1 if unchanged, 0 if VFS layer should re-evaluate.
818 * In kernels 2.2.10 and above, we are passed an additional flags var which
819 * may have either LOOKUP_FOLLOW or LOOKUP_DIRECTORY set, in which case
820 * we are advised to follow the entry if it is a link or to make sure that
821 * it is a directory. But since the kernel itself checks these possibilities
822 * later on, we shouldn't have to do it until later. Perhaps in the future..
825 #ifdef DOP_REVALIDATE_TAKES_NAMEIDATA
826 afs_linux_dentry_revalidate(struct dentry *dp, struct nameidata *nd)
828 afs_linux_dentry_revalidate(struct dentry *dp, int flags)
832 cred_t *credp = NULL;
833 struct vcache *vcp, *pvcp, *tvc = NULL;
835 struct afs_fakestat_state fakestate;
837 afs_maybe_lock_kernel();
839 afs_InitFakeStat(&fakestate);
843 vcp = VTOAFS(dp->d_inode);
844 pvcp = VTOAFS(dp->d_parent->d_inode); /* dget_parent()? */
846 if (vcp == afs_globalVp)
849 if (vcp->mvstat == 1) { /* mount point */
850 if (vcp->mvid && (vcp->f.states & CMValid)) {
853 struct vrequest treq;
856 code = afs_InitReq(&treq, credp);
858 (strcmp(dp->d_name.name, ".directory") == 0)) {
862 code = afs_TryEvalFakeStat(&vcp, &fakestate, &treq);
864 code = afs_EvalFakeStat(&vcp, &fakestate, &treq);
865 if ((tryEvalOnly && vcp->mvstat == 1) || code) {
866 /* a mount point, not yet replaced by its directory */
871 if (*dp->d_name.name != '/' && vcp->mvstat == 2) /* root vnode */
872 check_bad_parent(dp); /* check and correct mvid */
875 /* If the last looker changes, we should make sure the current
876 * looker still has permission to examine this file. This would
877 * always require a crref() which would be "slow".
879 if (vcp->last_looker != treq.uid) {
880 if (!afs_AccessOK(vcp, (vType(vcp) == VREG) ? PRSFS_READ : PRSFS_LOOKUP, &treq, CHECK_MODE_BITS))
883 vcp->last_looker = treq.uid;
887 /* If the parent's DataVersion has changed or the vnode
888 * is no longer valid, we need to do a full lookup. VerifyVCache
889 * isn't enough since the vnode may have been renamed.
892 if (hgetlo(pvcp->f.m.DataVersion) > dp->d_time || !(vcp->f.states & CStatd)) {
895 afs_lookup(pvcp, (char *)dp->d_name.name, &tvc, credp);
896 if (!tvc || tvc != vcp)
899 if (afs_getattr(vcp, &vattr, credp))
902 vattr2inode(AFSTOV(vcp), &vattr);
903 dp->d_time = hgetlo(pvcp->f.m.DataVersion);
906 /* should we always update the attributes at this point? */
907 /* unlikely--the vcache entry hasn't changed */
911 pvcp = VTOAFS(dp->d_parent->d_inode); /* dget_parent()? */
912 if (hgetlo(pvcp->f.m.DataVersion) > dp->d_time)
916 /* No change in parent's DataVersion so this negative
917 * lookup is still valid. BUT, if a server is down a
918 * negative lookup can result so there should be a
919 * lifetime as well. For now, always expire.
932 afs_PutFakeStat(&fakestate);
938 shrink_dcache_parent(dp);
941 afs_maybe_unlock_kernel();
945 if (have_submounts(dp))
953 afs_dentry_iput(struct dentry *dp, struct inode *ip)
955 struct vcache *vcp = VTOAFS(ip);
958 if (!AFS_IS_DISCONNECTED || (vcp->f.states & CUnlinked)) {
959 (void) afs_InactiveVCache(vcp, NULL);
962 afs_linux_clear_nfsfs_renamed(dp);
968 afs_dentry_delete(struct dentry *dp)
970 if (dp->d_inode && (VTOAFS(dp->d_inode)->f.states & CUnlinked))
971 return 1; /* bad inode? */
976 struct dentry_operations afs_dentry_operations = {
977 .d_revalidate = afs_linux_dentry_revalidate,
978 .d_delete = afs_dentry_delete,
979 .d_iput = afs_dentry_iput,
982 /**********************************************************************
983 * AFS Linux inode operations
984 **********************************************************************/
988 * Merely need to set enough of vattr to get us through the create. Note
989 * that the higher level code (open_namei) will take care of any truncation
990 * explicitly. Exclusive open is also taken care of in open_namei.
992 * name is in kernel space at this point.
995 #ifdef IOP_CREATE_TAKES_NAMEIDATA
996 afs_linux_create(struct inode *dip, struct dentry *dp, int mode,
997 struct nameidata *nd)
999 afs_linux_create(struct inode *dip, struct dentry *dp, int mode)
1003 cred_t *credp = crref();
1004 const char *name = dp->d_name.name;
1009 vattr.va_mode = mode;
1010 vattr.va_type = mode & S_IFMT;
1012 afs_maybe_lock_kernel();
1014 code = afs_create(VTOAFS(dip), (char *)name, &vattr, NONEXCL, mode,
1018 struct inode *ip = AFSTOV(vcp);
1020 afs_getattr(vcp, &vattr, credp);
1021 afs_fill_inode(ip, &vattr);
1022 insert_inode_hash(ip);
1023 dp->d_op = &afs_dentry_operations;
1024 dp->d_time = hgetlo(VTOAFS(dip)->f.m.DataVersion);
1025 d_instantiate(dp, ip);
1029 afs_maybe_unlock_kernel();
1031 return afs_convert_code(code);
1034 /* afs_linux_lookup */
1035 static struct dentry *
1036 #ifdef IOP_LOOKUP_TAKES_NAMEIDATA
1037 afs_linux_lookup(struct inode *dip, struct dentry *dp,
1038 struct nameidata *nd)
1040 afs_linux_lookup(struct inode *dip, struct dentry *dp)
1043 cred_t *credp = crref();
1044 struct vcache *vcp = NULL;
1045 const char *comp = dp->d_name.name;
1046 struct inode *ip = NULL;
1047 struct dentry *newdp = NULL;
1050 afs_maybe_lock_kernel();
1052 code = afs_lookup(VTOAFS(dip), (char *)comp, &vcp, credp);
1058 afs_getattr(vcp, &vattr, credp);
1059 afs_fill_inode(ip, &vattr);
1060 if (hlist_unhashed(&ip->i_hash))
1061 insert_inode_hash(ip);
1063 dp->d_op = &afs_dentry_operations;
1064 dp->d_time = hgetlo(VTOAFS(dip)->f.m.DataVersion);
1067 if (ip && S_ISDIR(ip->i_mode)) {
1068 struct dentry *alias;
1070 /* Try to invalidate an existing alias in favor of our new one */
1071 alias = d_find_alias(ip);
1072 /* But not if it's disconnected; then we want d_splice_alias below */
1073 if (alias && !(alias->d_flags & DCACHE_DISCONNECTED)) {
1074 if (d_invalidate(alias) == 0) {
1084 newdp = d_splice_alias(ip, dp);
1086 afs_maybe_unlock_kernel();
1089 /* It's ok for the file to not be found. That's noted by the caller by
1090 * seeing that the dp->d_inode field is NULL.
1092 if (!code || code == ENOENT)
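	return newdp;	/* presumably the elided success path: hand back the (possibly spliced) dentry */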
1095 return ERR_PTR(afs_convert_code(code));
1099 afs_linux_link(struct dentry *olddp, struct inode *dip, struct dentry *newdp)
1102 cred_t *credp = crref();
1103 const char *name = newdp->d_name.name;
1104 struct inode *oldip = olddp->d_inode;
1106 /* If afs_link returned the vnode, we could instantiate the
1107 * dentry. Since it doesn't, we drop this one and do a new lookup.
1112 code = afs_link(VTOAFS(oldip), VTOAFS(dip), (char *)name, credp);
1116 return afs_convert_code(code);
1120 afs_linux_unlink(struct inode *dip, struct dentry *dp)
1123 cred_t *credp = crref();
1124 const char *name = dp->d_name.name;
1125 struct vcache *tvc = VTOAFS(dp->d_inode);
1127 afs_maybe_lock_kernel();
1128 if (VREFCOUNT(tvc) > 1 && tvc->opens > 0
1129 && !(tvc->f.states & CUnlinked)) {
1130 struct dentry *__dp;
1140 osi_FreeSmallSpace(__name);
1141 __name = afs_newname();
1144 __dp = lookup_one_len(__name, dp->d_parent, strlen(__name));
1148 } while (__dp->d_inode != NULL);
1151 code = afs_rename(VTOAFS(dip), (char *)dp->d_name.name, VTOAFS(dip), (char *)__dp->d_name.name, credp);
1153 tvc->mvid = (void *) __name;
1156 crfree(tvc->uncred);
1158 tvc->uncred = credp;
1159 tvc->f.states |= CUnlinked;
1160 afs_linux_set_nfsfs_renamed(dp);
1162 osi_FreeSmallSpace(__name);
1167 __dp->d_time = hgetlo(VTOAFS(dip)->f.m.DataVersion);
1176 code = afs_remove(VTOAFS(dip), (char *)name, credp);
1181 afs_maybe_unlock_kernel();
1183 return afs_convert_code(code);
1188 afs_linux_symlink(struct inode *dip, struct dentry *dp, const char *target)
1191 cred_t *credp = crref();
1193 const char *name = dp->d_name.name;
1195 /* If afs_symlink returned the vnode, we could instantiate the
1196 * dentry. Since it doesn't, we drop this one and do a new lookup.
1202 code = afs_symlink(VTOAFS(dip), (char *)name, &vattr, (char *)target, credp);
1205 return afs_convert_code(code);
1209 afs_linux_mkdir(struct inode *dip, struct dentry *dp, int mode)
1212 cred_t *credp = crref();
1213 struct vcache *tvcp = NULL;
1215 const char *name = dp->d_name.name;
1217 afs_maybe_lock_kernel();
1219 vattr.va_mask = ATTR_MODE;
1220 vattr.va_mode = mode;
1222 code = afs_mkdir(VTOAFS(dip), (char *)name, &vattr, &tvcp, credp);
1225 struct inode *ip = AFSTOV(tvcp);
1227 afs_getattr(tvcp, &vattr, credp);
1228 afs_fill_inode(ip, &vattr);
1230 dp->d_op = &afs_dentry_operations;
1231 dp->d_time = hgetlo(VTOAFS(dip)->f.m.DataVersion);
1232 d_instantiate(dp, ip);
1236 afs_maybe_unlock_kernel();
1238 return afs_convert_code(code);
1242 afs_linux_rmdir(struct inode *dip, struct dentry *dp)
1245 cred_t *credp = crref();
1246 const char *name = dp->d_name.name;
1248 /* locking kernel conflicts with glock? */
1251 code = afs_rmdir(VTOAFS(dip), (char *)name, credp);
1254 /* Linux likes to see ENOTEMPTY returned from an rmdir() syscall
1255 * that failed because a directory is not empty. So, we map
1256 * EEXIST to ENOTEMPTY on Linux.
1258 if (code == EEXIST) {
1267 return afs_convert_code(code);
1272 afs_linux_rename(struct inode *oldip, struct dentry *olddp,
1273 struct inode *newip, struct dentry *newdp)
1276 cred_t *credp = crref();
1277 const char *oldname = olddp->d_name.name;
1278 const char *newname = newdp->d_name.name;
1279 struct dentry *rehash = NULL;
1281 /* Prevent any new references during rename operation. */
1282 afs_maybe_lock_kernel();
1284 if (!d_unhashed(newdp)) {
1289 if (atomic_read(&olddp->d_count) > 1)
1290 shrink_dcache_parent(olddp);
1293 code = afs_rename(VTOAFS(oldip), (char *)oldname, VTOAFS(newip), (char *)newname, credp);
1297 olddp->d_time = 0; /* force to revalidate */
1302 afs_maybe_unlock_kernel();
1305 return afs_convert_code(code);
1309 /* afs_linux_ireadlink
1310 * Internal readlink which can return link contents to user or kernel space.
1311 * Note that the buffer is NOT supposed to be null-terminated.
1314 afs_linux_ireadlink(struct inode *ip, char *target, int maxlen, uio_seg_t seg)
1317 cred_t *credp = crref();
1321 setup_uio(&tuio, &iov, target, (afs_offs_t) 0, maxlen, UIO_READ, seg);
1322 code = afs_readlink(VTOAFS(ip), &tuio, credp);
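    /* uio_resid holds whatever afs_readlink left unread, so on success the
     * difference below is the number of bytes actually copied out. */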
1326 return maxlen - tuio.uio_resid;
1328 return afs_convert_code(code);
1331 #if !defined(USABLE_KERNEL_PAGE_SYMLINK_CACHE)
1332 /* afs_linux_readlink
1333 * Fill target (which is in user space) with contents of symlink.
1336 afs_linux_readlink(struct dentry *dp, char *target, int maxlen)
1339 struct inode *ip = dp->d_inode;
1342 code = afs_linux_ireadlink(ip, target, maxlen, AFS_UIOUSER);
1348 /* afs_linux_follow_link
1349 * a file system dependent link following routine.
1351 static int afs_linux_follow_link(struct dentry *dentry, struct nameidata *nd)
1356 name = osi_Alloc(PATH_MAX);
1362 code = afs_linux_ireadlink(dentry->d_inode, name, PATH_MAX - 1, AFS_UIOSYS);
1370 code = vfs_follow_link(nd, name);
1373 osi_Free(name, PATH_MAX);
1378 #endif /* USABLE_KERNEL_PAGE_SYMLINK_CACHE */
1380 #if defined(AFS_CACHE_BYPASS)
1381 #endif /* defined(AFS_CACHE_BYPASS) */
1383 /* Populate a page by filling it from the cache file pointed at by cachefp
1384 * (which contains indicated chunk)
1385 * If task is NULL, the page copy occurs synchronously, and the routine
1386 * returns with page still locked. If task is non-NULL, then page copies
1387 * may occur in the background, and the page will be unlocked when it is
1391 afs_linux_read_cache(struct file *cachefp, struct page *page,
1392 int chunk, struct pagevec *lrupv,
1393 struct afs_pagecopy_task *task) {
1394 loff_t offset = page_offset(page);
1395 struct page *newpage, *cachepage;
1396 struct address_space *cachemapping;
1400 cachemapping = cachefp->f_dentry->d_inode->i_mapping;
1404 /* From our offset, we now need to work out which page in the disk
1405 * file it corresponds to. This will be fun ... */
1406 pageindex = (offset - AFS_CHUNKTOBASE(chunk)) >> PAGE_CACHE_SHIFT;
1408 while (cachepage == NULL) {
1409 cachepage = find_get_page(cachemapping, pageindex);
1412 newpage = page_cache_alloc_cold(cachemapping);
1418 code = add_to_page_cache(newpage, cachemapping,
1419 pageindex, GFP_KERNEL);
1421 cachepage = newpage;
1424 page_cache_get(cachepage);
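	    /* pagevec_add returns 0 once the vector is full; drain the
	     * whole batch onto the file LRU in one go. */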
1425 if (!pagevec_add(lrupv, cachepage))
1426 __pagevec_lru_add_file(lrupv);
1429 page_cache_release(newpage);
1431 if (code != -EEXIST)
1435 lock_page(cachepage);
1439 if (!PageUptodate(cachepage)) {
1440 ClearPageError(cachepage);
1441 code = cachemapping->a_ops->readpage(NULL, cachepage);
1442 if (!code && !task) {
1443 wait_on_page_locked(cachepage);
1446 unlock_page(cachepage);
1450 if (PageUptodate(cachepage)) {
1451 copy_highpage(page, cachepage);
1452 flush_dcache_page(page);
1453 SetPageUptodate(page);
1458 afs_pagecopy_queue_page(task, cachepage, page);
1470 page_cache_release(cachepage);
1476 afs_linux_readpage_fastpath(struct file *fp, struct page *pp, int *codep)
1478 loff_t offset = page_offset(pp);
1479 struct inode *ip = FILE_INODE(fp);
1480 struct vcache *avc = VTOAFS(ip);
1482 struct file *cacheFp = NULL;
1485 struct pagevec lrupv;
1487 /* Not a UFS cache, don't do anything */
1488 if (cacheDiskType != AFS_FCACHE_TYPE_UFS)
1491 /* Can't do anything if the vcache isn't stat'd, or if the read
1492 * crosses a chunk boundary.
1494 if (!(avc->f.states & CStatd) ||
1495 AFS_CHUNK(offset) != AFS_CHUNK(offset + PAGE_SIZE)) {
1499 ObtainWriteLock(&avc->lock, 911);
1501 /* XXX - See if hinting actually makes things faster !!! */
1503 /* See if we have a suitable entry already cached */
1507 /* We need to lock xdcache, then dcache, to handle situations where
1508 * the hint is on the free list. However, we can't safely do this
1509 * according to the locking hierarchy. So, use a non-blocking lock.
1511 ObtainReadLock(&afs_xdcache);
1512 dcLocked = ( 0 == NBObtainReadLock(&tdc->lock));
1514 if (dcLocked && (tdc->index != NULLIDX)
1515 && !FidCmp(&tdc->f.fid, &avc->f.fid)
1516 && tdc->f.chunk == AFS_CHUNK(offset)
1517 && !(afs_indexFlags[tdc->index] & (IFFree | IFDiscarded))) {
1518 /* Bonus - the hint was correct */
1521 /* Only destroy the hint if it's actually invalid, not if there's
1522 * just been a locking failure */
1524 ReleaseReadLock(&tdc->lock);
1531 ReleaseReadLock(&afs_xdcache);
1534 /* No hint, or hint is no longer valid - see if we can get something
1535 * directly from the dcache
1538 tdc = afs_FindDCache(avc, offset);
1541 ReleaseWriteLock(&avc->lock);
1546 ObtainReadLock(&tdc->lock);
1548 /* Is the dcache we've been given currently up to date? */
1549 if (!hsame(avc->f.m.DataVersion, tdc->f.versionNo) ||
1550 (tdc->dflags & DFFetching)) {
1551 ReleaseWriteLock(&avc->lock);
1552 ReleaseReadLock(&tdc->lock);
1557 /* Update our hint for future abuse */
1560 /* Okay, so we've now got a cache file that is up to date */
1562 /* XXX - I suspect we should be locking the inodes before we use them! */
1564 cacheFp = afs_linux_raw_open(&tdc->f.inode, NULL);
1565 pagevec_init(&lrupv, 0);
1567 code = afs_linux_read_cache(cacheFp, pp, tdc->f.chunk, &lrupv, NULL);
1569 if (pagevec_count(&lrupv))
1570 __pagevec_lru_add_file(&lrupv);
1572 filp_close(cacheFp, NULL);
1575 ReleaseReadLock(&tdc->lock);
1576 ReleaseWriteLock(&avc->lock);
1583 /* afs_linux_readpage
1585 * This function is split into two, because prepare_write/begin_write
1586 * require a readpage call which doesn't unlock the resulting page upon
1590 afs_linux_fillpage(struct file *fp, struct page *pp)
1595 struct iovec *iovecp;
1596 struct inode *ip = FILE_INODE(fp);
1597 afs_int32 cnt = page_count(pp);
1598 struct vcache *avc = VTOAFS(ip);
1599 afs_offs_t offset = page_offset(pp);
1603 if (afs_linux_readpage_fastpath(fp, pp, &code)) {
1613 auio = osi_Alloc(sizeof(uio_t));
1614 iovecp = osi_Alloc(sizeof(struct iovec));
1616 setup_uio(auio, iovecp, (char *)address, offset, PAGE_SIZE, UIO_READ,
1619 afs_maybe_lock_kernel();
1622 afs_Trace4(afs_iclSetp, CM_TRACE_READPAGE, ICL_TYPE_POINTER, ip,
1623 ICL_TYPE_POINTER, pp, ICL_TYPE_INT32, cnt, ICL_TYPE_INT32,
1624 99999); /* not a possible code value */
1626 code = afs_rdwr(avc, auio, UIO_READ, 0, credp);
1628 afs_Trace4(afs_iclSetp, CM_TRACE_READPAGE, ICL_TYPE_POINTER, ip,
1629 ICL_TYPE_POINTER, pp, ICL_TYPE_INT32, cnt, ICL_TYPE_INT32,
1631 AFS_DISCON_UNLOCK();
1633 afs_maybe_unlock_kernel();
1635 /* XXX valid for no-cache also? Check last bits of files... :)
1636 * Cognate code goes in afs_NoCacheFetchProc. */
1637 if (auio->uio_resid) /* zero remainder of page */
1638 memset((void *)(address + (PAGE_SIZE - auio->uio_resid)), 0,
1641 flush_dcache_page(pp);
1642 SetPageUptodate(pp);
1647 osi_Free(auio, sizeof(uio_t));
1648 osi_Free(iovecp, sizeof(struct iovec));
1651 return afs_convert_code(code);
1655 afs_linux_prefetch(struct file *fp, struct page *pp)
1658 struct vcache *avc = VTOAFS(FILE_INODE(fp));
1659 afs_offs_t offset = page_offset(pp);
1661 if (AFS_CHUNKOFFSET(offset) == 0) {
1663 struct vrequest treq;
1668 code = afs_InitReq(&treq, credp);
1669 if (!code && !NBObtainWriteLock(&avc->lock, 534)) {
1670 tdc = afs_FindDCache(avc, offset);
1672 if (!(tdc->mflags & DFNextStarted))
1673 afs_PrefetchChunk(avc, tdc, credp, &treq);
1676 ReleaseWriteLock(&avc->lock);
1681 return afs_convert_code(code);
1685 #if defined(AFS_CACHE_BYPASS)
1688 afs_linux_bypass_readpages(struct file *fp, struct address_space *mapping,
1689 struct list_head *page_list, unsigned num_pages)
1694 struct iovec* iovecp;
1695 struct nocache_read_request *ancr;
1696 struct page *pp, *ppt;
1697 struct pagevec lrupv;
1701 struct inode *ip = FILE_INODE(fp);
1702 struct vcache *avc = VTOAFS(ip);
1703 afs_int32 base_index = 0;
1704 afs_int32 page_count = 0;
1707 /* background thread must free: iovecp, auio, ancr */
1708 iovecp = osi_Alloc(num_pages * sizeof(struct iovec));
1710 auio = osi_Alloc(sizeof(uio_t));
1711 auio->uio_iov = iovecp;
1712 auio->uio_iovcnt = num_pages;
1713 auio->uio_flag = UIO_READ;
1714 auio->uio_seg = AFS_UIOSYS;
1715 auio->uio_resid = num_pages * PAGE_SIZE;
1717 ancr = osi_Alloc(sizeof(struct nocache_read_request));
1719 ancr->offset = auio->uio_offset;
1720 ancr->length = auio->uio_resid;
1722 pagevec_init(&lrupv, 0);
1724 for(page_ix = 0; page_ix < num_pages; ++page_ix) {
1726 if(list_empty(page_list))
1729 pp = list_entry(page_list->prev, struct page, lru);
1730 /* If we allocate a page and don't remove it from page_list,
1731 * the page cache gets upset. */
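	/* isize is the index of the last page the file actually covers;
	 * anything beyond it is skipped below. */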
1733 isize = (i_size_read(fp->f_mapping->host) - 1) >> PAGE_CACHE_SHIFT;
1734 if(pp->index > isize) {
1741 offset = ((lof_t) pp->index) << PAGE_CACHE_SHIFT;
1742 auio->uio_offset = offset;
1743 base_index = pp->index;
1745 iovecp[page_ix].iov_len = PAGE_SIZE;
1746 code = add_to_page_cache(pp, mapping, pp->index, GFP_KERNEL);
1747 if(base_index != pp->index) {
1750 page_cache_release(pp);
1751 iovecp[page_ix].iov_base = (void *) 0;
1759 page_cache_release(pp);
1760 iovecp[page_ix].iov_base = (void *) 0;
1763 if(!PageLocked(pp)) {
1767 /* save the page for background map */
1768 iovecp[page_ix].iov_base = (void*) pp;
1770 /* and put it on the LRU cache */
1771 if (!pagevec_add(&lrupv, pp))
1772 __pagevec_lru_add(&lrupv);
1776 /* If there were useful pages in the page list, make sure all pages
1777 * are in the LRU cache, then schedule the read */
1779 pagevec_lru_add(&lrupv);
1781 code = afs_ReadNoCache(avc, ancr, credp);
1784 /* If there is nothing for the background thread to handle,
1785 * it won't be freeing the things that we never gave it */
1786 osi_Free(iovecp, num_pages * sizeof(struct iovec));
1787 osi_Free(auio, sizeof(uio_t));
1788 osi_Free(ancr, sizeof(struct nocache_read_request));
1790 /* we do not flush, release, or unmap pages--that will be
1791 * done for us by the background thread as each page comes in
1792 * from the fileserver */
1794 return afs_convert_code(code);
1799 afs_linux_bypass_readpage(struct file *fp, struct page *pp)
1801 cred_t *credp = NULL;
1803 struct iovec *iovecp;
1804 struct nocache_read_request *ancr;
1809 /* If the page is past the end of the file, skip it */
1810 isize = (i_size_read(fp->f_mapping->host) - 1) >> PAGE_CACHE_SHIFT;
1811 if(pp->index > isize) {
1816 /* receiver frees */
1817 auio = osi_Alloc(sizeof(uio_t));
1818 iovecp = osi_Alloc(sizeof(struct iovec));
1820 /* address can be NULL, because we overwrite it with 'pp', below */
1821 setup_uio(auio, iovecp, NULL, (pp->index << PAGE_CACHE_SHIFT),
1822 PAGE_SIZE, UIO_READ, AFS_UIOSYS);
1824 /* save the page for background map */
1825 /* XXX - Shouldn't we get a reference count here? */
1826 auio->uio_iov->iov_base = (void*) pp;
1827 /* the background thread will free this */
1828 ancr = osi_Alloc(sizeof(struct nocache_read_request));
1830 ancr->offset = offset;
1831 ancr->length = PAGE_SIZE;
1834 afs_maybe_lock_kernel();
1835 code = afs_ReadNoCache(VTOAFS(FILE_INODE(fp)), ancr, credp);
1836 afs_maybe_unlock_kernel();
1839 return afs_convert_code(code);
1843 afs_linux_can_bypass(struct inode *ip) {
1844 switch(cache_bypass_strategy) {
1845 case NEVER_BYPASS_CACHE:
1847 case ALWAYS_BYPASS_CACHE:
1849 case LARGE_FILES_BYPASS_CACHE:
1850 if(i_size_read(ip) > cache_bypass_threshold)
1857 /* Check if a file is permitted to bypass the cache by policy, and modify
1858 * the cache bypass state recorded for that file */
1861 afs_linux_bypass_check(struct inode *ip) {
1864 int bypass = afs_linux_can_bypass(ip);
1867 trydo_cache_transition(VTOAFS(ip), credp, bypass);
1875 afs_linux_bypass_check(struct inode *ip) {
1879 afs_linux_bypass_readpage(struct file *fp, struct page *pp) {
1883 afs_linux_bypass_readpages(struct file *fp, struct address_space *mapping,
1884 struct list_head *page_list, unsigned int num_pages) {
1890 afs_linux_readpage(struct file *fp, struct page *pp)
1894 if (afs_linux_bypass_check(FILE_INODE(fp))) {
1895 code = afs_linux_bypass_readpage(fp, pp);
1897 code = afs_linux_fillpage(fp, pp);
1899 code = afs_linux_prefetch(fp, pp);
1906 /* Readpages reads a number of pages for a particular file. We use
1907 * this to optimise the reading, by limiting the number of times we have
1908 * to look up, lock and open vcaches and dcaches
1912 afs_linux_readpages(struct file *fp, struct address_space *mapping,
1913 struct list_head *page_list, unsigned int num_pages)
1915 struct inode *inode = mapping->host;
1916 struct vcache *avc = VTOAFS(inode);
1918 struct file *cacheFp = NULL;
1920 unsigned int page_idx;
1922 struct pagevec lrupv;
1923 struct afs_pagecopy_task *task;
1925 if (afs_linux_bypass_check(inode))
1926 return afs_linux_bypass_readpages(fp, mapping, page_list, num_pages);
1929 if ((code = afs_linux_VerifyVCache(avc, NULL))) {
1934 ObtainWriteLock(&avc->lock, 912);
1937 task = afs_pagecopy_init_task();
1940 pagevec_init(&lrupv, 0);
1941 for (page_idx = 0; page_idx < num_pages; page_idx++) {
1942 struct page *page = list_entry(page_list->prev, struct page, lru);
1943 list_del(&page->lru);
1944 offset = page_offset(page);
1946 if (tdc && tdc->f.chunk != AFS_CHUNK(offset)) {
1948 ReleaseReadLock(&tdc->lock);
1953 filp_close(cacheFp, NULL);
1958 if ((tdc = afs_FindDCache(avc, offset))) {
1959 ObtainReadLock(&tdc->lock);
1960 if (!hsame(avc->f.m.DataVersion, tdc->f.versionNo) ||
1961 (tdc->dflags & DFFetching)) {
1962 ReleaseReadLock(&tdc->lock);
1969 cacheFp = afs_linux_raw_open(&tdc->f.inode, NULL);
1972 if (tdc && !add_to_page_cache(page, mapping, page->index,
1974 page_cache_get(page);
1975 if (!pagevec_add(&lrupv, page))
1976 __pagevec_lru_add_file(&lrupv);
1978 afs_linux_read_cache(cacheFp, page, tdc->f.chunk, &lrupv, task);
1980 page_cache_release(page);
1982 if (pagevec_count(&lrupv))
1983 __pagevec_lru_add_file(&lrupv);
1986 filp_close(cacheFp, NULL);
1988 afs_pagecopy_put_task(task);
1992 ReleaseReadLock(&tdc->lock);
1996 ReleaseWriteLock(&avc->lock);
2002 afs_linux_writepage_sync(struct inode *ip, struct page *pp,
2003 unsigned long offset, unsigned int count)
2005 struct vcache *vcp = VTOAFS(ip);
2014 buffer = kmap(pp) + offset;
2015 base = (((loff_t) pp->index) << PAGE_CACHE_SHIFT) + offset;
2018 afs_maybe_lock_kernel();
2020 afs_Trace4(afs_iclSetp, CM_TRACE_UPDATEPAGE, ICL_TYPE_POINTER, vcp,
2021 ICL_TYPE_POINTER, pp, ICL_TYPE_INT32, page_count(pp),
2022 ICL_TYPE_INT32, 99999);
2024 ObtainWriteLock(&vcp->lock, 532);
2025 if (vcp->f.states & CPageWrite) {
2026 ReleaseWriteLock(&vcp->lock);
2028 afs_maybe_unlock_kernel();
2031 return AOP_WRITEPAGE_ACTIVATE;
2033 vcp->f.states |= CPageWrite;
2034 ReleaseWriteLock(&vcp->lock);
2036 setup_uio(&tuio, &iovec, buffer, base, count, UIO_WRITE, AFS_UIOSYS);
2038 code = afs_write(vcp, &tuio, f_flags, credp, 0);
2040 i_size_write(ip, vcp->f.m.Length);
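	/* i_blocks counts 512-byte sectors: round the length up to 1K
	 * blocks, then double. */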
2041 ip->i_blocks = ((vcp->f.m.Length + 1023) >> 10) << 1;
2043 ObtainWriteLock(&vcp->lock, 533);
2045 struct vrequest treq;
2047 if (!afs_InitReq(&treq, credp))
2048 code = afs_DoPartialWrite(vcp, &treq);
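	/* on error convert for the VFS; on success report the bytes actually written */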
2050 code = code ? afs_convert_code(code) : count - tuio.uio_resid;
2052 vcp->f.states &= ~CPageWrite;
2053 ReleaseWriteLock(&vcp->lock);
2055 afs_Trace4(afs_iclSetp, CM_TRACE_UPDATEPAGE, ICL_TYPE_POINTER, vcp,
2056 ICL_TYPE_POINTER, pp, ICL_TYPE_INT32, page_count(pp),
2057 ICL_TYPE_INT32, code);
2060 afs_maybe_unlock_kernel();
2069 #ifdef AOP_WRITEPAGE_TAKES_WRITEBACK_CONTROL
2070 afs_linux_writepage(struct page *pp, struct writeback_control *wbc)
2072 afs_linux_writepage(struct page *pp)
2075 struct address_space *mapping = pp->mapping;
2076 struct inode *inode;
2077 unsigned long end_index;
2078 unsigned offset = PAGE_CACHE_SIZE;
2081 if (PageReclaim(pp)) {
2082 return AOP_WRITEPAGE_ACTIVATE;
2085 inode = (struct inode *)mapping->host;
2086 end_index = i_size_read(inode) >> PAGE_CACHE_SHIFT;
2089 if (pp->index < end_index)
2091 /* things got complicated... */
2092 offset = i_size_read(inode) & (PAGE_CACHE_SIZE - 1);
2093 /* OK, are we completely out? */
2094 if (pp->index >= end_index + 1 || !offset)
2097 status = afs_linux_writepage_sync(inode, pp, 0, offset);
2098 SetPageUptodate(pp);
2099 if ( status != AOP_WRITEPAGE_ACTIVATE )
2101 if (status == offset)
2107 /* afs_linux_permission
2108 * Check access rights - returns error if can't check or permission denied.
2111 #ifdef IOP_PERMISSION_TAKES_NAMEIDATA
2112 afs_linux_permission(struct inode *ip, int mode, struct nameidata *nd)
2114 afs_linux_permission(struct inode *ip, int mode)
2118 cred_t *credp = crref();
2122 if (mode & MAY_EXEC)
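	tmp |= VEXEC;	/* presumed elided body: map the MAY_* bits onto AFS V* access bits */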
2124 if (mode & MAY_READ)
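	tmp |= VREAD;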
2126 if (mode & MAY_WRITE)
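	tmp |= VWRITE;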
2128 code = afs_access(VTOAFS(ip), tmp, credp);
2132 return afs_convert_code(code);
2135 #if !defined(HAVE_WRITE_BEGIN)
2137 afs_linux_commit_write(struct file *file, struct page *page, unsigned offset,
2142 code = afs_linux_writepage_sync(file->f_dentry->d_inode, page,
2143 offset, to - offset);
2149 afs_linux_prepare_write(struct file *file, struct page *page, unsigned from,
2157 afs_linux_write_end(struct file *file, struct address_space *mapping,
2158 loff_t pos, unsigned len, unsigned copied,
2159 struct page *page, void *fsdata)
2162 unsigned from = pos & (PAGE_CACHE_SIZE - 1);
2164 code = afs_linux_writepage_sync(file->f_dentry->d_inode, page,
2167 page_cache_release(page);
2172 afs_linux_write_begin(struct file *file, struct address_space *mapping,
2173 loff_t pos, unsigned len, unsigned flags,
2174 struct page **pagep, void **fsdata)
2177 pgoff_t index = pos >> PAGE_CACHE_SHIFT;
2178 page = grab_cache_page_write_begin(mapping, index, flags);
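    /* a sketch of the elided remainder: hand the locked page back to
     * the caller (grab_cache_page_write_begin returns it locked) */
    if (!page)
	return -ENOMEM;
    *pagep = page;
    return 0;
}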
2186 static struct inode_operations afs_file_iops = {
2187 .permission = afs_linux_permission,
2188 .getattr = afs_linux_getattr,
2189 .setattr = afs_notify_change,
2192 static struct address_space_operations afs_file_aops = {
2193 .readpage = afs_linux_readpage,
2194 .readpages = afs_linux_readpages,
2195 .writepage = afs_linux_writepage,
2196 #if defined (HAVE_WRITE_BEGIN)
2197 .write_begin = afs_linux_write_begin,
2198 .write_end = afs_linux_write_end,
2200 .commit_write = afs_linux_commit_write,
2201 .prepare_write = afs_linux_prepare_write,
2206 /* Separate ops vector for directories. Linux 2.2 tests type of inode
2207 * by what sort of operation is allowed...
2210 static struct inode_operations afs_dir_iops = {
2211 .setattr = afs_notify_change,
2212 .create = afs_linux_create,
2213 .lookup = afs_linux_lookup,
2214 .link = afs_linux_link,
2215 .unlink = afs_linux_unlink,
2216 .symlink = afs_linux_symlink,
2217 .mkdir = afs_linux_mkdir,
2218 .rmdir = afs_linux_rmdir,
2219 .rename = afs_linux_rename,
2220 .getattr = afs_linux_getattr,
2221 .permission = afs_linux_permission,
2224 /* We really need a separate symlink set of ops, since do_follow_link()
2225 * determines if it _is_ a link by checking if the follow_link op is set.
2227 #if defined(USABLE_KERNEL_PAGE_SYMLINK_CACHE)
2229 afs_symlink_filler(struct file *file, struct page *page)
2231 struct inode *ip = (struct inode *)page->mapping->host;
2232 char *p = (char *)kmap(page);
2235 afs_maybe_lock_kernel();
2237 code = afs_linux_ireadlink(ip, p, PAGE_SIZE, AFS_UIOSYS);
2242 p[code] = '\0'; /* null terminate? */
2243 afs_maybe_unlock_kernel();
2245 SetPageUptodate(page);
2251 afs_maybe_unlock_kernel();
2259 static struct address_space_operations afs_symlink_aops = {
2260 .readpage = afs_symlink_filler
2262 #endif /* USABLE_KERNEL_PAGE_SYMLINK_CACHE */
2264 static struct inode_operations afs_symlink_iops = {
2265 #if defined(USABLE_KERNEL_PAGE_SYMLINK_CACHE)
2266 .readlink = page_readlink,
2267 # if defined(HAVE_KERNEL_PAGE_FOLLOW_LINK)
2268 .follow_link = page_follow_link,
2270 .follow_link = page_follow_link_light,
2271 .put_link = page_put_link,
2273 #else /* !defined(USABLE_KERNEL_PAGE_SYMLINK_CACHE) */
2274 .readlink = afs_linux_readlink,
2275 .follow_link = afs_linux_follow_link,
2276 #endif /* USABLE_KERNEL_PAGE_SYMLINK_CACHE */
2277 .setattr = afs_notify_change,
2281 afs_fill_inode(struct inode *ip, struct vattr *vattr)
2285 vattr2inode(ip, vattr);
2287 ip->i_mapping->backing_dev_info = &afs_backing_dev_info;
2288 /* Reset ops if symlink or directory. */
2289 if (S_ISREG(ip->i_mode)) {
2290 ip->i_op = &afs_file_iops;
2291 ip->i_fop = &afs_file_fops;
2292 ip->i_data.a_ops = &afs_file_aops;
2294 } else if (S_ISDIR(ip->i_mode)) {
2295 ip->i_op = &afs_dir_iops;
2296 ip->i_fop = &afs_dir_fops;
2298 } else if (S_ISLNK(ip->i_mode)) {
2299 ip->i_op = &afs_symlink_iops;
2300 #if defined(USABLE_KERNEL_PAGE_SYMLINK_CACHE)
2301 ip->i_data.a_ops = &afs_symlink_aops;
2302 ip->i_mapping = &ip->i_data;