2 * Copyright 2000, International Business Machines Corporation and others.
5 * This software has been released under the terms of the IBM Public
6 * License. For details, see the LICENSE file in the top-level source
7 * directory or online at http://www.openafs.org/dl/license10.html
11 * Linux specific vnodeops. Also includes the glue routines required to call
14 * So far the only truly scary part is that Linux relies on the inode cache
15 * to be up to date. Don't you dare break a callback and expect an fstat
16 * to give you meaningful information. This appears to be fixed in the 2.1
17 * development kernels. As it is we can fix this now by intercepting the
21 #include <afsconfig.h>
22 #include "afs/param.h"
25 #include "afs/sysincludes.h"
26 #include "afsincludes.h"
27 #include "afs/afs_stats.h"
29 #ifdef HAVE_MM_INLINE_H
30 #include <linux/mm_inline.h>
32 #include <linux/pagemap.h>
33 #include <linux/writeback.h>
34 #include <linux/pagevec.h>
36 #include "afs/afs_bypasscache.h"
38 #include "osi_compat.h"
39 #include "osi_pagecopy.h"
41 #ifndef HAVE_LINUX_PAGEVEC_LRU_ADD_FILE
42 #define __pagevec_lru_add_file __pagevec_lru_add
46 #define MAX_ERRNO 1000L
49 extern struct backing_dev_info *afs_backing_dev_info;
51 extern struct vcache *afs_globalVp;
52 extern int afs_notify_change(struct dentry *dp, struct iattr *iattrp);
54 /* This function converts a positive error code from AFS into a negative
55 * code suitable for passing into the Linux VFS layer. It checks that the
56 * error code is within the permissible bounds for the ERR_PTR mechanism.
58 * _All_ error codes which come from the AFS layer should be passed through
59 * this function before being returned to the kernel.
63 afs_convert_code(int code) {
64 if ((code >= 0) && (code <= MAX_ERRNO))
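/* A minimal sketch of the whole helper, assuming the elided branch negates
 * in-range codes and anything else collapses to a generic I/O error (the
 * -EIO fallback is an assumption, not confirmed by this excerpt):
 *
 *	static inline int
 *	afs_convert_code(int code) {
 *	    if ((code >= 0) && (code <= MAX_ERRNO))
 *		return -code;
 *	    else
 *		return -EIO;
 *	}
 */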
70 /* Linux doesn't require a credp for many functions, and crref is an expensive
71 * operation. This helper function avoids obtaining it for VerifyVCache calls
75 afs_linux_VerifyVCache(struct vcache *avc, cred_t **retcred) {
80 if (avc->f.states & CStatd) {
88 code = afs_InitReq(&treq, credp);
90 code = afs_VerifyVCache2(avc, &treq);
97 return afs_convert_code(code);
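/* A note on the retcred contract above (an assumption inferred from the
 * visible fragments, not confirmed by this excerpt): when the vcache is
 * already statd no cred is obtained at all, so the expensive crref() is
 * skipped entirely; otherwise the crref()'d cred is either handed back
 * through *retcred for the caller to crfree() later, or released here when
 * retcred is NULL. */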
101 afs_linux_read(struct file *fp, char *buf, size_t count, loff_t * offp)
104 struct vcache *vcp = VTOAFS(fp->f_dentry->d_inode);
107 afs_Trace4(afs_iclSetp, CM_TRACE_READOP, ICL_TYPE_POINTER, vcp,
108 ICL_TYPE_OFFSET, offp, ICL_TYPE_INT32, count, ICL_TYPE_INT32,
110 code = afs_linux_VerifyVCache(vcp, NULL);
113 /* Linux's FlushPages implementation doesn't ever use credp,
114 * so we optimise by not using it */
115 osi_FlushPages(vcp, NULL); /* ensure stale pages are gone */
117 code = do_sync_read(fp, buf, count, offp);
121 afs_Trace4(afs_iclSetp, CM_TRACE_READOP, ICL_TYPE_POINTER, vcp,
122 ICL_TYPE_OFFSET, offp, ICL_TYPE_INT32, count, ICL_TYPE_INT32,
129 /* Now we have integrated VM for writes as well as reads. generic_file_write
130 * also takes care of re-positioning the pointer if file is open in append
131 * mode. Call fake open/close to ensure we do writes of core dumps.
134 afs_linux_write(struct file *fp, const char *buf, size_t count, loff_t * offp)
137 struct vcache *vcp = VTOAFS(fp->f_dentry->d_inode);
142 afs_Trace4(afs_iclSetp, CM_TRACE_WRITEOP, ICL_TYPE_POINTER, vcp,
143 ICL_TYPE_OFFSET, offp, ICL_TYPE_INT32, count, ICL_TYPE_INT32,
144 (fp->f_flags & O_APPEND) ? 99998 : 99999);
146 code = afs_linux_VerifyVCache(vcp, &credp);
148 ObtainWriteLock(&vcp->lock, 529);
150 ReleaseWriteLock(&vcp->lock);
153 code = do_sync_write(fp, buf, count, offp);
157 ObtainWriteLock(&vcp->lock, 530);
159 if (vcp->execsOrWriters == 1 && !credp)
162 afs_FakeClose(vcp, credp);
163 ReleaseWriteLock(&vcp->lock);
165 afs_Trace4(afs_iclSetp, CM_TRACE_WRITEOP, ICL_TYPE_POINTER, vcp,
166 ICL_TYPE_OFFSET, offp, ICL_TYPE_INT32, count, ICL_TYPE_INT32,
175 extern int BlobScan(struct dcache * afile, afs_int32 ablob);
177 /* This is a complete rewrite of afs_readdir, since we can make use of
178 * filldir instead of afs_readdir_move. Note that changes to vcache/dcache
179 * handling and use of bulkstats will need to be reflected here as well.
182 afs_linux_readdir(struct file *fp, void *dirbuf, filldir_t filldir)
184 struct vcache *avc = VTOAFS(FILE_INODE(fp));
185 struct vrequest treq;
193 afs_size_t origOffset, tlen;
194 cred_t *credp = crref();
195 struct afs_fakestat_state fakestat;
198 AFS_STATCNT(afs_readdir);
200 code = afs_convert_code(afs_InitReq(&treq, credp));
205 afs_InitFakeStat(&fakestat);
206 code = afs_convert_code(afs_EvalFakeStat(&avc, &fakestat, &treq));
210 /* update the cache entry */
212 code = afs_convert_code(afs_VerifyVCache2(avc, &treq));
216 /* get a reference to the entire directory */
217 tdc = afs_GetDCache(avc, (afs_size_t) 0, &treq, &origOffset, &tlen, 1);
223 ObtainSharedLock(&avc->lock, 810);
224 UpgradeSToWLock(&avc->lock, 811);
225 ObtainReadLock(&tdc->lock);
227 * Make sure that the data in the cache is current. There are two
228 * cases we need to worry about:
229 * 1. The cache data is being fetched by another process.
230 * 2. The cache data is no longer valid
232 while ((avc->f.states & CStatd)
233 && (tdc->dflags & DFFetching)
234 && hsame(avc->f.m.DataVersion, tdc->f.versionNo)) {
235 ReleaseReadLock(&tdc->lock);
236 ReleaseSharedLock(&avc->lock);
237 afs_osi_Sleep(&tdc->validPos);
238 ObtainSharedLock(&avc->lock, 812);
239 ObtainReadLock(&tdc->lock);
241 if (!(avc->f.states & CStatd)
242 || !hsame(avc->f.m.DataVersion, tdc->f.versionNo)) {
243 ReleaseReadLock(&tdc->lock);
244 ReleaseSharedLock(&avc->lock);
249 /* Set the readdir-in-progress flag, and downgrade the lock
250 * to shared so others will be able to acquire a read lock.
252 avc->f.states |= CReadDir;
253 avc->dcreaddir = tdc;
254 avc->readdir_pid = MyPidxx2Pid(MyPidxx);
255 ConvertWToSLock(&avc->lock);
257 /* Fill in until we get an error or we're done. This implementation
258 * takes an offset in units of blobs, rather than bytes.
261 offset = (int) fp->f_pos;
263 dirpos = BlobScan(tdc, offset);
267 de = afs_dir_GetBlob(tdc, dirpos);
271 ino = afs_calc_inum (avc->f.fid.Fid.Volume, ntohl(de->fid.vnode));
274 len = strlen(de->name);
276 printf("afs_linux_readdir: afs_dir_GetBlob failed, null name (inode %lx, dirpos %d)\n",
277 (unsigned long)&tdc->f.inode, dirpos);
279 ReleaseSharedLock(&avc->lock);
285 /* filldir returns -EINVAL when the buffer is full. */
287 unsigned int type = DT_UNKNOWN;
288 struct VenusFid afid;
291 afid.Cell = avc->f.fid.Cell;
292 afid.Fid.Volume = avc->f.fid.Fid.Volume;
293 afid.Fid.Vnode = ntohl(de->fid.vnode);
294 afid.Fid.Unique = ntohl(de->fid.vunique);
295 if ((avc->f.states & CForeign) == 0 && (ntohl(de->fid.vnode) & 1)) {
297 } else if ((tvc = afs_FindVCache(&afid, 0, 0))) {
300 } else if (((tvc->f.states) & (CStatd | CTruth))) {
301 /* CTruth will be set if the object has
306 else if (vtype == VREG)
308 /* Don't do this until we're sure it can't be a mtpt */
309 /* else if (vtype == VLNK)
311 /* what other types does AFS support? */
313 /* clean up from afs_FindVCache */
317 * If this is NFS readdirplus, then the filler is going to
318 * call getattr on this inode, which will deadlock if we're
322 code = (*filldir) (dirbuf, de->name, len, offset, ino, type);
328 offset = dirpos + 1 + ((len + 16) >> 5);
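/* Why "dirpos + 1 + ((len + 16) >> 5)": a directory entry spans
 * 1 + ((len + 16) >> 5) 32-byte blobs -- the first blob holds the entry
 * header plus up to 16 bytes of the name (including its NUL), and each
 * further 32 bytes of name take another blob -- so this advances offset to
 * the blob index where the next BlobScan should resume. */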
330 /* If filldir didn't fill in the last one this is still pointing to that
333 fp->f_pos = (loff_t) offset;
335 ReleaseReadLock(&tdc->lock);
337 UpgradeSToWLock(&avc->lock, 813);
338 avc->f.states &= ~CReadDir;
340 avc->readdir_pid = 0;
341 ReleaseSharedLock(&avc->lock);
345 afs_PutFakeStat(&fakestat);
352 /* in afs_pioctl.c */
353 extern int afs_xioctl(struct inode *ip, struct file *fp, unsigned int com,
356 #if defined(HAVE_UNLOCKED_IOCTL) || defined(HAVE_COMPAT_IOCTL)
357 static long afs_unlocked_xioctl(struct file *fp, unsigned int com,
359 return afs_xioctl(FILE_INODE(fp), fp, com, arg);
366 afs_linux_mmap(struct file *fp, struct vm_area_struct *vmap)
368 struct vcache *vcp = VTOAFS(FILE_INODE(fp));
372 afs_Trace3(afs_iclSetp, CM_TRACE_GMAP, ICL_TYPE_POINTER, vcp,
373 ICL_TYPE_POINTER, vmap->vm_start, ICL_TYPE_INT32,
374 vmap->vm_end - vmap->vm_start);
376 /* get a validated vcache entry */
377 code = afs_linux_VerifyVCache(vcp, NULL);
380 /* Linux's Flushpage implementation doesn't use credp, so optimise
381 * our code to not need to crref() it */
382 osi_FlushPages(vcp, NULL); /* ensure stale pages are gone */
384 code = generic_file_mmap(fp, vmap);
387 vcp->f.states |= CMAPPED;
395 afs_linux_open(struct inode *ip, struct file *fp)
397 struct vcache *vcp = VTOAFS(ip);
398 cred_t *credp = crref();
402 code = afs_open(&vcp, fp->f_flags, credp);
406 return afs_convert_code(code);
410 afs_linux_release(struct inode *ip, struct file *fp)
412 struct vcache *vcp = VTOAFS(ip);
413 cred_t *credp = crref();
417 code = afs_close(vcp, fp->f_flags, credp);
418 ObtainWriteLock(&vcp->lock, 807);
423 ReleaseWriteLock(&vcp->lock);
427 return afs_convert_code(code);
431 #if defined(FOP_FSYNC_TAKES_DENTRY)
432 afs_linux_fsync(struct file *fp, struct dentry *dp, int datasync)
434 afs_linux_fsync(struct file *fp, int datasync)
438 struct inode *ip = FILE_INODE(fp);
439 cred_t *credp = crref();
442 code = afs_fsync(VTOAFS(ip), credp);
445 return afs_convert_code(code);
451 afs_linux_lock(struct file *fp, int cmd, struct file_lock *flp)
454 struct vcache *vcp = VTOAFS(FILE_INODE(fp));
455 cred_t *credp = crref();
456 struct AFS_FLOCK flock;
458 /* Convert to a lock format afs_lockctl understands. */
459 memset(&flock, 0, sizeof(flock));
460 flock.l_type = flp->fl_type;
461 flock.l_pid = flp->fl_pid;
463 flock.l_start = flp->fl_start;
464 if (flp->fl_end == OFFSET_MAX)
465 flock.l_len = 0; /* Lock to end of file */
467 flock.l_len = flp->fl_end - flp->fl_start + 1;
469 /* Safe because there are no large files, yet */
470 #if defined(F_GETLK64) && (F_GETLK != F_GETLK64)
471 if (cmd == F_GETLK64)
473 else if (cmd == F_SETLK64)
475 else if (cmd == F_SETLKW64)
477 #endif /* F_GETLK64 && F_GETLK != F_GETLK64 */
480 code = afs_convert_code(afs_lockctl(vcp, &flock, cmd, credp));
483 if ((code == 0 || flp->fl_type == F_UNLCK) &&
484 (cmd == F_SETLK || cmd == F_SETLKW)) {
485 code = afs_posix_lock_file(fp, flp);
486 if (code && flp->fl_type != F_UNLCK) {
487 struct AFS_FLOCK flock2;
489 flock2.l_type = F_UNLCK;
491 afs_lockctl(vcp, &flock2, F_SETLK, credp);
495 /* If lockctl says there are no conflicting locks, then also check with the
496 * kernel, as lockctl knows nothing about byte range locks
498 if (code == 0 && cmd == F_GETLK && flock.l_type == F_UNLCK) {
499 afs_posix_test_lock(fp, flp);
500 /* If we found a lock in the kernel's structure, return it */
501 if (flp->fl_type != F_UNLCK) {
507 /* Convert flock back to Linux's file_lock */
508 flp->fl_type = flock.l_type;
509 flp->fl_pid = flock.l_pid;
510 flp->fl_start = flock.l_start;
511 if (flock.l_len == 0)
512 flp->fl_end = OFFSET_MAX; /* Lock to end of file */
514 flp->fl_end = flock.l_start + flock.l_len - 1;
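/* Worked example of the conversion in both directions: a Linux lock covering
 * bytes 100..199 arrives with fl_start = 100, fl_end = 199, is passed to
 * afs_lockctl as l_start = 100, l_len = 100, and maps back the same way;
 * l_len == 0 and fl_end == OFFSET_MAX are the two spellings of
 * "lock to end of file". */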
520 #ifdef STRUCT_FILE_OPERATIONS_HAS_FLOCK
522 afs_linux_flock(struct file *fp, int cmd, struct file_lock *flp) {
524 struct vcache *vcp = VTOAFS(FILE_INODE(fp));
525 cred_t *credp = crref();
526 struct AFS_FLOCK flock;
527 /* Convert to a lock format afs_lockctl understands. */
528 memset(&flock, 0, sizeof(flock));
529 flock.l_type = flp->fl_type;
530 flock.l_pid = flp->fl_pid;
535 /* Safe because there are no large files, yet */
536 #if defined(F_GETLK64) && (F_GETLK != F_GETLK64)
537 if (cmd == F_GETLK64)
539 else if (cmd == F_SETLK64)
541 else if (cmd == F_SETLKW64)
543 #endif /* F_GETLK64 && F_GETLK != F_GETLK64 */
546 code = afs_convert_code(afs_lockctl(vcp, &flock, cmd, credp));
549 if ((code == 0 || flp->fl_type == F_UNLCK) &&
550 (cmd == F_SETLK || cmd == F_SETLKW)) {
551 flp->fl_flags &=~ FL_SLEEP;
552 code = flock_lock_file_wait(fp, flp);
553 if (code && flp->fl_type != F_UNLCK) {
554 struct AFS_FLOCK flock2;
556 flock2.l_type = F_UNLCK;
558 afs_lockctl(vcp, &flock2, F_SETLK, credp);
562 /* Convert flock back to Linux's file_lock */
563 flp->fl_type = flock.l_type;
564 flp->fl_pid = flock.l_pid;
572 * essentially the same as afs_fsync() but we need to get the return
573 * code for the sys_close() here, not afs_linux_release(), so call
574 * afs_StoreAllSegments() with AFS_LASTSTORE
577 #if defined(FOP_FLUSH_TAKES_FL_OWNER_T)
578 afs_linux_flush(struct file *fp, fl_owner_t id)
580 afs_linux_flush(struct file *fp)
583 struct vrequest treq;
591 if ((fp->f_flags & O_ACCMODE) == O_RDONLY) { /* readers dont flush */
599 vcp = VTOAFS(FILE_INODE(fp));
601 code = afs_InitReq(&treq, credp);
604 /* If caching is bypassed for this file, or globally, just return 0 */
605 if (cache_bypass_strategy == ALWAYS_BYPASS_CACHE)
608 ObtainReadLock(&vcp->lock);
609 if (vcp->cachingStates & FCSBypass)
611 ReleaseReadLock(&vcp->lock);
614 /* future proof: don't rely on 0 return from afs_InitReq */
619 ObtainSharedLock(&vcp->lock, 535);
620 if ((vcp->execsOrWriters > 0) && (file_count(fp) == 1)) {
621 UpgradeSToWLock(&vcp->lock, 536);
622 if (!AFS_IS_DISCONNECTED) {
623 code = afs_StoreAllSegments(vcp,
625 AFS_SYNC | AFS_LASTSTORE);
627 afs_DisconAddDirty(vcp, VDisconWriteOsiFlush, 1);
629 ConvertWToSLock(&vcp->lock);
631 code = afs_CheckCode(code, &treq, 54);
632 ReleaseSharedLock(&vcp->lock);
639 return afs_convert_code(code);
642 struct file_operations afs_dir_fops = {
643 .read = generic_read_dir,
644 .readdir = afs_linux_readdir,
645 #ifdef HAVE_UNLOCKED_IOCTL
646 .unlocked_ioctl = afs_unlocked_xioctl,
650 #ifdef HAVE_COMPAT_IOCTL
651 .compat_ioctl = afs_unlocked_xioctl,
653 .open = afs_linux_open,
654 .release = afs_linux_release,
655 .llseek = default_llseek,
658 struct file_operations afs_file_fops = {
659 .read = afs_linux_read,
660 .write = afs_linux_write,
661 #ifdef HAVE_LINUX_GENERIC_FILE_AIO_READ
662 .aio_read = generic_file_aio_read,
663 .aio_write = generic_file_aio_write,
665 #ifdef HAVE_UNLOCKED_IOCTL
666 .unlocked_ioctl = afs_unlocked_xioctl,
670 #ifdef HAVE_COMPAT_IOCTL
671 .compat_ioctl = afs_unlocked_xioctl,
673 .mmap = afs_linux_mmap,
674 .open = afs_linux_open,
675 .flush = afs_linux_flush,
676 #if defined(STRUCT_FILE_OPERATIONS_HAS_SENDFILE)
677 .sendfile = generic_file_sendfile,
679 #if defined(STRUCT_FILE_OPERATIONS_HAS_SPLICE)
680 .splice_write = generic_file_splice_write,
681 .splice_read = generic_file_splice_read,
683 .release = afs_linux_release,
684 .fsync = afs_linux_fsync,
685 .lock = afs_linux_lock,
686 #ifdef STRUCT_FILE_OPERATIONS_HAS_FLOCK
687 .flock = afs_linux_flock,
689 .llseek = default_llseek,
693 /**********************************************************************
694 * AFS Linux dentry operations
695 **********************************************************************/
697 /* fix_bad_parent() : called if this dentry's vcache is a root vcache
698 * that has its mvid (parent dir's fid) pointer set to the wrong directory
699 * due to being mounted at multiple points at once. fix_bad_parent()
700 * calls afs_lookup() to correct the vcache's mvid, as well as the volume's
701 * dotdotfid and mtpoint fid members.
703 * dp - dentry to be checked.
704 * credp - credentials
705 * vcp, pvc - item's and parent's vcache pointer
709 * This dentry's vcache's mvid will be set to the correct parent directory's
711 * This root vnode's volume will have its dotdotfid and mtpoint fids set
712 * to the correct parent and mountpoint fids.
716 fix_bad_parent(struct dentry *dp, cred_t *credp, struct vcache *vcp, struct vcache *pvc)
718 struct vcache *avc = NULL;
720 /* force a lookup, so vcp->mvid is fixed up */
721 afs_lookup(pvc, (char *)dp->d_name.name, &avc, credp);
722 if (!avc || vcp != avc) { /* bad, very bad.. */
723 afs_Trace4(afs_iclSetp, CM_TRACE_TMP_1S3L, ICL_TYPE_STRING,
724 "check_bad_parent: bad pointer returned from afs_lookup origvc newvc dentry",
725 ICL_TYPE_POINTER, vcp, ICL_TYPE_POINTER, avc,
726 ICL_TYPE_POINTER, dp);
729 AFS_RELE(AFSTOV(avc));
734 /* afs_linux_revalidate
735 * Ensure vcache is stat'd before use. Return 0 if entry is valid.
738 afs_linux_revalidate(struct dentry *dp)
741 struct vcache *vcp = VTOAFS(dp->d_inode);
745 if (afs_shuttingdown)
751 /* Make this a fast path (no crref), since it's called so often. */
752 if (vcp->states & CStatd) {
753 struct vcache *pvc = VTOAFS(dp->d_parent->d_inode);
755 if (*dp->d_name.name != '/' && vcp->mvstat == 2) { /* root vnode */
756 if (vcp->mvid->Fid.Volume != pvc->fid.Fid.Volume) { /* bad parent */
759 fix_bad_parent(dp); /* check and correct mvid */
768 /* This avoids the crref when we don't have to do it. Watch for
769 * changes in afs_getattr that don't get replicated here!
771 if (vcp->f.states & CStatd &&
772 (!afs_fakestat_enable || vcp->mvstat != 1) &&
774 (vType(vcp) == VDIR || vType(vcp) == VLNK)) {
775 code = afs_CopyOutAttrs(vcp, &vattr);
778 code = afs_getattr(vcp, &vattr, credp);
783 afs_fill_inode(AFSTOV(vcp), &vattr);
787 return afs_convert_code(code);
791 afs_linux_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat)
793 int err = afs_linux_revalidate(dentry);
795 generic_fillattr(dentry->d_inode, stat);
800 /* Validate a dentry. Return 1 if unchanged, 0 if VFS layer should re-evaluate.
801 * In kernels 2.2.10 and above, we are passed an additional flags var which
802 * may have either LOOKUP_FOLLOW or LOOKUP_DIRECTORY set, in which case
803 * we are advised to follow the entry if it is a link or to make sure that
804 * it is a directory. But since the kernel itself checks these possibilities
805 * later on, we shouldn't have to do it until later. Perhaps in the future..
807 * The code here assumes that on entry the global lock is not held
810 #ifdef DOP_REVALIDATE_TAKES_NAMEIDATA
811 afs_linux_dentry_revalidate(struct dentry *dp, struct nameidata *nd)
813 afs_linux_dentry_revalidate(struct dentry *dp, int flags)
817 cred_t *credp = NULL;
818 struct vcache *vcp, *pvcp, *tvc = NULL;
819 struct dentry *parent;
821 struct afs_fakestat_state fakestate;
825 /* We don't support RCU path walking */
826 if (nd->flags & LOOKUP_RCU)
830 afs_InitFakeStat(&fakestate);
833 vcp = VTOAFS(dp->d_inode);
835 if (vcp == afs_globalVp)
838 parent = dget_parent(dp);
839 pvcp = VTOAFS(parent->d_inode);
841 if ((vcp->mvstat == 1) || (vcp->mvstat == 2)) { /* need to lock */
847 if (locked && vcp->mvstat == 1) { /* mount point */
848 if (vcp->mvid && (vcp->f.states & CMValid)) {
851 struct vrequest treq;
853 code = afs_InitReq(&treq, credp);
855 (strcmp(dp->d_name.name, ".directory") == 0)) {
859 code = afs_TryEvalFakeStat(&vcp, &fakestate, &treq);
861 code = afs_EvalFakeStat(&vcp, &fakestate, &treq);
862 if ((tryEvalOnly && vcp->mvstat == 1) || code) {
863 /* a mount point, not yet replaced by its directory */
868 if (locked && *dp->d_name.name != '/' && vcp->mvstat == 2) { /* root vnode */
869 if (vcp->mvid->Fid.Volume != pvcp->f.fid.Fid.Volume) { /* bad parent */
870 fix_bad_parent(dp, credp, vcp, pvcp); /* check and correct mvid */
875 /* If the last looker changes, we should make sure the current
876 * looker still has permission to examine this file. This would
877 * always require a crref() which would be "slow".
879 if (vcp->last_looker != treq.uid) {
880 if (!afs_AccessOK(vcp, (vType(vcp) == VREG) ? PRSFS_READ : PRSFS_LOOKUP, &treq, CHECK_MODE_BITS))
883 vcp->last_looker = treq.uid;
888 /* If the parent's DataVersion has changed or the vnode
889 * is no longer valid, we need to do a full lookup. VerifyVCache
890 * isn't enough since the vnode may have been renamed.
893 if ((!locked) && (hgetlo(pvcp->f.m.DataVersion) > dp->d_time || !(vcp->f.states & CStatd)) ) {
899 if (locked && (hgetlo(pvcp->f.m.DataVersion) > dp->d_time || !(vcp->f.states & CStatd))) {
900 afs_lookup(pvcp, (char *)dp->d_name.name, &tvc, credp);
901 if (!tvc || tvc != vcp) {
906 if (afs_getattr(vcp, &vattr, credp)) {
911 vattr2inode(AFSTOV(vcp), &vattr);
912 dp->d_time = hgetlo(pvcp->f.m.DataVersion);
915 /* should we always update the attributes at this point? */
916 /* unlikely--the vcache entry hasn't changed */
921 /* If this code is ever enabled, we should use dget_parent to handle
922 * getting the parent, and dput() to dispose of it. See above for an
924 pvcp = VTOAFS(dp->d_parent->d_inode);
925 if (hgetlo(pvcp->f.m.DataVersion) > dp->d_time)
929 /* No change in parent's DataVersion so this negative
930 * lookup is still valid. BUT, if a server is down a
931 * negative lookup can result so there should be a
932 * lifetime as well. For now, always expire.
945 afs_PutFakeStat(&fakestate); /* from here on vcp may be no longer valid */
947 /* we hold the global lock if we evaluated a mount point */
954 shrink_dcache_parent(dp);
960 if (have_submounts(dp))
968 afs_dentry_iput(struct dentry *dp, struct inode *ip)
970 struct vcache *vcp = VTOAFS(ip);
973 if (!AFS_IS_DISCONNECTED || (vcp->f.states & CUnlinked)) {
974 (void) afs_InactiveVCache(vcp, NULL);
977 afs_linux_clear_nfsfs_renamed(dp);
983 afs_dentry_delete(struct dentry *dp)
985 if (dp->d_inode && (VTOAFS(dp->d_inode)->f.states & CUnlinked))
986 return 1; /* bad inode? */
991 struct dentry_operations afs_dentry_operations = {
992 .d_revalidate = afs_linux_dentry_revalidate,
993 .d_delete = afs_dentry_delete,
994 .d_iput = afs_dentry_iput,
997 /**********************************************************************
998 * AFS Linux inode operations
999 **********************************************************************/
1003 * Merely need to set enough of vattr to get us through the create. Note
1004 * that the higher level code (open_namei) will take care of any truncation
1005 * explicitly. Exclusive open is also taken care of in open_namei.
1007 * name is in kernel space at this point.
1010 #ifdef IOP_CREATE_TAKES_NAMEIDATA
1011 afs_linux_create(struct inode *dip, struct dentry *dp, int mode,
1012 struct nameidata *nd)
1014 afs_linux_create(struct inode *dip, struct dentry *dp, int mode)
1018 cred_t *credp = crref();
1019 const char *name = dp->d_name.name;
1024 vattr.va_mode = mode;
1025 vattr.va_type = mode & S_IFMT;
1028 code = afs_create(VTOAFS(dip), (char *)name, &vattr, NONEXCL, mode,
1032 struct inode *ip = AFSTOV(vcp);
1034 afs_getattr(vcp, &vattr, credp);
1035 afs_fill_inode(ip, &vattr);
1036 insert_inode_hash(ip);
1037 #if !defined(STRUCT_SUPER_BLOCK_HAS_S_D_OP)
1038 dp->d_op = &afs_dentry_operations;
1040 dp->d_time = hgetlo(VTOAFS(dip)->f.m.DataVersion);
1041 d_instantiate(dp, ip);
1046 return afs_convert_code(code);
1049 /* afs_linux_lookup */
1050 static struct dentry *
1051 #ifdef IOP_LOOKUP_TAKES_NAMEIDATA
1052 afs_linux_lookup(struct inode *dip, struct dentry *dp,
1053 struct nameidata *nd)
1055 afs_linux_lookup(struct inode *dip, struct dentry *dp)
1058 cred_t *credp = crref();
1059 struct vcache *vcp = NULL;
1060 const char *comp = dp->d_name.name;
1061 struct inode *ip = NULL;
1062 struct dentry *newdp = NULL;
1066 code = afs_lookup(VTOAFS(dip), (char *)comp, &vcp, credp);
1072 afs_getattr(vcp, &vattr, credp);
1073 afs_fill_inode(ip, &vattr);
1074 if (hlist_unhashed(&ip->i_hash))
1075 insert_inode_hash(ip);
1077 #if !defined(STRUCT_SUPER_BLOCK_HAS_S_D_OP)
1078 dp->d_op = &afs_dentry_operations;
1080 dp->d_time = hgetlo(VTOAFS(dip)->f.m.DataVersion);
1083 if (ip && S_ISDIR(ip->i_mode)) {
1084 struct dentry *alias;
1086 /* Try to invalidate an existing alias in favor of our new one */
1087 alias = d_find_alias(ip);
1088 /* But not if it's disconnected; then we want d_splice_alias below */
1089 if (alias && !(alias->d_flags & DCACHE_DISCONNECTED)) {
1090 if (d_invalidate(alias) == 0) {
1099 newdp = d_splice_alias(ip, dp);
1103 /* It's ok for the file to not be found. That's noted by the caller by
1104 * seeing that the dp->d_inode field is NULL.
1106 if (!code || code == ENOENT)
1109 return ERR_PTR(afs_convert_code(code));
1113 afs_linux_link(struct dentry *olddp, struct inode *dip, struct dentry *newdp)
1116 cred_t *credp = crref();
1117 const char *name = newdp->d_name.name;
1118 struct inode *oldip = olddp->d_inode;
1120 /* If afs_link returned the vnode, we could instantiate the
1121 * dentry. Since it's not, we drop this one and do a new lookup.
1126 code = afs_link(VTOAFS(oldip), VTOAFS(dip), (char *)name, credp);
1130 return afs_convert_code(code);
1133 /* We have to have a Linux specific sillyrename function, because we
1134 * also have to keep the dcache up to date when we're doing a silly
1135 * rename - so we don't want the generic vnodeops doing this behind our
1140 afs_linux_sillyrename(struct inode *dir, struct dentry *dentry,
1143 struct vcache *tvc = VTOAFS(dentry->d_inode);
1144 struct dentry *__dp = NULL;
1145 char *__name = NULL;
1148 if (afs_linux_nfsfs_renamed(dentry))
1156 osi_FreeSmallSpace(__name);
1157 __name = afs_newname();
1160 __dp = lookup_one_len(__name, dentry->d_parent, strlen(__name));
1163 osi_FreeSmallSpace(__name);
1166 } while (__dp->d_inode != NULL);
1169 code = afs_rename(VTOAFS(dir), (char *)dentry->d_name.name,
1170 VTOAFS(dir), (char *)__dp->d_name.name,
1173 tvc->mvid = (void *) __name;
1176 crfree(tvc->uncred);
1178 tvc->uncred = credp;
1179 tvc->f.states |= CUnlinked;
1180 afs_linux_set_nfsfs_renamed(dentry);
1182 osi_FreeSmallSpace(__name);
1187 __dp->d_time = hgetlo(VTOAFS(dir)->f.m.DataVersion);
1188 d_move(dentry, __dp);
1197 afs_linux_unlink(struct inode *dip, struct dentry *dp)
1200 cred_t *credp = crref();
1201 const char *name = dp->d_name.name;
1202 struct vcache *tvc = VTOAFS(dp->d_inode);
1204 if (VREFCOUNT(tvc) > 1 && tvc->opens > 0
1205 && !(tvc->f.states & CUnlinked)) {
1207 code = afs_linux_sillyrename(dip, dp, credp);
1210 code = afs_remove(VTOAFS(dip), (char *)name, credp);
1217 return afs_convert_code(code);
1222 afs_linux_symlink(struct inode *dip, struct dentry *dp, const char *target)
1225 cred_t *credp = crref();
1227 const char *name = dp->d_name.name;
1229 /* If afs_symlink returned the vnode, we could instantiate the
1230 * dentry. Since it's not, we drop this one and do a new lookup.
1236 code = afs_symlink(VTOAFS(dip), (char *)name, &vattr, (char *)target, credp);
1239 return afs_convert_code(code);
1243 afs_linux_mkdir(struct inode *dip, struct dentry *dp, int mode)
1246 cred_t *credp = crref();
1247 struct vcache *tvcp = NULL;
1249 const char *name = dp->d_name.name;
1252 vattr.va_mask = ATTR_MODE;
1253 vattr.va_mode = mode;
1255 code = afs_mkdir(VTOAFS(dip), (char *)name, &vattr, &tvcp, credp);
1258 struct inode *ip = AFSTOV(tvcp);
1260 afs_getattr(tvcp, &vattr, credp);
1261 afs_fill_inode(ip, &vattr);
1263 #if !defined(STRUCT_SUPER_BLOCK_HAS_S_D_OP)
1264 dp->d_op = &afs_dentry_operations;
1266 dp->d_time = hgetlo(VTOAFS(dip)->f.m.DataVersion);
1267 d_instantiate(dp, ip);
1272 return afs_convert_code(code);
1276 afs_linux_rmdir(struct inode *dip, struct dentry *dp)
1279 cred_t *credp = crref();
1280 const char *name = dp->d_name.name;
1282 /* locking kernel conflicts with glock? */
1285 code = afs_rmdir(VTOAFS(dip), (char *)name, credp);
1288 /* Linux likes to see ENOTEMPTY returned from an rmdir() syscall
1289 * that failed because a directory is not empty. So, we map
1290 * EEXIST to ENOTEMPTY on Linux.
1292 if (code == EEXIST) {
1301 return afs_convert_code(code);
1306 afs_linux_rename(struct inode *oldip, struct dentry *olddp,
1307 struct inode *newip, struct dentry *newdp)
1310 cred_t *credp = crref();
1311 const char *oldname = olddp->d_name.name;
1312 const char *newname = newdp->d_name.name;
1313 struct dentry *rehash = NULL;
1315 /* Prevent any new references during rename operation. */
1317 if (!d_unhashed(newdp)) {
1322 #if defined(D_COUNT_INT)
1323 spin_lock(&olddp->d_lock);
1324 if (olddp->d_count > 1) {
1325 spin_unlock(&olddp->d_lock);
1326 shrink_dcache_parent(olddp);
1328 spin_unlock(&olddp->d_lock);
1330 if (atomic_read(&olddp->d_count) > 1)
1331 shrink_dcache_parent(olddp);
1335 code = afs_rename(VTOAFS(oldip), (char *)oldname, VTOAFS(newip), (char *)newname, credp);
1339 olddp->d_time = 0; /* force to revalidate */
1345 return afs_convert_code(code);
1349 /* afs_linux_ireadlink
1350 * Internal readlink which can return link contents to user or kernel space.
1351 * Note that the buffer is NOT supposed to be null-terminated.
1354 afs_linux_ireadlink(struct inode *ip, char *target, int maxlen, uio_seg_t seg)
1357 cred_t *credp = crref();
1361 setup_uio(&tuio, &iov, target, (afs_offs_t) 0, maxlen, UIO_READ, seg);
1362 code = afs_readlink(VTOAFS(ip), &tuio, credp);
1366 return maxlen - tuio.uio_resid;
1368 return afs_convert_code(code);
1371 #if !defined(USABLE_KERNEL_PAGE_SYMLINK_CACHE)
1372 /* afs_linux_readlink
1373 * Fill target (which is in user space) with contents of symlink.
1376 afs_linux_readlink(struct dentry *dp, char *target, int maxlen)
1379 struct inode *ip = dp->d_inode;
1382 code = afs_linux_ireadlink(ip, target, maxlen, AFS_UIOUSER);
1388 /* afs_linux_follow_link
1389 * a file system dependent link following routine.
1391 static int afs_linux_follow_link(struct dentry *dentry, struct nameidata *nd)
1396 name = osi_Alloc(PATH_MAX);
1402 code = afs_linux_ireadlink(dentry->d_inode, name, PATH_MAX - 1, AFS_UIOSYS);
1410 nd_set_link(nd, name);
1415 afs_linux_put_link(struct dentry *dentry, struct nameidata *nd)
1417 char *name = nd_get_link(nd);
1418 if (name && !IS_ERR(name)) {
1419 osi_Free(name, PATH_MAX);
1423 #endif /* USABLE_KERNEL_PAGE_SYMLINK_CACHE */
1425 /* Populate a page by filling it from the cache file pointed at by cachefp
1426 * (which contains the indicated chunk).
1427 * If task is NULL, the page copy occurs synchronously, and the routine
1428 * returns with page still locked. If task is non-NULL, then page copies
1429 * may occur in the background, and the page will be unlocked when it is
1433 afs_linux_read_cache(struct file *cachefp, struct page *page,
1434 int chunk, struct pagevec *lrupv,
1435 struct afs_pagecopy_task *task) {
1436 loff_t offset = page_offset(page);
1437 struct page *newpage, *cachepage;
1438 struct address_space *cachemapping;
1442 cachemapping = cachefp->f_dentry->d_inode->i_mapping;
1446 /* From our offset, work out which page of the on-disk cache file it
1447 * corresponds to; the cache file holds only this chunk, starting at byte 0. */
1448 pageindex = (offset - AFS_CHUNKTOBASE(chunk)) >> PAGE_CACHE_SHIFT;
1450 while (cachepage == NULL) {
1451 cachepage = find_get_page(cachemapping, pageindex);
1454 newpage = page_cache_alloc_cold(cachemapping);
1460 code = add_to_page_cache(newpage, cachemapping,
1461 pageindex, GFP_KERNEL);
1463 cachepage = newpage;
1466 page_cache_get(cachepage);
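/* Newly created cache pages are batched onto the file LRU below:
 * pagevec_add() returns the number of free slots remaining, so a zero
 * return means the pagevec is now full and the whole batch is drained
 * with __pagevec_lru_add_file() in one go. */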
1467 if (!pagevec_add(lrupv, cachepage))
1468 __pagevec_lru_add_file(lrupv);
1471 page_cache_release(newpage);
1473 if (code != -EEXIST)
1477 lock_page(cachepage);
1481 if (!PageUptodate(cachepage)) {
1482 ClearPageError(cachepage);
1483 code = cachemapping->a_ops->readpage(NULL, cachepage);
1484 if (!code && !task) {
1485 wait_on_page_locked(cachepage);
1488 unlock_page(cachepage);
1492 if (PageUptodate(cachepage)) {
1493 copy_highpage(page, cachepage);
1494 flush_dcache_page(page);
1495 SetPageUptodate(page);
1500 afs_pagecopy_queue_page(task, cachepage, page);
1512 page_cache_release(cachepage);
1518 afs_linux_readpage_fastpath(struct file *fp, struct page *pp, int *codep)
1520 loff_t offset = page_offset(pp);
1521 struct inode *ip = FILE_INODE(fp);
1522 struct vcache *avc = VTOAFS(ip);
1524 struct file *cacheFp = NULL;
1527 struct pagevec lrupv;
1529 /* Not a UFS cache, don't do anything */
1530 if (cacheDiskType != AFS_FCACHE_TYPE_UFS)
1533 /* Can't do anything if the vcache isn't statd, or if the read
1534 * crosses a chunk boundary.
1536 if (!(avc->f.states & CStatd) ||
1537 AFS_CHUNK(offset) != AFS_CHUNK(offset + PAGE_SIZE)) {
1541 ObtainWriteLock(&avc->lock, 911);
1543 /* XXX - See if hinting actually makes things faster !!! */
1545 /* See if we have a suitable entry already cached */
1549 /* We need to lock xdcache, then dcache, to handle situations where
1550 * the hint is on the free list. However, we can't safely do this
1551 * according to the locking hierarchy. So, use a non-blocking lock.
1553 ObtainReadLock(&afs_xdcache);
1554 dcLocked = ( 0 == NBObtainReadLock(&tdc->lock));
1556 if (dcLocked && (tdc->index != NULLIDX)
1557 && !FidCmp(&tdc->f.fid, &avc->f.fid)
1558 && tdc->f.chunk == AFS_CHUNK(offset)
1559 && !(afs_indexFlags[tdc->index] & (IFFree | IFDiscarded))) {
1560 /* Bonus - the hint was correct */
1563 /* Only destroy the hint if it's actually invalid, not if there's
1564 * just been a locking failure */
1566 ReleaseReadLock(&tdc->lock);
1573 ReleaseReadLock(&afs_xdcache);
1576 /* No hint, or hint is no longer valid - see if we can get something
1577 * directly from the dcache
1580 tdc = afs_FindDCache(avc, offset);
1583 ReleaseWriteLock(&avc->lock);
1588 ObtainReadLock(&tdc->lock);
1590 /* Is the dcache we've been given currently up to date */
1591 if (!hsame(avc->f.m.DataVersion, tdc->f.versionNo) ||
1592 (tdc->dflags & DFFetching)) {
1593 ReleaseWriteLock(&avc->lock);
1594 ReleaseReadLock(&tdc->lock);
1599 /* Update our hint for future abuse */
1602 /* Okay, so we've now got a cache file that is up to date */
1604 /* XXX - I suspect we should be locking the inodes before we use them! */
1606 cacheFp = afs_linux_raw_open(&tdc->f.inode);
1607 pagevec_init(&lrupv, 0);
1609 code = afs_linux_read_cache(cacheFp, pp, tdc->f.chunk, &lrupv, NULL);
1611 if (pagevec_count(&lrupv))
1612 __pagevec_lru_add_file(&lrupv);
1614 filp_close(cacheFp, NULL);
1617 ReleaseReadLock(&tdc->lock);
1618 ReleaseWriteLock(&avc->lock);
1625 /* afs_linux_readpage
1627 * This function is split into two, because prepare_write/begin_write
1628 * require a readpage call which doesn't unlock the resulting page upon
1632 afs_linux_fillpage(struct file *fp, struct page *pp)
1637 struct iovec *iovecp;
1638 struct inode *ip = FILE_INODE(fp);
1639 afs_int32 cnt = page_count(pp);
1640 struct vcache *avc = VTOAFS(ip);
1641 afs_offs_t offset = page_offset(pp);
1645 if (afs_linux_readpage_fastpath(fp, pp, &code)) {
1655 auio = osi_Alloc(sizeof(uio_t));
1656 iovecp = osi_Alloc(sizeof(struct iovec));
1658 setup_uio(auio, iovecp, (char *)address, offset, PAGE_SIZE, UIO_READ,
1663 afs_Trace4(afs_iclSetp, CM_TRACE_READPAGE, ICL_TYPE_POINTER, ip,
1664 ICL_TYPE_POINTER, pp, ICL_TYPE_INT32, cnt, ICL_TYPE_INT32,
1665 99999); /* not a possible code value */
1667 code = afs_rdwr(avc, auio, UIO_READ, 0, credp);
1669 afs_Trace4(afs_iclSetp, CM_TRACE_READPAGE, ICL_TYPE_POINTER, ip,
1670 ICL_TYPE_POINTER, pp, ICL_TYPE_INT32, cnt, ICL_TYPE_INT32,
1672 AFS_DISCON_UNLOCK();
1675 /* XXX valid for no-cache also? Check last bits of files... :)
1676 * Cognate code goes in afs_NoCacheFetchProc. */
1677 if (auio->uio_resid) /* zero remainder of page */
1678 memset((void *)(address + (PAGE_SIZE - auio->uio_resid)), 0,
1681 flush_dcache_page(pp);
1682 SetPageUptodate(pp);
1687 osi_Free(auio, sizeof(uio_t));
1688 osi_Free(iovecp, sizeof(struct iovec));
1691 return afs_convert_code(code);
1695 afs_linux_prefetch(struct file *fp, struct page *pp)
1698 struct vcache *avc = VTOAFS(FILE_INODE(fp));
1699 afs_offs_t offset = page_offset(pp);
1701 if (AFS_CHUNKOFFSET(offset) == 0) {
1703 struct vrequest treq;
1708 code = afs_InitReq(&treq, credp);
1709 if (!code && !NBObtainWriteLock(&avc->lock, 534)) {
1710 tdc = afs_FindDCache(avc, offset);
1712 if (!(tdc->mflags & DFNextStarted))
1713 afs_PrefetchChunk(avc, tdc, credp, &treq);
1716 ReleaseWriteLock(&avc->lock);
1721 return afs_convert_code(code);
1726 afs_linux_bypass_readpages(struct file *fp, struct address_space *mapping,
1727 struct list_head *page_list, unsigned num_pages)
1732 struct iovec* iovecp;
1733 struct nocache_read_request *ancr;
1735 struct pagevec lrupv;
1739 struct inode *ip = FILE_INODE(fp);
1740 struct vcache *avc = VTOAFS(ip);
1741 afs_int32 base_index = 0;
1742 afs_int32 page_count = 0;
1745 /* background thread must free: iovecp, auio, ancr */
1746 iovecp = osi_Alloc(num_pages * sizeof(struct iovec));
1748 auio = osi_Alloc(sizeof(uio_t));
1749 auio->uio_iov = iovecp;
1750 auio->uio_iovcnt = num_pages;
1751 auio->uio_flag = UIO_READ;
1752 auio->uio_seg = AFS_UIOSYS;
1753 auio->uio_resid = num_pages * PAGE_SIZE;
1755 ancr = osi_Alloc(sizeof(struct nocache_read_request));
1757 ancr->offset = auio->uio_offset;
1758 ancr->length = auio->uio_resid;
1760 pagevec_init(&lrupv, 0);
1762 for(page_ix = 0; page_ix < num_pages; ++page_ix) {
1764 if(list_empty(page_list))
1767 pp = list_entry(page_list->prev, struct page, lru);
1768 /* If we allocate a page and don't remove it from page_list,
1769 * the page cache gets upset. */
1771 isize = (i_size_read(fp->f_mapping->host) - 1) >> PAGE_CACHE_SHIFT;
1772 if(pp->index > isize) {
1779 offset = page_offset(pp);
1780 auio->uio_offset = offset;
1781 base_index = pp->index;
1783 iovecp[page_ix].iov_len = PAGE_SIZE;
1784 code = add_to_page_cache(pp, mapping, pp->index, GFP_KERNEL);
1785 if(base_index != pp->index) {
1788 page_cache_release(pp);
1789 iovecp[page_ix].iov_base = (void *) 0;
1791 ancr->length -= PAGE_SIZE;
1798 page_cache_release(pp);
1799 iovecp[page_ix].iov_base = (void *) 0;
1802 if(!PageLocked(pp)) {
1806 /* increment page refcount--our original design assumed
1807 * that locking it would effectively pin it; protect
1808 * ourselves from the possibility that this assumption
1809 * is faulty, at low cost (provided we do not fail to
1810 * do the corresponding decref on the other side) */
1813 /* save the page for background map */
1814 iovecp[page_ix].iov_base = (void*) pp;
1816 /* and put it on the LRU cache */
1817 if (!pagevec_add(&lrupv, pp))
1818 __pagevec_lru_add_file(&lrupv);
1822 /* If there were useful pages in the page list, make sure all pages
1823 * are in the LRU cache, then schedule the read */
1825 if (pagevec_count(&lrupv))
1826 __pagevec_lru_add_file(&lrupv);
1828 code = afs_ReadNoCache(avc, ancr, credp);
1831 /* If there is nothing for the background thread to handle,
1832 * it won't be freeing the things that we never gave it */
1833 osi_Free(iovecp, num_pages * sizeof(struct iovec));
1834 osi_Free(auio, sizeof(uio_t));
1835 osi_Free(ancr, sizeof(struct nocache_read_request));
1837 /* we do not flush, release, or unmap pages--that will be
1838 * done for us by the background thread as each page comes in
1839 * from the fileserver */
1840 return afs_convert_code(code);
1845 afs_linux_bypass_readpage(struct file *fp, struct page *pp)
1847 cred_t *credp = NULL;
1849 struct iovec *iovecp;
1850 struct nocache_read_request *ancr;
1854 * Special case: if page is at or past end of file, just zero it and set
1857 if (page_offset(pp) >= i_size_read(fp->f_mapping->host)) {
1858 zero_user_segment(pp, 0, PAGE_CACHE_SIZE);
1859 SetPageUptodate(pp);
1866 /* receiver frees */
1867 auio = osi_Alloc(sizeof(uio_t));
1868 iovecp = osi_Alloc(sizeof(struct iovec));
1870 /* address can be NULL, because we overwrite it with 'pp', below */
1871 setup_uio(auio, iovecp, NULL, page_offset(pp),
1872 PAGE_SIZE, UIO_READ, AFS_UIOSYS);
1874 /* save the page for background map */
1875 get_page(pp); /* see above */
1876 auio->uio_iov->iov_base = (void*) pp;
1877 /* the background thread will free this */
1878 ancr = osi_Alloc(sizeof(struct nocache_read_request));
1880 ancr->offset = page_offset(pp);
1881 ancr->length = PAGE_SIZE;
1884 code = afs_ReadNoCache(VTOAFS(FILE_INODE(fp)), ancr, credp);
1887 return afs_convert_code(code);
1891 afs_linux_can_bypass(struct inode *ip) {
1892 switch(cache_bypass_strategy) {
1893 case NEVER_BYPASS_CACHE:
1895 case ALWAYS_BYPASS_CACHE:
1897 case LARGE_FILES_BYPASS_CACHE:
1898 if(i_size_read(ip) > cache_bypass_threshold)
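/* A minimal sketch of the policy above, with the elided branches filled in
 * on the assumption that NEVER always caches, ALWAYS always bypasses, and
 * LARGE_FILES bypasses only past cache_bypass_threshold:
 *
 *	switch(cache_bypass_strategy) {
 *	case NEVER_BYPASS_CACHE:
 *	    return 0;
 *	case ALWAYS_BYPASS_CACHE:
 *	    return 1;
 *	case LARGE_FILES_BYPASS_CACHE:
 *	    return (i_size_read(ip) > cache_bypass_threshold) ? 1 : 0;
 *	default:
 *	    return 0;
 *	}
 */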
1905 /* Check if a file is permitted to bypass the cache by policy, and modify
1906 * the cache bypass state recorded for that file */
1909 afs_linux_bypass_check(struct inode *ip) {
1912 int bypass = afs_linux_can_bypass(ip);
1915 trydo_cache_transition(VTOAFS(ip), credp, bypass);
1923 afs_linux_readpage(struct file *fp, struct page *pp)
1927 if (afs_linux_bypass_check(FILE_INODE(fp))) {
1928 code = afs_linux_bypass_readpage(fp, pp);
1930 code = afs_linux_fillpage(fp, pp);
1932 code = afs_linux_prefetch(fp, pp);
1939 /* Readpages reads a number of pages for a particular file. We use
1940 * this to optimise the reading, by limiting the number of times we have
1941 * to look up, lock, and open vcaches and dcaches.
1945 afs_linux_readpages(struct file *fp, struct address_space *mapping,
1946 struct list_head *page_list, unsigned int num_pages)
1948 struct inode *inode = mapping->host;
1949 struct vcache *avc = VTOAFS(inode);
1951 struct file *cacheFp = NULL;
1953 unsigned int page_idx;
1955 struct pagevec lrupv;
1956 struct afs_pagecopy_task *task;
1958 if (afs_linux_bypass_check(inode))
1959 return afs_linux_bypass_readpages(fp, mapping, page_list, num_pages);
1961 if (cacheDiskType == AFS_FCACHE_TYPE_MEM)
1965 if ((code = afs_linux_VerifyVCache(avc, NULL))) {
1970 ObtainWriteLock(&avc->lock, 912);
1973 task = afs_pagecopy_init_task();
1976 pagevec_init(&lrupv, 0);
1977 for (page_idx = 0; page_idx < num_pages; page_idx++) {
1978 struct page *page = list_entry(page_list->prev, struct page, lru);
1979 list_del(&page->lru);
1980 offset = page_offset(page);
1982 if (tdc && tdc->f.chunk != AFS_CHUNK(offset)) {
1984 ReleaseReadLock(&tdc->lock);
1989 filp_close(cacheFp, NULL);
1994 if ((tdc = afs_FindDCache(avc, offset))) {
1995 ObtainReadLock(&tdc->lock);
1996 if (!hsame(avc->f.m.DataVersion, tdc->f.versionNo) ||
1997 (tdc->dflags & DFFetching)) {
1998 ReleaseReadLock(&tdc->lock);
2005 cacheFp = afs_linux_raw_open(&tdc->f.inode);
2008 if (tdc && !add_to_page_cache(page, mapping, page->index,
2010 page_cache_get(page);
2011 if (!pagevec_add(&lrupv, page))
2012 __pagevec_lru_add_file(&lrupv);
2014 afs_linux_read_cache(cacheFp, page, tdc->f.chunk, &lrupv, task);
2016 page_cache_release(page);
2018 if (pagevec_count(&lrupv))
2019 __pagevec_lru_add_file(&lrupv);
2022 filp_close(cacheFp, NULL);
2024 afs_pagecopy_put_task(task);
2028 ReleaseReadLock(&tdc->lock);
2032 ReleaseWriteLock(&avc->lock);
2037 /* Prepare an AFS vcache for writeback. Should be called with the vcache
2040 afs_linux_prepare_writeback(struct vcache *avc) {
2041 if (avc->f.states & CPageWrite) {
2042 return AOP_WRITEPAGE_ACTIVATE;
2044 avc->f.states |= CPageWrite;
2049 afs_linux_dopartialwrite(struct vcache *avc, cred_t *credp) {
2050 struct vrequest treq;
2053 if (!afs_InitReq(&treq, credp))
2054 code = afs_DoPartialWrite(avc, &treq);
2056 return afs_convert_code(code);
2060 afs_linux_complete_writeback(struct vcache *avc) {
2061 avc->f.states &= ~CPageWrite;
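/* The two helpers above bracket every writeback: CPageWrite acts as a
 * re-entrancy guard, so prepare_writeback returns AOP_WRITEPAGE_ACTIVATE
 * when a store is already in flight, and complete_writeback clears the flag
 * again. The callers below follow this pattern (a sketch; the lock IDs are
 * illustrative only):
 *
 *	ObtainWriteLock(&vcp->lock, 537);
 *	if (afs_linux_prepare_writeback(vcp) == AOP_WRITEPAGE_ACTIVATE) {
 *	    ReleaseWriteLock(&vcp->lock);
 *	    return AOP_WRITEPAGE_ACTIVATE;
 *	}
 *	ReleaseWriteLock(&vcp->lock);
 *
 *	code = afs_linux_page_writeback(inode, pp, 0, to, credp);
 *
 *	ObtainWriteLock(&vcp->lock, 538);
 *	code1 = afs_linux_dopartialwrite(vcp, credp);
 *	afs_linux_complete_writeback(vcp);
 *	ReleaseWriteLock(&vcp->lock);
 */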
2064 /* Write back a given page synchronously. Called with no AFS locks held */
2066 afs_linux_page_writeback(struct inode *ip, struct page *pp,
2067 unsigned long offset, unsigned int count,
2070 struct vcache *vcp = VTOAFS(ip);
2078 buffer = kmap(pp) + offset;
2079 base = page_offset(pp) + offset;
2082 afs_Trace4(afs_iclSetp, CM_TRACE_UPDATEPAGE, ICL_TYPE_POINTER, vcp,
2083 ICL_TYPE_POINTER, pp, ICL_TYPE_INT32, page_count(pp),
2084 ICL_TYPE_INT32, 99999);
2086 setup_uio(&tuio, &iovec, buffer, base, count, UIO_WRITE, AFS_UIOSYS);
2088 code = afs_write(vcp, &tuio, f_flags, credp, 0);
2090 i_size_write(ip, vcp->f.m.Length);
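/* i_blocks is counted in 512-byte sectors: round the AFS length up to the
 * next KiB ((Length + 1023) >> 10) and double it to get the sector count. */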
2091 ip->i_blocks = ((vcp->f.m.Length + 1023) >> 10) << 1;
2093 code = code ? afs_convert_code(code) : count - tuio.uio_resid;
2095 afs_Trace4(afs_iclSetp, CM_TRACE_UPDATEPAGE, ICL_TYPE_POINTER, vcp,
2096 ICL_TYPE_POINTER, pp, ICL_TYPE_INT32, page_count(pp),
2097 ICL_TYPE_INT32, code);
2106 afs_linux_writepage_sync(struct inode *ip, struct page *pp,
2107 unsigned long offset, unsigned int count)
2111 struct vcache *vcp = VTOAFS(ip);
2114 /* Catch recursive writeback. This occurs if the kernel decides
2115 * writeback is required whilst we are writing to the cache, or
2116 * flushing to the server. When we're running synchronously (as
2117 * opposed to from writepage) we can't actually do anything about
2118 * this case - as we can't return AOP_WRITEPAGE_ACTIVATE to write()
2121 ObtainWriteLock(&vcp->lock, 532);
2122 afs_linux_prepare_writeback(vcp);
2123 ReleaseWriteLock(&vcp->lock);
2127 code = afs_linux_page_writeback(ip, pp, offset, count, credp);
2130 ObtainWriteLock(&vcp->lock, 533);
2132 code1 = afs_linux_dopartialwrite(vcp, credp);
2133 afs_linux_complete_writeback(vcp);
2134 ReleaseWriteLock(&vcp->lock);
2145 #ifdef AOP_WRITEPAGE_TAKES_WRITEBACK_CONTROL
2146 afs_linux_writepage(struct page *pp, struct writeback_control *wbc)
2148 afs_linux_writepage(struct page *pp)
2151 struct address_space *mapping = pp->mapping;
2152 struct inode *inode;
2155 unsigned int to = PAGE_CACHE_SIZE;
2160 if (PageReclaim(pp)) {
2161 return AOP_WRITEPAGE_ACTIVATE;
2162 /* XXX - Do we need to redirty the page here? */
2167 inode = mapping->host;
2168 vcp = VTOAFS(inode);
2169 isize = i_size_read(inode);
2171 /* Don't defeat an earlier truncate */
2172 if (page_offset(pp) > isize) {
2173 set_page_writeback(pp);
2179 ObtainWriteLock(&vcp->lock, 537);
2180 code = afs_linux_prepare_writeback(vcp);
2181 if (code == AOP_WRITEPAGE_ACTIVATE) {
2182 /* WRITEPAGE_ACTIVATE is the only return value that permits us
2183 * to return with the page still locked */
2184 ReleaseWriteLock(&vcp->lock);
2189 /* Grab the creds structure currently held in the vnode, and
2190 * get a reference to it, in case it goes away ... */
2196 ReleaseWriteLock(&vcp->lock);
2199 set_page_writeback(pp);
2201 SetPageUptodate(pp);
2203 /* We can unlock the page here, because it's protected by the
2204 * page_writeback flag. This should make us less vulnerable to
2205 * deadlocking in afs_write and afs_DoPartialWrite
2209 /* If this is the final page, then just write the number of bytes that
2210 * are actually in it */
2211 if ((isize - page_offset(pp)) < to )
2212 to = isize - page_offset(pp);
2214 code = afs_linux_page_writeback(inode, pp, 0, to, credp);
2217 ObtainWriteLock(&vcp->lock, 538);
2219 /* As much as we might like to ignore a file server error here,
2220 * and just try again when we close(), unfortunately StoreAllSegments
2221 * will invalidate our chunks if the server returns a permanent error,
2222 * so we need to at least try and get that error back to the user
2225 code1 = afs_linux_dopartialwrite(vcp, credp);
2227 afs_linux_complete_writeback(vcp);
2228 ReleaseWriteLock(&vcp->lock);
2233 end_page_writeback(pp);
2234 page_cache_release(pp);
2245 /* afs_linux_permission
2246 * Check access rights - returns error if can't check or permission denied.
2249 #if defined(IOP_PERMISSION_TAKES_FLAGS)
2250 afs_linux_permission(struct inode *ip, int mode, unsigned int flags)
2251 #elif defined(IOP_PERMISSION_TAKES_NAMEIDATA)
2252 afs_linux_permission(struct inode *ip, int mode, struct nameidata *nd)
2254 afs_linux_permission(struct inode *ip, int mode)
2261 #if defined(IOP_PERMISSION_TAKES_FLAGS)
2262 /* We don't support RCU path walking */
2263 if (flags & IPERM_FLAG_RCU)
2269 if (mode & MAY_EXEC)
2271 if (mode & MAY_READ)
2273 if (mode & MAY_WRITE)
2275 code = afs_access(VTOAFS(ip), tmp, credp);
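/* The elided branches above presumably accumulate AFS access bits into tmp
 * before the afs_access() call; a sketch of the assumed mapping:
 *
 *	if (mode & MAY_EXEC)
 *	    tmp |= VEXEC;
 *	if (mode & MAY_READ)
 *	    tmp |= VREAD;
 *	if (mode & MAY_WRITE)
 *	    tmp |= VWRITE;
 */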
2279 return afs_convert_code(code);
2283 afs_linux_commit_write(struct file *file, struct page *page, unsigned offset,
2287 struct inode *inode = FILE_INODE(file);
2288 loff_t pagebase = page_offset(page);
2290 if (i_size_read(inode) < (pagebase + offset))
2291 i_size_write(inode, pagebase + offset);
2293 if (PageChecked(page)) {
2294 SetPageUptodate(page);
2295 ClearPageChecked(page);
2298 code = afs_linux_writepage_sync(inode, page, offset, to - offset);
2304 afs_linux_prepare_write(struct file *file, struct page *page, unsigned from,
2308 /* http://kerneltrap.org/node/4941 details the expected behaviour of
2309 * prepare_write. Essentially, if the page exists within the file,
2310 * and is not being fully written, then we should populate it.
2313 if (!PageUptodate(page)) {
2314 loff_t pagebase = page_offset(page);
2315 loff_t isize = i_size_read(page->mapping->host);
2317 /* Is the location we are writing to beyond the end of the file? */
2318 if (pagebase >= isize ||
2319 ((from == 0) && (pagebase + to) >= isize)) {
2320 zero_user_segments(page, 0, from, to, PAGE_CACHE_SIZE);
2321 SetPageChecked(page);
2322 /* Are we writing a full page */
2323 } else if (from == 0 && to == PAGE_CACHE_SIZE) {
2324 SetPageChecked(page);
2325 /* Is the page readable? If it's write-only, we don't care, because we're
2326 * not actually going to read from it ... */
2327 } else if ((file->f_flags & O_ACCMODE) != O_WRONLY) {
2328 /* We don't care if fillpage fails, because if it does the page
2329 * won't be marked as up to date
2331 afs_linux_fillpage(file, page);
2337 #if defined(STRUCT_ADDRESS_SPACE_OPERATIONS_HAS_WRITE_BEGIN)
2339 afs_linux_write_end(struct file *file, struct address_space *mapping,
2340 loff_t pos, unsigned len, unsigned copied,
2341 struct page *page, void *fsdata)
2344 unsigned int from = pos & (PAGE_CACHE_SIZE - 1);
2346 code = afs_linux_commit_write(file, page, from, from + len);
2349 page_cache_release(page);
2354 afs_linux_write_begin(struct file *file, struct address_space *mapping,
2355 loff_t pos, unsigned len, unsigned flags,
2356 struct page **pagep, void **fsdata)
2359 pgoff_t index = pos >> PAGE_CACHE_SHIFT;
2360 unsigned int from = pos & (PAGE_CACHE_SIZE - 1);
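/* pos >> PAGE_CACHE_SHIFT selects which page of the file is affected, and
 * pos & (PAGE_CACHE_SIZE - 1) is the byte offset of pos within that page,
 * so the write covers [from, from + len) of the page grabbed below. */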
2363 page = grab_cache_page_write_begin(mapping, index, flags);
2366 code = afs_linux_prepare_write(file, page, from, from + len);
2369 page_cache_release(page);
2377 static struct inode_operations afs_file_iops = {
2378 .permission = afs_linux_permission,
2379 .getattr = afs_linux_getattr,
2380 .setattr = afs_notify_change,
2383 static struct address_space_operations afs_file_aops = {
2384 .readpage = afs_linux_readpage,
2385 .readpages = afs_linux_readpages,
2386 .writepage = afs_linux_writepage,
2387 #if defined (STRUCT_ADDRESS_SPACE_OPERATIONS_HAS_WRITE_BEGIN)
2388 .write_begin = afs_linux_write_begin,
2389 .write_end = afs_linux_write_end,
2391 .commit_write = afs_linux_commit_write,
2392 .prepare_write = afs_linux_prepare_write,
2397 /* Separate ops vector for directories. Linux 2.2 tests type of inode
2398 * by what sort of operation is allowed.....
2401 static struct inode_operations afs_dir_iops = {
2402 .setattr = afs_notify_change,
2403 .create = afs_linux_create,
2404 .lookup = afs_linux_lookup,
2405 .link = afs_linux_link,
2406 .unlink = afs_linux_unlink,
2407 .symlink = afs_linux_symlink,
2408 .mkdir = afs_linux_mkdir,
2409 .rmdir = afs_linux_rmdir,
2410 .rename = afs_linux_rename,
2411 .getattr = afs_linux_getattr,
2412 .permission = afs_linux_permission,
2415 /* We really need a separate symlink set of ops, since do_follow_link()
2416 * determines if it _is_ a link by checking if the follow_link op is set.
2418 #if defined(USABLE_KERNEL_PAGE_SYMLINK_CACHE)
2420 afs_symlink_filler(struct file *file, struct page *page)
2422 struct inode *ip = (struct inode *)page->mapping->host;
2423 char *p = (char *)kmap(page);
2427 code = afs_linux_ireadlink(ip, p, PAGE_SIZE, AFS_UIOSYS);
2432 p[code] = '\0'; /* null terminate? */
2434 SetPageUptodate(page);
2446 static struct address_space_operations afs_symlink_aops = {
2447 .readpage = afs_symlink_filler
2449 #endif /* USABLE_KERNEL_PAGE_SYMLINK_CACHE */
2451 static struct inode_operations afs_symlink_iops = {
2452 #if defined(USABLE_KERNEL_PAGE_SYMLINK_CACHE)
2453 .readlink = page_readlink,
2454 # if defined(HAVE_LINUX_PAGE_FOLLOW_LINK)
2455 .follow_link = page_follow_link,
2457 .follow_link = page_follow_link_light,
2458 .put_link = page_put_link,
2460 #else /* !defined(USABLE_KERNEL_PAGE_SYMLINK_CACHE) */
2461 .readlink = afs_linux_readlink,
2462 .follow_link = afs_linux_follow_link,
2463 .put_link = afs_linux_put_link,
2464 #endif /* USABLE_KERNEL_PAGE_SYMLINK_CACHE */
2465 .setattr = afs_notify_change,
2469 afs_fill_inode(struct inode *ip, struct vattr *vattr)
2473 vattr2inode(ip, vattr);
2475 ip->i_mapping->backing_dev_info = afs_backing_dev_info;
2476 /* Reset ops if symlink or directory. */
2477 if (S_ISREG(ip->i_mode)) {
2478 ip->i_op = &afs_file_iops;
2479 ip->i_fop = &afs_file_fops;
2480 ip->i_data.a_ops = &afs_file_aops;
2482 } else if (S_ISDIR(ip->i_mode)) {
2483 ip->i_op = &afs_dir_iops;
2484 ip->i_fop = &afs_dir_fops;
2486 } else if (S_ISLNK(ip->i_mode)) {
2487 ip->i_op = &afs_symlink_iops;
2488 #if defined(USABLE_KERNEL_PAGE_SYMLINK_CACHE)
2489 ip->i_data.a_ops = &afs_symlink_aops;
2490 ip->i_mapping = &ip->i_data;