2 * Copyright 2000, International Business Machines Corporation and others.
5 * This software has been released under the terms of the IBM Public
6 * License. For details, see the LICENSE file in the top-level source
7 * directory or online at http://www.openafs.org/dl/license10.html
11 * Linux specific vnodeops. Also includes the glue routines required to call worker routines in 64 bit mode.
14 * So far the only truly scary part is that Linux relies on the inode cache
15 * to be up to date. Don't you dare break a callback and expect an fstat
16 * to give you meaningful information. This appears to be fixed in the 2.1
17 * development kernels. As it is we can fix this now by intercepting the stat calls.
21 #include <afsconfig.h>
22 #include "afs/param.h"
25 #include "afs/sysincludes.h"
26 #include "afsincludes.h"
27 #include "afs/afs_stats.h"
29 #ifdef HAVE_MM_INLINE_H
30 #include <linux/mm_inline.h>
32 #include <linux/pagemap.h>
33 #include <linux/smp_lock.h>
34 #include <linux/writeback.h>
35 #include <linux/pagevec.h>
36 #if defined(AFS_CACHE_BYPASS)
38 #include "afs/afs_bypasscache.h"
41 #include "osi_compat.h"
42 #include "osi_pagecopy.h"
44 #ifndef HAVE_PAGEVEC_LRU_ADD_FILE
45 #define __pagevec_lru_add_file __pagevec_lru_add
49 #define MAX_ERRNO 1000L
52 extern struct backing_dev_info afs_backing_dev_info;
54 extern struct vcache *afs_globalVp;
55 extern int afs_notify_change(struct dentry *dp, struct iattr *iattrp);
56 /* Some uses of BKL are perhaps not needed for bypass or memcache--
57 * why don't we try it out? */
58 extern struct afs_cacheOps afs_UfsCacheOps;
61 afs_maybe_lock_kernel(void) {
62 if(afs_cacheType == &afs_UfsCacheOps)
67 afs_maybe_unlock_kernel(void) {
68 if(afs_cacheType == &afs_UfsCacheOps)
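/* Illustrative sketch only (not compiled): the bodies of the two BKL helpers
 * above are elided in this listing. Under the assumption that the Big Kernel
 * Lock is only wanted for the UFS (on-disk) cache, they would read roughly:
 */
#if 0
static inline void
afs_maybe_lock_kernel(void) {
    if (afs_cacheType == &afs_UfsCacheOps)
        lock_kernel();
}

static inline void
afs_maybe_unlock_kernel(void) {
    if (afs_cacheType == &afs_UfsCacheOps)
        unlock_kernel();
}
#endif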
72 /* This function converts a positive error code from AFS into a negative
73 * code suitable for passing into the Linux VFS layer. It checks that the
74 * error code is within the permissible bounds for the ERR_PTR mechanism.
76 * _All_ error codes which come from the AFS layer should be passed through
77 * this function before being returned to the kernel.
81 afs_convert_code(int code) {
82 if ((code >= 0) && (code <= MAX_ERRNO))
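/* Illustrative sketch only (not compiled): the rest of afs_convert_code() is
 * elided above. Given the stated contract -- positive AFS errors become
 * negative errnos small enough for ERR_PTR() -- the body plausibly reads:
 */
#if 0
static inline int
afs_convert_code(int code) {
    if ((code >= 0) && (code <= MAX_ERRNO))
        return -code;
    else
        return -EIO;    /* out-of-range codes collapse to a generic error */
}
#endif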
88 /* Linux doesn't require a credp for many functions, and crref is an expensive
89 * operation. This helper function avoids obtaining it for VerifyVCache calls
93 afs_linux_VerifyVCache(struct vcache *avc, cred_t **retcred) {
98 if (avc->f.states & CStatd) {
106 code = afs_InitReq(&treq, credp);
108 code = afs_VerifyVCache2(avc, &treq);
115 return afs_convert_code(code);
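/* Illustrative sketch only (not compiled): the helper above appears in
 * fragments. The point is to skip crref() entirely when the vcache is already
 * statd, and to hand the credential back to the caller (via retcred) only
 * when one had to be obtained:
 */
#if 0
static int
afs_linux_VerifyVCache(struct vcache *avc, cred_t **retcred) {
    cred_t *credp = NULL;
    struct vrequest treq;
    int code = 0;

    if (avc->f.states & CStatd) {
        if (retcred)
            *retcred = NULL;
        return 0;                       /* already valid; no crref() needed */
    }

    credp = crref();                    /* only pay for crref() when we must */
    code = afs_InitReq(&treq, credp);
    if (code == 0)
        code = afs_VerifyVCache2(avc, &treq);

    if (retcred != NULL)
        *retcred = credp;               /* caller takes ownership */
    else
        crfree(credp);

    return afs_convert_code(code);
}
#endif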
119 afs_linux_read(struct file *fp, char *buf, size_t count, loff_t * offp)
122 struct vcache *vcp = VTOAFS(fp->f_dentry->d_inode);
125 afs_Trace4(afs_iclSetp, CM_TRACE_READOP, ICL_TYPE_POINTER, vcp,
126 ICL_TYPE_OFFSET, offp, ICL_TYPE_INT32, count, ICL_TYPE_INT32,
128 code = afs_linux_VerifyVCache(vcp, NULL);
131 /* Linux's FlushPages implementation doesn't ever use credp,
132 * so we optimise by not using it */
133 osi_FlushPages(vcp, NULL); /* ensure stale pages are gone */
135 code = do_sync_read(fp, buf, count, offp);
139 afs_Trace4(afs_iclSetp, CM_TRACE_READOP, ICL_TYPE_POINTER, vcp,
140 ICL_TYPE_OFFSET, offp, ICL_TYPE_INT32, count, ICL_TYPE_INT32,
147 /* Now we have integrated VM for writes as well as reads. generic_file_write
148 * also takes care of re-positioning the pointer if the file is open in append
149 * mode. Call fake open/close to ensure we do writes of core dumps.
152 afs_linux_write(struct file *fp, const char *buf, size_t count, loff_t * offp)
155 struct vcache *vcp = VTOAFS(fp->f_dentry->d_inode);
160 afs_Trace4(afs_iclSetp, CM_TRACE_WRITEOP, ICL_TYPE_POINTER, vcp,
161 ICL_TYPE_OFFSET, offp, ICL_TYPE_INT32, count, ICL_TYPE_INT32,
162 (fp->f_flags & O_APPEND) ? 99998 : 99999);
164 code = afs_linux_VerifyVCache(vcp, &credp);
166 ObtainWriteLock(&vcp->lock, 529);
168 ReleaseWriteLock(&vcp->lock);
171 code = do_sync_write(fp, buf, count, offp);
175 ObtainWriteLock(&vcp->lock, 530);
177 if (vcp->execsOrWriters == 1 && !credp)
180 afs_FakeClose(vcp, credp);
181 ReleaseWriteLock(&vcp->lock);
183 afs_Trace4(afs_iclSetp, CM_TRACE_WRITEOP, ICL_TYPE_POINTER, vcp,
184 ICL_TYPE_OFFSET, offp, ICL_TYPE_INT32, count, ICL_TYPE_INT32,
193 extern int BlobScan(struct dcache * afile, afs_int32 ablob);
195 /* This is a complete rewrite of afs_readdir, since we can make use of
196 * filldir instead of afs_readdir_move. Note that changes to vcache/dcache
197 * handling and use of bulkstats will need to be reflected here as well.
200 afs_linux_readdir(struct file *fp, void *dirbuf, filldir_t filldir)
202 struct vcache *avc = VTOAFS(FILE_INODE(fp));
203 struct vrequest treq;
204 register struct dcache *tdc;
211 afs_size_t origOffset, tlen;
212 cred_t *credp = crref();
213 struct afs_fakestat_state fakestat;
215 afs_maybe_lock_kernel();
217 AFS_STATCNT(afs_readdir);
219 code = afs_convert_code(afs_InitReq(&treq, credp));
224 afs_InitFakeStat(&fakestat);
225 code = afs_convert_code(afs_EvalFakeStat(&avc, &fakestat, &treq));
229 /* update the cache entry */
231 code = afs_convert_code(afs_VerifyVCache2(avc, &treq));
235 /* get a reference to the entire directory */
236 tdc = afs_GetDCache(avc, (afs_size_t) 0, &treq, &origOffset, &tlen, 1);
242 ObtainSharedLock(&avc->lock, 810);
243 UpgradeSToWLock(&avc->lock, 811);
244 ObtainReadLock(&tdc->lock);
246 * Make sure that the data in the cache is current. There are two
247 * cases we need to worry about:
248 * 1. The cache data is being fetched by another process.
249 * 2. The cache data is no longer valid
251 while ((avc->f.states & CStatd)
252 && (tdc->dflags & DFFetching)
253 && hsame(avc->f.m.DataVersion, tdc->f.versionNo)) {
254 ReleaseReadLock(&tdc->lock);
255 ReleaseSharedLock(&avc->lock);
256 afs_osi_Sleep(&tdc->validPos);
257 ObtainSharedLock(&avc->lock, 812);
258 ObtainReadLock(&tdc->lock);
260 if (!(avc->f.states & CStatd)
261 || !hsame(avc->f.m.DataVersion, tdc->f.versionNo)) {
262 ReleaseReadLock(&tdc->lock);
263 ReleaseSharedLock(&avc->lock);
268 /* Set the readdir-in-progress flag, and downgrade the lock
269 * to shared so others will be able to acquire a read lock.
271 avc->f.states |= CReadDir;
272 avc->dcreaddir = tdc;
273 avc->readdir_pid = MyPidxx2Pid(MyPidxx);
274 ConvertWToSLock(&avc->lock);
276 /* Fill in until we get an error or we're done. This implementation
277 * takes an offset in units of blobs, rather than bytes.
280 offset = (int) fp->f_pos;
282 dirpos = BlobScan(tdc, offset);
286 de = afs_dir_GetBlob(tdc, dirpos);
290 ino = afs_calc_inum (avc->f.fid.Fid.Volume, ntohl(de->fid.vnode));
293 len = strlen(de->name);
295 printf("afs_linux_readdir: afs_dir_GetBlob failed, null name (inode %lx, dirpos %d)\n",
296 (unsigned long)&tdc->f.inode, dirpos);
298 ReleaseSharedLock(&avc->lock);
304 /* filldir returns -EINVAL when the buffer is full. */
306 unsigned int type = DT_UNKNOWN;
307 struct VenusFid afid;
310 afid.Cell = avc->f.fid.Cell;
311 afid.Fid.Volume = avc->f.fid.Fid.Volume;
312 afid.Fid.Vnode = ntohl(de->fid.vnode);
313 afid.Fid.Unique = ntohl(de->fid.vunique);
314 if ((avc->f.states & CForeign) == 0 && (ntohl(de->fid.vnode) & 1)) {
316 } else if ((tvc = afs_FindVCache(&afid, 0, 0))) {
319 } else if (((tvc->f.states) & (CStatd | CTruth))) {
320 /* CTruth will be set if the object has *ever* been statd */
325 else if (vtype == VREG)
327 /* Don't do this until we're sure it can't be a mtpt */
328 /* else if (vtype == VLNK)
330 /* what other types does AFS support? */
332 /* clean up from afs_FindVCache */
336 * If this is NFS readdirplus, then the filler is going to
337 * call getattr on this inode, which will deadlock if we're holding the GLOCK.
341 code = (*filldir) (dirbuf, de->name, len, offset, ino, type);
347 offset = dirpos + 1 + ((len + 16) >> 5);
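/* Worked example of the blob arithmetic above (directory blobs are 32 bytes,
 * hence the >> 5): a 10-character name gives (10 + 16) >> 5 == 0 extra blobs,
 * so the next offset is dirpos + 1; a 40-character name gives
 * (40 + 16) >> 5 == 1, so the entry spans an extra blob and the next offset
 * is dirpos + 2. */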
349 /* If filldir didn't fill in the last one this is still pointing to that entry; if it did, this points to the entry after it. */
352 fp->f_pos = (loff_t) offset;
354 ReleaseReadLock(&tdc->lock);
356 UpgradeSToWLock(&avc->lock, 813);
357 avc->f.states &= ~CReadDir;
359 avc->readdir_pid = 0;
360 ReleaseSharedLock(&avc->lock);
364 afs_PutFakeStat(&fakestat);
367 afs_maybe_unlock_kernel();
372 /* in afs_pioctl.c */
373 extern int afs_xioctl(struct inode *ip, struct file *fp, unsigned int com,
376 #if defined(HAVE_UNLOCKED_IOCTL) || defined(HAVE_COMPAT_IOCTL)
377 static long afs_unlocked_xioctl(struct file *fp, unsigned int com,
379 return afs_xioctl(FILE_INODE(fp), fp, com, arg);
386 afs_linux_mmap(struct file *fp, struct vm_area_struct *vmap)
388 struct vcache *vcp = VTOAFS(FILE_INODE(fp));
392 afs_Trace3(afs_iclSetp, CM_TRACE_GMAP, ICL_TYPE_POINTER, vcp,
393 ICL_TYPE_POINTER, vmap->vm_start, ICL_TYPE_INT32,
394 vmap->vm_end - vmap->vm_start);
396 /* get a validated vcache entry */
397 code = afs_linux_VerifyVCache(vcp, NULL);
399 /* Linux's Flushpage implementation doesn't use credp, so optimise
400 * our code to not need to crref() it */
401 osi_FlushPages(vcp, NULL); /* ensure stale pages are gone */
403 code = generic_file_mmap(fp, vmap);
406 vcp->f.states |= CMAPPED;
413 afs_linux_open(struct inode *ip, struct file *fp)
415 struct vcache *vcp = VTOAFS(ip);
416 cred_t *credp = crref();
419 afs_maybe_lock_kernel();
421 code = afs_open(&vcp, fp->f_flags, credp);
423 afs_maybe_unlock_kernel();
426 return afs_convert_code(code);
430 afs_linux_release(struct inode *ip, struct file *fp)
432 struct vcache *vcp = VTOAFS(ip);
433 cred_t *credp = crref();
436 afs_maybe_lock_kernel();
438 code = afs_close(vcp, fp->f_flags, credp);
439 ObtainWriteLock(&vcp->lock, 807);
444 ReleaseWriteLock(&vcp->lock);
446 afs_maybe_unlock_kernel();
449 return afs_convert_code(code);
453 afs_linux_fsync(struct file *fp, struct dentry *dp, int datasync)
456 struct inode *ip = FILE_INODE(fp);
457 cred_t *credp = crref();
459 afs_maybe_lock_kernel();
461 code = afs_fsync(VTOAFS(ip), credp);
463 afs_maybe_unlock_kernel();
465 return afs_convert_code(code);
471 afs_linux_lock(struct file *fp, int cmd, struct file_lock *flp)
474 struct vcache *vcp = VTOAFS(FILE_INODE(fp));
475 cred_t *credp = crref();
476 struct AFS_FLOCK flock;
478 /* Convert to a lock format afs_lockctl understands. */
479 memset(&flock, 0, sizeof(flock));
480 flock.l_type = flp->fl_type;
481 flock.l_pid = flp->fl_pid;
483 flock.l_start = flp->fl_start;
484 flock.l_len = flp->fl_end - flp->fl_start + 1;
486 /* Safe because there are no large files, yet */
487 #if defined(F_GETLK64) && (F_GETLK != F_GETLK64)
488 if (cmd == F_GETLK64)
490 else if (cmd == F_SETLK64)
492 else if (cmd == F_SETLKW64)
494 #endif /* F_GETLK64 && F_GETLK != F_GETLK64 */
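/* Illustrative sketch only (not compiled): the elided branches above simply
 * fold the 64-bit lock commands down to their 32-bit equivalents, which is
 * safe while AFS serves no files larger than 2^31 bytes:
 */
#if 0
    if (cmd == F_GETLK64)
        cmd = F_GETLK;
    else if (cmd == F_SETLK64)
        cmd = F_SETLK;
    else if (cmd == F_SETLKW64)
        cmd = F_SETLKW;
#endif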
497 code = afs_lockctl(vcp, &flock, cmd, credp);
500 if ((code == 0 || flp->fl_type == F_UNLCK) &&
501 (cmd == F_SETLK || cmd == F_SETLKW)) {
502 code = afs_posix_lock_file(fp, flp);
503 if (code && flp->fl_type != F_UNLCK) {
504 struct AFS_FLOCK flock2;
506 flock2.l_type = F_UNLCK;
508 afs_lockctl(vcp, &flock2, F_SETLK, credp);
512 /* If lockctl says there are no conflicting locks, then also check with the
513 * kernel, as lockctl knows nothing about byte range locks
515 if (code == 0 && cmd == F_GETLK && flock.l_type == F_UNLCK) {
516 afs_posix_test_lock(fp, flp);
517 /* If we found a lock in the kernel's structure, return it */
518 if (flp->fl_type != F_UNLCK) {
524 /* Convert flock back to Linux's file_lock */
525 flp->fl_type = flock.l_type;
526 flp->fl_pid = flock.l_pid;
527 flp->fl_start = flock.l_start;
528 flp->fl_end = flock.l_start + flock.l_len - 1;
531 return afs_convert_code(code);
534 #ifdef STRUCT_FILE_OPERATIONS_HAS_FLOCK
536 afs_linux_flock(struct file *fp, int cmd, struct file_lock *flp) {
538 struct vcache *vcp = VTOAFS(FILE_INODE(fp));
539 cred_t *credp = crref();
540 struct AFS_FLOCK flock;
541 /* Convert to a lock format afs_lockctl understands. */
542 memset(&flock, 0, sizeof(flock));
543 flock.l_type = flp->fl_type;
544 flock.l_pid = flp->fl_pid;
547 flock.l_len = OFFSET_MAX;
549 /* Safe because there are no large files, yet */
550 #if defined(F_GETLK64) && (F_GETLK != F_GETLK64)
551 if (cmd == F_GETLK64)
553 else if (cmd == F_SETLK64)
555 else if (cmd == F_SETLKW64)
557 #endif /* F_GETLK64 && F_GETLK != F_GETLK64 */
560 code = afs_lockctl(vcp, &flock, cmd, credp);
563 if ((code == 0 || flp->fl_type == F_UNLCK) &&
564 (cmd == F_SETLK || cmd == F_SETLKW)) {
565 flp->fl_flags &=~ FL_SLEEP;
566 code = flock_lock_file_wait(fp, flp);
567 if (code && flp->fl_type != F_UNLCK) {
568 struct AFS_FLOCK flock2;
570 flock2.l_type = F_UNLCK;
572 afs_lockctl(vcp, &flock2, F_SETLK, credp);
576 /* Convert flock back to Linux's file_lock */
577 flp->fl_type = flock.l_type;
578 flp->fl_pid = flock.l_pid;
581 return afs_convert_code(code);
586 * essentially the same as afs_fsync() but we need to get the return
587 * code for the sys_close() here, not afs_linux_release(), so call
588 * afs_StoreAllSegments() with AFS_LASTSTORE
591 #if defined(FOP_FLUSH_TAKES_FL_OWNER_T)
592 afs_linux_flush(struct file *fp, fl_owner_t id)
594 afs_linux_flush(struct file *fp)
597 struct vrequest treq;
601 #if defined(AFS_CACHE_BYPASS)
607 if ((fp->f_flags & O_ACCMODE) == O_RDONLY) { /* readers dont flush */
615 vcp = VTOAFS(FILE_INODE(fp));
617 code = afs_InitReq(&treq, credp);
620 #if defined(AFS_CACHE_BYPASS)
621 /* If caching is bypassed for this file, or globally, just return 0 */
622 if(cache_bypass_strategy == ALWAYS_BYPASS_CACHE)
625 ObtainReadLock(&vcp->lock);
626 if(vcp->cachingStates & FCSBypass)
628 ReleaseReadLock(&vcp->lock);
631 /* future proof: don't rely on 0 return from afs_InitReq */
636 ObtainSharedLock(&vcp->lock, 535);
637 if ((vcp->execsOrWriters > 0) && (file_count(fp) == 1)) {
638 UpgradeSToWLock(&vcp->lock, 536);
639 if (!AFS_IS_DISCONNECTED) {
640 code = afs_StoreAllSegments(vcp,
642 AFS_SYNC | AFS_LASTSTORE);
644 afs_DisconAddDirty(vcp, VDisconWriteOsiFlush, 1);
646 ConvertWToSLock(&vcp->lock);
648 code = afs_CheckCode(code, &treq, 54);
649 ReleaseSharedLock(&vcp->lock);
656 return afs_convert_code(code);
659 struct file_operations afs_dir_fops = {
660 .read = generic_read_dir,
661 .readdir = afs_linux_readdir,
662 #ifdef HAVE_UNLOCKED_IOCTL
663 .unlocked_ioctl = afs_unlocked_xioctl,
667 #ifdef HAVE_COMPAT_IOCTL
668 .compat_ioctl = afs_unlocked_xioctl,
670 .open = afs_linux_open,
671 .release = afs_linux_release,
674 struct file_operations afs_file_fops = {
675 .read = afs_linux_read,
676 .write = afs_linux_write,
677 #ifdef GENERIC_FILE_AIO_READ
678 .aio_read = generic_file_aio_read,
679 .aio_write = generic_file_aio_write,
681 #ifdef HAVE_UNLOCKED_IOCTL
682 .unlocked_ioctl = afs_unlocked_xioctl,
686 #ifdef HAVE_COMPAT_IOCTL
687 .compat_ioctl = afs_unlocked_xioctl,
689 .mmap = afs_linux_mmap,
690 .open = afs_linux_open,
691 .flush = afs_linux_flush,
692 #if defined(STRUCT_FILE_OPERATIONS_HAS_SENDFILE)
693 .sendfile = generic_file_sendfile,
695 #if defined(STRUCT_FILE_OPERATIONS_HAS_SPLICE)
696 .splice_write = generic_file_splice_write,
697 .splice_read = generic_file_splice_read,
699 .release = afs_linux_release,
700 .fsync = afs_linux_fsync,
701 .lock = afs_linux_lock,
702 #ifdef STRUCT_FILE_OPERATIONS_HAS_FLOCK
703 .flock = afs_linux_flock,
708 /**********************************************************************
709 * AFS Linux dentry operations
710 **********************************************************************/
712 /* check_bad_parent() : Checks if this dentry's vcache is a root vcache
713 * that has its mvid (parent dir's fid) pointer set to the wrong directory
714 * due to the volume being mounted at multiple points at once. If so, check_bad_parent()
715 * calls afs_lookup() to correct the vcache's mvid, as well as the volume's
716 * dotdotfid and mtpoint fid members.
718 * dp - dentry to be checked.
722 * This dentry's vcache's mvid will be set to the correct parent directory's
724 * This root vnode's volume will have its dotdotfid and mtpoint fids set
725 * to the correct parent and mountpoint fids.
729 check_bad_parent(struct dentry *dp)
732 struct vcache *vcp = VTOAFS(dp->d_inode), *avc = NULL;
733 struct vcache *pvc = VTOAFS(dp->d_parent->d_inode);
735 if (vcp->mvid->Fid.Volume != pvc->f.fid.Fid.Volume) { /* bad parent */
738 /* force a lookup, so vcp->mvid is fixed up */
739 afs_lookup(pvc, (char *)dp->d_name.name, &avc, credp);
740 if (!avc || vcp != avc) { /* bad, very bad.. */
741 afs_Trace4(afs_iclSetp, CM_TRACE_TMP_1S3L, ICL_TYPE_STRING,
742 "check_bad_parent: bad pointer returned from afs_lookup origvc newvc dentry",
743 ICL_TYPE_POINTER, vcp, ICL_TYPE_POINTER, avc,
744 ICL_TYPE_POINTER, dp);
747 AFS_RELE(AFSTOV(avc));
754 /* afs_linux_revalidate
755 * Ensure vcache is stat'd before use. Return 0 if entry is valid.
758 afs_linux_revalidate(struct dentry *dp)
761 struct vcache *vcp = VTOAFS(dp->d_inode);
765 if (afs_shuttingdown)
768 afs_maybe_lock_kernel();
772 /* Make this a fast path (no crref), since it's called so often. */
773 if (vcp->f.states & CStatd) {
775 if (*dp->d_name.name != '/' && vcp->mvstat == 2) /* root vnode */
776 check_bad_parent(dp); /* check and correct mvid */
784 /* This avoids the crref when we don't have to do it. Watch for
785 * changes in afs_getattr that don't get replicated here!
787 if (vcp->f.states & CStatd &&
788 (!afs_fakestat_enable || vcp->mvstat != 1) &&
790 code = afs_CopyOutAttrs(vcp, &vattr);
793 code = afs_getattr(vcp, &vattr, credp);
797 afs_fill_inode(AFSTOV(vcp), &vattr);
800 afs_maybe_unlock_kernel();
802 return afs_convert_code(code);
806 afs_linux_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat)
808 int err = afs_linux_revalidate(dentry);
810 generic_fillattr(dentry->d_inode, stat);
815 /* Validate a dentry. Return 1 if unchanged, 0 if VFS layer should re-evaluate.
816 * In kernels 2.2.10 and above, we are passed an additional flags variable
817 * which may have either LOOKUP_FOLLOW or LOOKUP_DIRECTORY set, in which case
818 * we are advised to follow the entry if it is a link or to make sure that
819 * it is a directory. But since the kernel itself checks these possibilities
820 * later on, we shouldn't have to do it until later. Perhaps in the future..
823 #ifdef DOP_REVALIDATE_TAKES_NAMEIDATA
824 afs_linux_dentry_revalidate(struct dentry *dp, struct nameidata *nd)
826 afs_linux_dentry_revalidate(struct dentry *dp, int flags)
830 cred_t *credp = NULL;
831 struct vcache *vcp, *pvcp, *tvc = NULL;
833 struct afs_fakestat_state fakestate;
835 afs_maybe_lock_kernel();
837 afs_InitFakeStat(&fakestate);
841 vcp = VTOAFS(dp->d_inode);
842 pvcp = VTOAFS(dp->d_parent->d_inode); /* dget_parent()? */
844 if (vcp == afs_globalVp)
847 if (vcp->mvstat == 1) { /* mount point */
848 if (vcp->mvid && (vcp->f.states & CMValid)) {
851 struct vrequest treq;
854 code = afs_InitReq(&treq, credp);
856 (strcmp(dp->d_name.name, ".directory") == 0)) {
860 code = afs_TryEvalFakeStat(&vcp, &fakestate, &treq);
862 code = afs_EvalFakeStat(&vcp, &fakestate, &treq);
863 if ((tryEvalOnly && vcp->mvstat == 1) || code) {
864 /* a mount point, not yet replaced by its directory */
869 if (*dp->d_name.name != '/' && vcp->mvstat == 2) /* root vnode */
870 check_bad_parent(dp); /* check and correct mvid */
873 /* If the last looker changes, we should make sure the current
874 * looker still has permission to examine this file. This would
875 * always require a crref() which would be "slow".
877 if (vcp->last_looker != treq.uid) {
878 if (!afs_AccessOK(vcp, (vType(vcp) == VREG) ? PRSFS_READ : PRSFS_LOOKUP, &treq, CHECK_MODE_BITS))
881 vcp->last_looker = treq.uid;
885 /* If the parent's DataVersion has changed or the vnode
886 * is no longer valid, we need to do a full lookup. VerifyVCache
887 * isn't enough since the vnode may have been renamed.
890 if (hgetlo(pvcp->f.m.DataVersion) > dp->d_time || !(vcp->f.states & CStatd)) {
893 afs_lookup(pvcp, (char *)dp->d_name.name, &tvc, credp);
894 if (!tvc || tvc != vcp)
897 if (afs_getattr(vcp, &vattr, credp))
900 vattr2inode(AFSTOV(vcp), &vattr);
901 dp->d_time = hgetlo(pvcp->f.m.DataVersion);
904 /* should we always update the attributes at this point? */
905 /* unlikely--the vcache entry hasn't changed */
909 pvcp = VTOAFS(dp->d_parent->d_inode); /* dget_parent()? */
910 if (hgetlo(pvcp->f.m.DataVersion) > dp->d_time)
914 /* No change in parent's DataVersion so this negative
915 * lookup is still valid. BUT, if a server is down a
916 * negative lookup can result so there should be a
917 * lifetime as well. For now, always expire.
930 afs_PutFakeStat(&fakestate);
936 shrink_dcache_parent(dp);
939 afs_maybe_unlock_kernel();
943 if (have_submounts(dp))
951 afs_dentry_iput(struct dentry *dp, struct inode *ip)
953 struct vcache *vcp = VTOAFS(ip);
956 if (!AFS_IS_DISCONNECTED || (vcp->f.states & CUnlinked)) {
957 (void) afs_InactiveVCache(vcp, NULL);
960 afs_linux_clear_nfsfs_renamed(dp);
966 afs_dentry_delete(struct dentry *dp)
968 if (dp->d_inode && (VTOAFS(dp->d_inode)->f.states & CUnlinked))
969 return 1; /* bad inode? */
974 struct dentry_operations afs_dentry_operations = {
975 .d_revalidate = afs_linux_dentry_revalidate,
976 .d_delete = afs_dentry_delete,
977 .d_iput = afs_dentry_iput,
980 /**********************************************************************
981 * AFS Linux inode operations
982 **********************************************************************/
986 * Merely need to set enough of vattr to get us through the create. Note
987 * that the higher level code (open_namei) will take care of any truncation
988 * explicitly. Exclusive open is also taken care of in open_namei.
990 * name is in kernel space at this point.
993 #ifdef IOP_CREATE_TAKES_NAMEIDATA
994 afs_linux_create(struct inode *dip, struct dentry *dp, int mode,
995 struct nameidata *nd)
997 afs_linux_create(struct inode *dip, struct dentry *dp, int mode)
1001 cred_t *credp = crref();
1002 const char *name = dp->d_name.name;
1007 vattr.va_mode = mode;
1008 vattr.va_type = mode & S_IFMT;
1010 afs_maybe_lock_kernel();
1012 code = afs_create(VTOAFS(dip), (char *)name, &vattr, NONEXCL, mode,
1016 struct inode *ip = AFSTOV(vcp);
1018 afs_getattr(vcp, &vattr, credp);
1019 afs_fill_inode(ip, &vattr);
1020 insert_inode_hash(ip);
1021 dp->d_op = &afs_dentry_operations;
1022 dp->d_time = hgetlo(VTOAFS(dip)->f.m.DataVersion);
1023 d_instantiate(dp, ip);
1027 afs_maybe_unlock_kernel();
1029 return afs_convert_code(code);
1032 /* afs_linux_lookup */
1033 static struct dentry *
1034 #ifdef IOP_LOOKUP_TAKES_NAMEIDATA
1035 afs_linux_lookup(struct inode *dip, struct dentry *dp,
1036 struct nameidata *nd)
1038 afs_linux_lookup(struct inode *dip, struct dentry *dp)
1041 cred_t *credp = crref();
1042 struct vcache *vcp = NULL;
1043 const char *comp = dp->d_name.name;
1044 struct inode *ip = NULL;
1045 struct dentry *newdp = NULL;
1048 afs_maybe_lock_kernel();
1050 code = afs_lookup(VTOAFS(dip), (char *)comp, &vcp, credp);
1056 afs_getattr(vcp, &vattr, credp);
1057 afs_fill_inode(ip, &vattr);
1058 if (hlist_unhashed(&ip->i_hash))
1059 insert_inode_hash(ip);
1061 dp->d_op = &afs_dentry_operations;
1062 dp->d_time = hgetlo(VTOAFS(dip)->f.m.DataVersion);
1065 if (ip && S_ISDIR(ip->i_mode)) {
1066 struct dentry *alias;
1068 /* Try to invalidate an existing alias in favor of our new one */
1069 alias = d_find_alias(ip);
1070 /* But not if it's disconnected; then we want d_splice_alias below */
1071 if (alias && !(alias->d_flags & DCACHE_DISCONNECTED)) {
1072 if (d_invalidate(alias) == 0) {
1082 newdp = d_splice_alias(ip, dp);
1084 afs_maybe_unlock_kernel();
1087 /* It's ok for the file to not be found. That's noted by the caller by
1088 * seeing that the dp->d_inode field is NULL.
1090 if (!code || code == ENOENT)
1093 return ERR_PTR(afs_convert_code(code));
1097 afs_linux_link(struct dentry *olddp, struct inode *dip, struct dentry *newdp)
1100 cred_t *credp = crref();
1101 const char *name = newdp->d_name.name;
1102 struct inode *oldip = olddp->d_inode;
1104 /* If afs_link returned the vnode, we could instantiate the
1105 * dentry. Since it does not, we drop this one and do a new lookup.
1110 code = afs_link(VTOAFS(oldip), VTOAFS(dip), (char *)name, credp);
1114 return afs_convert_code(code);
1118 afs_linux_unlink(struct inode *dip, struct dentry *dp)
1121 cred_t *credp = crref();
1122 const char *name = dp->d_name.name;
1123 struct vcache *tvc = VTOAFS(dp->d_inode);
1125 afs_maybe_lock_kernel();
1126 if (VREFCOUNT(tvc) > 1 && tvc->opens > 0
1127 && !(tvc->f.states & CUnlinked)) {
1128 struct dentry *__dp;
1138 osi_FreeSmallSpace(__name);
1139 __name = afs_newname();
1142 __dp = lookup_one_len(__name, dp->d_parent, strlen(__name));
1146 } while (__dp->d_inode != NULL);
1149 code = afs_rename(VTOAFS(dip), (char *)dp->d_name.name, VTOAFS(dip), (char *)__dp->d_name.name, credp);
1151 tvc->mvid = (void *) __name;
1154 crfree(tvc->uncred);
1156 tvc->uncred = credp;
1157 tvc->f.states |= CUnlinked;
1158 afs_linux_set_nfsfs_renamed(dp);
1160 osi_FreeSmallSpace(__name);
1165 __dp->d_time = hgetlo(VTOAFS(dip)->f.m.DataVersion);
1174 code = afs_remove(VTOAFS(dip), (char *)name, credp);
1179 afs_maybe_unlock_kernel();
1181 return afs_convert_code(code);
1186 afs_linux_symlink(struct inode *dip, struct dentry *dp, const char *target)
1189 cred_t *credp = crref();
1191 const char *name = dp->d_name.name;
1193 /* If afs_symlink returned the vnode, we could instantiate the
1194 * dentry. Since it does not, we drop this one and do a new lookup.
1200 code = afs_symlink(VTOAFS(dip), (char *)name, &vattr, (char *)target, credp);
1203 return afs_convert_code(code);
1207 afs_linux_mkdir(struct inode *dip, struct dentry *dp, int mode)
1210 cred_t *credp = crref();
1211 struct vcache *tvcp = NULL;
1213 const char *name = dp->d_name.name;
1215 afs_maybe_lock_kernel();
1217 vattr.va_mask = ATTR_MODE;
1218 vattr.va_mode = mode;
1220 code = afs_mkdir(VTOAFS(dip), (char *)name, &vattr, &tvcp, credp);
1223 struct inode *ip = AFSTOV(tvcp);
1225 afs_getattr(tvcp, &vattr, credp);
1226 afs_fill_inode(ip, &vattr);
1228 dp->d_op = &afs_dentry_operations;
1229 dp->d_time = hgetlo(VTOAFS(dip)->f.m.DataVersion);
1230 d_instantiate(dp, ip);
1234 afs_maybe_unlock_kernel();
1236 return afs_convert_code(code);
1240 afs_linux_rmdir(struct inode *dip, struct dentry *dp)
1243 cred_t *credp = crref();
1244 const char *name = dp->d_name.name;
1246 /* locking kernel conflicts with glock? */
1249 code = afs_rmdir(VTOAFS(dip), (char *)name, credp);
1252 /* Linux likes to see ENOTEMPTY returned from an rmdir() syscall
1253 * that failed because a directory is not empty. So, we map
1254 * EEXIST to ENOTEMPTY on Linux.
1256 if (code == EEXIST) {
1265 return afs_convert_code(code);
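/* Illustrative sketch only (not compiled): the elided mapping is expected to
 * be nothing more than rewriting the error before conversion, e.g.:
 */
#if 0
    if (code == EEXIST)
        code = ENOTEMPTY;       /* Linux expects ENOTEMPTY from rmdir() */
#endif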
1270 afs_linux_rename(struct inode *oldip, struct dentry *olddp,
1271 struct inode *newip, struct dentry *newdp)
1274 cred_t *credp = crref();
1275 const char *oldname = olddp->d_name.name;
1276 const char *newname = newdp->d_name.name;
1277 struct dentry *rehash = NULL;
1279 /* Prevent any new references during rename operation. */
1280 afs_maybe_lock_kernel();
1282 if (!d_unhashed(newdp)) {
1287 if (atomic_read(&olddp->d_count) > 1)
1288 shrink_dcache_parent(olddp);
1291 code = afs_rename(VTOAFS(oldip), (char *)oldname, VTOAFS(newip), (char *)newname, credp);
1295 olddp->d_time = 0; /* force to revalidate */
1300 afs_maybe_unlock_kernel();
1303 return afs_convert_code(code);
1307 /* afs_linux_ireadlink
1308 * Internal readlink which can return link contents to user or kernel space.
1309 * Note that the buffer is NOT supposed to be null-terminated.
1312 afs_linux_ireadlink(struct inode *ip, char *target, int maxlen, uio_seg_t seg)
1315 cred_t *credp = crref();
1319 setup_uio(&tuio, &iov, target, (afs_offs_t) 0, maxlen, UIO_READ, seg);
1320 code = afs_readlink(VTOAFS(ip), &tuio, credp);
1324 return maxlen - tuio.uio_resid;
1326 return afs_convert_code(code);
1329 #if !defined(USABLE_KERNEL_PAGE_SYMLINK_CACHE)
1330 /* afs_linux_readlink
1331 * Fill target (which is in user space) with contents of symlink.
1334 afs_linux_readlink(struct dentry *dp, char *target, int maxlen)
1337 struct inode *ip = dp->d_inode;
1340 code = afs_linux_ireadlink(ip, target, maxlen, AFS_UIOUSER);
1346 /* afs_linux_follow_link
1347 * A filesystem-dependent link-following routine.
1349 static int afs_linux_follow_link(struct dentry *dentry, struct nameidata *nd)
1354 name = osi_Alloc(PATH_MAX);
1360 code = afs_linux_ireadlink(dentry->d_inode, name, PATH_MAX - 1, AFS_UIOSYS);
1368 code = vfs_follow_link(nd, name);
1371 osi_Free(name, PATH_MAX);
1376 #endif /* USABLE_KERNEL_PAGE_SYMLINK_CACHE */
1378 #if defined(AFS_CACHE_BYPASS)
1379 #endif /* defined(AFS_CACHE_BYPASS) */
1381 /* Populate a page by filling it from the cache file pointed at by cachefp
1382 * (which contains indicated chunk)
1383 * If task is NULL, the page copy occurs synchronously, and the routine
1384 * returns with the page still locked. If task is non-NULL, then page copies
1385 * may occur in the background, and the page will be unlocked when it is completed.
1389 afs_linux_read_cache(struct file *cachefp, struct page *page,
1390 int chunk, struct pagevec *lrupv,
1391 struct afs_pagecopy_task *task) {
1392 loff_t offset = page_offset(page);
1393 struct page *newpage, *cachepage;
1394 struct address_space *cachemapping;
1398 cachemapping = cachefp->f_dentry->d_inode->i_mapping;
1402 /* From our offset, we now need to work out which page in the disk
1403 * file it corresponds to. This will be fun ... */
1404 pageindex = (offset - AFS_CHUNKTOBASE(chunk)) >> PAGE_CACHE_SHIFT;
1406 while (cachepage == NULL) {
1407 cachepage = find_get_page(cachemapping, pageindex);
1410 newpage = page_cache_alloc_cold(cachemapping);
1416 code = add_to_page_cache(newpage, cachemapping,
1417 pageindex, GFP_KERNEL);
1419 cachepage = newpage;
1422 page_cache_get(cachepage);
1423 if (!pagevec_add(lrupv, cachepage))
1424 __pagevec_lru_add_file(lrupv);
1427 page_cache_release(newpage);
1429 if (code != -EEXIST)
1433 lock_page(cachepage);
1437 if (!PageUptodate(cachepage)) {
1438 ClearPageError(cachepage);
1439 code = cachemapping->a_ops->readpage(NULL, cachepage);
1440 if (!code && !task) {
1441 wait_on_page_locked(cachepage);
1444 unlock_page(cachepage);
1448 if (PageUptodate(cachepage)) {
1449 copy_highpage(page, cachepage);
1450 flush_dcache_page(page);
1451 SetPageUptodate(page);
1456 afs_pagecopy_queue_page(task, cachepage, page);
1468 page_cache_release(cachepage);
1474 afs_linux_readpage_fastpath(struct file *fp, struct page *pp, int *codep)
1476 loff_t offset = page_offset(pp);
1477 struct inode *ip = FILE_INODE(fp);
1478 struct vcache *avc = VTOAFS(ip);
1480 struct file *cacheFp = NULL;
1483 struct pagevec lrupv;
1485 /* Not a UFS cache, don't do anything */
1486 if (cacheDiskType != AFS_FCACHE_TYPE_UFS)
1489 /* Can't do anything if the vcache isn't statd, or if the read
1490 * crosses a chunk boundary.
1492 if (!(avc->f.states & CStatd) ||
1493 AFS_CHUNK(offset) != AFS_CHUNK(offset + PAGE_SIZE)) {
1497 ObtainWriteLock(&avc->lock, 911);
1499 /* XXX - See if hinting actually makes things faster !!! */
1501 /* See if we have a suitable entry already cached */
1505 /* We need to lock xdcache, then dcache, to handle situations where
1506 * the hint is on the free list. However, we can't safely do this
1507 * according to the locking hierarchy. So, use a non-blocking lock.
1509 ObtainReadLock(&afs_xdcache);
1510 dcLocked = ( 0 == NBObtainReadLock(&tdc->lock));
1512 if (dcLocked && (tdc->index != NULLIDX)
1513 && !FidCmp(&tdc->f.fid, &avc->f.fid)
1514 && tdc->f.chunk == AFS_CHUNK(offset)
1515 && !(afs_indexFlags[tdc->index] & (IFFree | IFDiscarded))) {
1516 /* Bonus - the hint was correct */
1519 /* Only destroy the hint if it's actually invalid, not if there's
1520 * just been a locking failure */
1522 ReleaseReadLock(&tdc->lock);
1529 ReleaseReadLock(&afs_xdcache);
1532 /* No hint, or hint is no longer valid - see if we can get something
1533 * directly from the dcache
1536 tdc = afs_FindDCache(avc, offset);
1539 ReleaseWriteLock(&avc->lock);
1544 ObtainReadLock(&tdc->lock);
1546 /* Is the dcache we've been given currently up to date? */
1547 if (!hsame(avc->f.m.DataVersion, tdc->f.versionNo) ||
1548 (tdc->dflags & DFFetching)) {
1549 ReleaseWriteLock(&avc->lock);
1550 ReleaseReadLock(&tdc->lock);
1555 /* Update our hint for future abuse */
1558 /* Okay, so we've now got a cache file that is up to date */
1560 /* XXX - I suspect we should be locking the inodes before we use them! */
1562 cacheFp = afs_linux_raw_open(&tdc->f.inode, NULL);
1563 pagevec_init(&lrupv, 0);
1565 code = afs_linux_read_cache(cacheFp, pp, tdc->f.chunk, &lrupv, NULL);
1567 if (pagevec_count(&lrupv))
1568 __pagevec_lru_add_file(&lrupv);
1570 filp_close(cacheFp, NULL);
1573 ReleaseReadLock(&tdc->lock);
1574 ReleaseWriteLock(&avc->lock);
1581 /* afs_linux_readpage
1583 * This function is split into two, because prepare_write/begin_write
1584 * require a readpage call which doesn't unlock the resulting page upon success.
1588 afs_linux_fillpage(struct file *fp, struct page *pp)
1593 struct iovec *iovecp;
1594 struct inode *ip = FILE_INODE(fp);
1595 afs_int32 cnt = page_count(pp);
1596 struct vcache *avc = VTOAFS(ip);
1597 afs_offs_t offset = page_offset(pp);
1601 if (afs_linux_readpage_fastpath(fp, pp, &code)) {
1611 auio = osi_Alloc(sizeof(uio_t));
1612 iovecp = osi_Alloc(sizeof(struct iovec));
1614 setup_uio(auio, iovecp, (char *)address, offset, PAGE_SIZE, UIO_READ,
1617 afs_maybe_lock_kernel();
1620 afs_Trace4(afs_iclSetp, CM_TRACE_READPAGE, ICL_TYPE_POINTER, ip,
1621 ICL_TYPE_POINTER, pp, ICL_TYPE_INT32, cnt, ICL_TYPE_INT32,
1622 99999); /* not a possible code value */
1624 code = afs_rdwr(avc, auio, UIO_READ, 0, credp);
1626 afs_Trace4(afs_iclSetp, CM_TRACE_READPAGE, ICL_TYPE_POINTER, ip,
1627 ICL_TYPE_POINTER, pp, ICL_TYPE_INT32, cnt, ICL_TYPE_INT32,
1629 AFS_DISCON_UNLOCK();
1631 afs_maybe_unlock_kernel();
1633 /* XXX valid for no-cache also? Check last bits of files... :)
1634 * Cognate code goes in afs_NoCacheFetchProc. */
1635 if (auio->uio_resid) /* zero remainder of page */
1636 memset((void *)(address + (PAGE_SIZE - auio->uio_resid)), 0,
1639 flush_dcache_page(pp);
1640 SetPageUptodate(pp);
1645 osi_Free(auio, sizeof(uio_t));
1646 osi_Free(iovecp, sizeof(struct iovec));
1649 return afs_convert_code(code);
1653 afs_linux_prefetch(struct file *fp, struct page *pp)
1656 struct vcache *avc = VTOAFS(FILE_INODE(fp));
1657 afs_offs_t offset = page_offset(pp);
1659 if (AFS_CHUNKOFFSET(offset) == 0) {
1661 struct vrequest treq;
1666 code = afs_InitReq(&treq, credp);
1667 if (!code && !NBObtainWriteLock(&avc->lock, 534)) {
1668 tdc = afs_FindDCache(avc, offset);
1670 if (!(tdc->mflags & DFNextStarted))
1671 afs_PrefetchChunk(avc, tdc, credp, &treq);
1674 ReleaseWriteLock(&avc->lock);
1679 return afs_convert_code(code);
1683 #if defined(AFS_CACHE_BYPASS)
1686 afs_linux_bypass_readpages(struct file *fp, struct address_space *mapping,
1687 struct list_head *page_list, unsigned num_pages)
1692 struct iovec* iovecp;
1693 struct nocache_read_request *ancr;
1694 struct page *pp, *ppt;
1695 struct pagevec lrupv;
1699 struct inode *ip = FILE_INODE(fp);
1700 struct vcache *avc = VTOAFS(ip);
1701 afs_int32 base_index = 0;
1702 afs_int32 page_count = 0;
1705 /* background thread must free: iovecp, auio, ancr */
1706 iovecp = osi_Alloc(num_pages * sizeof(struct iovec));
1708 auio = osi_Alloc(sizeof(uio_t));
1709 auio->uio_iov = iovecp;
1710 auio->uio_iovcnt = num_pages;
1711 auio->uio_flag = UIO_READ;
1712 auio->uio_seg = AFS_UIOSYS;
1713 auio->uio_resid = num_pages * PAGE_SIZE;
1715 ancr = osi_Alloc(sizeof(struct nocache_read_request));
1717 ancr->offset = auio->uio_offset;
1718 ancr->length = auio->uio_resid;
1720 pagevec_init(&lrupv, 0);
1722 for(page_ix = 0; page_ix < num_pages; ++page_ix) {
1724 if(list_empty(page_list))
1727 pp = list_entry(page_list->prev, struct page, lru);
1728 /* If we allocate a page and don't remove it from page_list,
1729 * the page cache gets upset. */
1731 isize = (i_size_read(fp->f_mapping->host) - 1) >> PAGE_CACHE_SHIFT;
1732 if(pp->index > isize) {
1739 offset = page_offset(pp);
1740 auio->uio_offset = offset;
1741 base_index = pp->index;
1743 iovecp[page_ix].iov_len = PAGE_SIZE;
1744 code = add_to_page_cache(pp, mapping, pp->index, GFP_KERNEL);
1745 if(base_index != pp->index) {
1748 page_cache_release(pp);
1749 iovecp[page_ix].iov_base = (void *) 0;
1757 page_cache_release(pp);
1758 iovecp[page_ix].iov_base = (void *) 0;
1761 if(!PageLocked(pp)) {
1765 /* save the page for background map */
1766 iovecp[page_ix].iov_base = (void*) pp;
1768 /* and put it on the LRU cache */
1769 if (!pagevec_add(&lrupv, pp))
1770 __pagevec_lru_add(&lrupv);
1774 /* If there were useful pages in the page list, make sure all pages
1775 * are in the LRU cache, then schedule the read */
1777 pagevec_lru_add(&lrupv);
1779 code = afs_ReadNoCache(avc, ancr, credp);
1782 /* If there is nothing for the background thread to handle,
1783 * it won't be freeing the things that we never gave it */
1784 osi_Free(iovecp, num_pages * sizeof(struct iovec));
1785 osi_Free(auio, sizeof(uio_t));
1786 osi_Free(ancr, sizeof(struct nocache_read_request));
1788 /* we do not flush, release, or unmap pages--that will be
1789 * done for us by the background thread as each page comes in
1790 * from the fileserver */
1792 return afs_convert_code(code);
1797 afs_linux_bypass_readpage(struct file *fp, struct page *pp)
1799 cred_t *credp = NULL;
1801 struct iovec *iovecp;
1802 struct nocache_read_request *ancr;
1807 /* receiver frees */
1808 auio = osi_Alloc(sizeof(uio_t));
1809 iovecp = osi_Alloc(sizeof(struct iovec));
1811 /* address can be NULL, because we overwrite it with 'pp', below */
1812 setup_uio(auio, iovecp, NULL, page_offset(pp),
1813 PAGE_SIZE, UIO_READ, AFS_UIOSYS);
1815 /* save the page for background map */
1816 /* XXX - Shouldn't we get a reference count here? */
1817 auio->uio_iov->iov_base = (void*) pp;
1818 /* the background thread will free this */
1819 ancr = osi_Alloc(sizeof(struct nocache_read_request));
1821 ancr->offset = offset;
1822 ancr->length = PAGE_SIZE;
1825 afs_maybe_lock_kernel();
1826 code = afs_ReadNoCache(VTOAFS(FILE_INODE(fp)), ancr, credp);
1827 afs_maybe_unlock_kernel();
1830 return afs_convert_code(code);
1834 afs_linux_can_bypass(struct inode *ip) {
1835 switch(cache_bypass_strategy) {
1836 case NEVER_BYPASS_CACHE:
1838 case ALWAYS_BYPASS_CACHE:
1840 case LARGE_FILES_BYPASS_CACHE:
1841 if(i_size_read(ip) > cache_bypass_threshold)
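/* Illustrative sketch only (not compiled): afs_linux_can_bypass() above is
 * shown without its return statements. Under the obvious reading of the three
 * policies it would be:
 */
#if 0
static int
afs_linux_can_bypass(struct inode *ip) {
    switch (cache_bypass_strategy) {
    case NEVER_BYPASS_CACHE:
        return 0;
    case ALWAYS_BYPASS_CACHE:
        return 1;
    case LARGE_FILES_BYPASS_CACHE:
        if (i_size_read(ip) > cache_bypass_threshold)
            return 1;
        /* fall through */
    default:
        return 0;
    }
}
#endif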
1848 /* Check if a file is permitted to bypass the cache by policy, and modify
1849 * the cache bypass state recorded for that file */
1852 afs_linux_bypass_check(struct inode *ip) {
1855 int bypass = afs_linux_can_bypass(ip);
1858 trydo_cache_transition(VTOAFS(ip), credp, bypass);
1866 afs_linux_bypass_check(struct inode *ip) {
1870 afs_linux_bypass_readpage(struct file *fp, struct page *pp) {
1874 afs_linux_bypass_readpages(struct file *fp, struct address_space *mapping,
1875 struct list_head *page_list, unsigned int num_pages) {
1881 afs_linux_readpage(struct file *fp, struct page *pp)
1885 if (afs_linux_bypass_check(FILE_INODE(fp))) {
1886 code = afs_linux_bypass_readpage(fp, pp);
1888 code = afs_linux_fillpage(fp, pp);
1890 code = afs_linux_prefetch(fp, pp);
1897 /* Readpages reads a number of pages for a particular file. We use
1898 * this to optimise the reading, by limiting the number of times
1899 * we have to look up, lock and open vcaches and dcaches.
1903 afs_linux_readpages(struct file *fp, struct address_space *mapping,
1904 struct list_head *page_list, unsigned int num_pages)
1906 struct inode *inode = mapping->host;
1907 struct vcache *avc = VTOAFS(inode);
1909 struct file *cacheFp = NULL;
1911 unsigned int page_idx;
1913 struct pagevec lrupv;
1914 struct afs_pagecopy_task *task;
1916 if (afs_linux_bypass_check(inode))
1917 return afs_linux_bypass_readpages(fp, mapping, page_list, num_pages);
1920 if ((code = afs_linux_VerifyVCache(avc, NULL))) {
1925 ObtainWriteLock(&avc->lock, 912);
1928 task = afs_pagecopy_init_task();
1931 pagevec_init(&lrupv, 0);
1932 for (page_idx = 0; page_idx < num_pages; page_idx++) {
1933 struct page *page = list_entry(page_list->prev, struct page, lru);
1934 list_del(&page->lru);
1935 offset = page_offset(page);
1937 if (tdc && tdc->f.chunk != AFS_CHUNK(offset)) {
1939 ReleaseReadLock(&tdc->lock);
1944 filp_close(cacheFp, NULL);
1949 if ((tdc = afs_FindDCache(avc, offset))) {
1950 ObtainReadLock(&tdc->lock);
1951 if (!hsame(avc->f.m.DataVersion, tdc->f.versionNo) ||
1952 (tdc->dflags & DFFetching)) {
1953 ReleaseReadLock(&tdc->lock);
1960 cacheFp = afs_linux_raw_open(&tdc->f.inode, NULL);
1963 if (tdc && !add_to_page_cache(page, mapping, page->index,
1965 page_cache_get(page);
1966 if (!pagevec_add(&lrupv, page))
1967 __pagevec_lru_add_file(&lrupv);
1969 afs_linux_read_cache(cacheFp, page, tdc->f.chunk, &lrupv, task);
1971 page_cache_release(page);
1973 if (pagevec_count(&lrupv))
1974 __pagevec_lru_add_file(&lrupv);
1977 filp_close(cacheFp, NULL);
1979 afs_pagecopy_put_task(task);
1983 ReleaseReadLock(&tdc->lock);
1987 ReleaseWriteLock(&avc->lock);
1992 /* Prepare an AFS vcache for writeback. Should be called with the vcache locked. */
1995 afs_linux_prepare_writeback(struct vcache *avc) {
1996 if (avc->f.states & CPageWrite) {
1997 return AOP_WRITEPAGE_ACTIVATE;
1999 avc->f.states |= CPageWrite;
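/* Illustrative sketch only (not compiled): the guard above presumably closes
 * by returning success once CPageWrite is set, giving a simple one-writer
 * latch on the vcache:
 */
#if 0
static inline int
afs_linux_prepare_writeback(struct vcache *avc) {
    if (avc->f.states & CPageWrite)
        return AOP_WRITEPAGE_ACTIVATE;  /* writeback already in progress */
    avc->f.states |= CPageWrite;
    return 0;
}
#endif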
2004 afs_linux_dopartialwrite(struct vcache *avc, cred_t *credp) {
2005 struct vrequest treq;
2008 if (!afs_InitReq(&treq, credp))
2009 code = afs_DoPartialWrite(avc, &treq);
2011 return afs_convert_code(code);
2015 afs_linux_complete_writeback(struct vcache *avc) {
2016 avc->f.states &= ~CPageWrite;
2019 /* Writeback a given page synchronously. Called with no AFS locks held */
2021 afs_linux_page_writeback(struct inode *ip, struct page *pp,
2022 unsigned long offset, unsigned int count,
2025 struct vcache *vcp = VTOAFS(ip);
2033 buffer = kmap(pp) + offset;
2034 base = page_offset(pp) + offset;
2036 afs_maybe_lock_kernel();
2038 afs_Trace4(afs_iclSetp, CM_TRACE_UPDATEPAGE, ICL_TYPE_POINTER, vcp,
2039 ICL_TYPE_POINTER, pp, ICL_TYPE_INT32, page_count(pp),
2040 ICL_TYPE_INT32, 99999);
2042 setup_uio(&tuio, &iovec, buffer, base, count, UIO_WRITE, AFS_UIOSYS);
2044 code = afs_write(vcp, &tuio, f_flags, credp, 0);
2046 i_size_write(ip, vcp->f.m.Length);
2047 ip->i_blocks = ((vcp->f.m.Length + 1023) >> 10) << 1;
2049 code = code ? afs_convert_code(code) : count - tuio.uio_resid;
2051 afs_Trace4(afs_iclSetp, CM_TRACE_UPDATEPAGE, ICL_TYPE_POINTER, vcp,
2052 ICL_TYPE_POINTER, pp, ICL_TYPE_INT32, page_count(pp),
2053 ICL_TYPE_INT32, code);
2056 afs_maybe_unlock_kernel();
2063 afs_linux_writepage_sync(struct inode *ip, struct page *pp,
2064 unsigned long offset, unsigned int count)
2068 struct vcache *vcp = VTOAFS(ip);
2071 /* Catch recursive writeback. This occurs if the kernel decides
2072 * writeback is required whilst we are writing to the cache, or
2073 * flushing to the server. */
2075 ObtainWriteLock(&vcp->lock, 532);
2076 code = afs_linux_prepare_writeback(vcp);
2078 ReleaseWriteLock(&vcp->lock);
2082 ReleaseWriteLock(&vcp->lock);
2086 code = afs_linux_page_writeback(ip, pp, offset, count, credp);
2088 afs_maybe_lock_kernel();
2090 ObtainWriteLock(&vcp->lock, 533);
2092 code1 = afs_linux_dopartialwrite(vcp, credp);
2093 afs_linux_complete_writeback(vcp);
2094 ReleaseWriteLock(&vcp->lock);
2096 afs_maybe_unlock_kernel();
2106 #ifdef AOP_WRITEPAGE_TAKES_WRITEBACK_CONTROL
2107 afs_linux_writepage(struct page *pp, struct writeback_control *wbc)
2109 afs_linux_writepage(struct page *pp)
2112 struct address_space *mapping = pp->mapping;
2113 struct inode *inode;
2116 unsigned int to = PAGE_CACHE_SIZE;
2121 if (PageReclaim(pp)) {
2122 return AOP_WRITEPAGE_ACTIVATE;
2123 /* XXX - Do we need to redirty the page here? */
2128 inode = mapping->host;
2129 vcp = VTOAFS(inode);
2130 isize = i_size_read(inode);
2132 /* Don't defeat an earlier truncate */
2133 if (page_offset(pp) > isize)
2137 ObtainWriteLock(&vcp->lock, 537);
2138 code = afs_linux_prepare_writeback(vcp);
2140 ReleaseWriteLock(&vcp->lock);
2144 /* Grab the creds structure currently held in the vnode, and
2145 * get a reference to it, in case it goes away ... */
2148 ReleaseWriteLock(&vcp->lock);
2151 /* If this is the final page, then just write the number of bytes that
2152 * are actually in it */
2153 if ((isize - page_offset(pp)) < to )
2154 to = isize - page_offset(pp);
2156 code = afs_linux_page_writeback(inode, pp, 0, to, credp);
2158 afs_maybe_lock_kernel();
2160 ObtainWriteLock(&vcp->lock, 538);
2162 /* As much as we might like to ignore a file server error here,
2163 * and just try again when we close(), unfortunately StoreAllSegments
2164 * will invalidate our chunks if the server returns a permanent error,
2165 * so we need to at least try and get that error back to the user
2168 code1 = afs_linux_dopartialwrite(vcp, credp);
2170 afs_linux_complete_writeback(vcp);
2171 ReleaseWriteLock(&vcp->lock);
2174 afs_maybe_unlock_kernel();
2177 SetPageUptodate(pp);
2178 if ( code != AOP_WRITEPAGE_ACTIVATE ) {
2179 /* XXX - do we need to redirty the page here? */
2183 page_cache_release(pp);
2194 /* afs_linux_permission
2195 * Check access rights - returns error if can't check or permission denied.
2198 #ifdef IOP_PERMISSION_TAKES_NAMEIDATA
2199 afs_linux_permission(struct inode *ip, int mode, struct nameidata *nd)
2201 afs_linux_permission(struct inode *ip, int mode)
2205 cred_t *credp = crref();
2209 if (mode & MAY_EXEC)
2211 if (mode & MAY_READ)
2213 if (mode & MAY_WRITE)
2215 code = afs_access(VTOAFS(ip), tmp, credp);
2219 return afs_convert_code(code);
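/* Illustrative sketch only (not compiled): the elided lines above translate
 * the Linux MAY_* mode bits into the VEXEC/VREAD/VWRITE bits that
 * afs_access() expects, roughly:
 */
#if 0
    int tmp = 0;
    if (mode & MAY_EXEC)
        tmp |= VEXEC;
    if (mode & MAY_READ)
        tmp |= VREAD;
    if (mode & MAY_WRITE)
        tmp |= VWRITE;
    code = afs_access(VTOAFS(ip), tmp, credp);
#endif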
2222 #if !defined(HAVE_WRITE_BEGIN)
2224 afs_linux_commit_write(struct file *file, struct page *page, unsigned offset,
2229 code = afs_linux_writepage_sync(file->f_dentry->d_inode, page,
2230 offset, to - offset);
2236 afs_linux_prepare_write(struct file *file, struct page *page, unsigned from,
2244 afs_linux_write_end(struct file *file, struct address_space *mapping,
2245 loff_t pos, unsigned len, unsigned copied,
2246 struct page *page, void *fsdata)
2249 unsigned from = pos & (PAGE_CACHE_SIZE - 1);
2251 code = afs_linux_writepage_sync(file->f_dentry->d_inode, page,
2254 page_cache_release(page);
2259 afs_linux_write_begin(struct file *file, struct address_space *mapping,
2260 loff_t pos, unsigned len, unsigned flags,
2261 struct page **pagep, void **fsdata)
2264 pgoff_t index = pos >> PAGE_CACHE_SHIFT;
2265 page = grab_cache_page_write_begin(mapping, index, flags);
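/* Illustrative sketch only (not compiled): after grabbing the page above,
 * write_begin just hands it back to the VFS; the NULL check is an assumption,
 * not copied from the elided lines:
 */
#if 0
    if (!page)
        return -ENOMEM;
    *pagep = page;
    return 0;
#endif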
2273 static struct inode_operations afs_file_iops = {
2274 .permission = afs_linux_permission,
2275 .getattr = afs_linux_getattr,
2276 .setattr = afs_notify_change,
2279 static struct address_space_operations afs_file_aops = {
2280 .readpage = afs_linux_readpage,
2281 .readpages = afs_linux_readpages,
2282 .writepage = afs_linux_writepage,
2283 #if defined (HAVE_WRITE_BEGIN)
2284 .write_begin = afs_linux_write_begin,
2285 .write_end = afs_linux_write_end,
2287 .commit_write = afs_linux_commit_write,
2288 .prepare_write = afs_linux_prepare_write,
2293 /* Separate ops vector for directories. Linux 2.2 tests the type of an inode
2294 * by what sort of operation is allowed on it.
2297 static struct inode_operations afs_dir_iops = {
2298 .setattr = afs_notify_change,
2299 .create = afs_linux_create,
2300 .lookup = afs_linux_lookup,
2301 .link = afs_linux_link,
2302 .unlink = afs_linux_unlink,
2303 .symlink = afs_linux_symlink,
2304 .mkdir = afs_linux_mkdir,
2305 .rmdir = afs_linux_rmdir,
2306 .rename = afs_linux_rename,
2307 .getattr = afs_linux_getattr,
2308 .permission = afs_linux_permission,
2311 /* We really need a separate symlink set of ops, since do_follow_link()
2312 * determines if it _is_ a link by checking if the follow_link op is set.
2314 #if defined(USABLE_KERNEL_PAGE_SYMLINK_CACHE)
2316 afs_symlink_filler(struct file *file, struct page *page)
2318 struct inode *ip = (struct inode *)page->mapping->host;
2319 char *p = (char *)kmap(page);
2322 afs_maybe_lock_kernel();
2324 code = afs_linux_ireadlink(ip, p, PAGE_SIZE, AFS_UIOSYS);
2329 p[code] = '\0'; /* null terminate? */
2330 afs_maybe_unlock_kernel();
2332 SetPageUptodate(page);
2338 afs_maybe_unlock_kernel();
2346 static struct address_space_operations afs_symlink_aops = {
2347 .readpage = afs_symlink_filler
2349 #endif /* USABLE_KERNEL_PAGE_SYMLINK_CACHE */
2351 static struct inode_operations afs_symlink_iops = {
2352 #if defined(USABLE_KERNEL_PAGE_SYMLINK_CACHE)
2353 .readlink = page_readlink,
2354 # if defined(HAVE_KERNEL_PAGE_FOLLOW_LINK)
2355 .follow_link = page_follow_link,
2357 .follow_link = page_follow_link_light,
2358 .put_link = page_put_link,
2360 #else /* !defined(USABLE_KERNEL_PAGE_SYMLINK_CACHE) */
2361 .readlink = afs_linux_readlink,
2362 .follow_link = afs_linux_follow_link,
2363 #endif /* USABLE_KERNEL_PAGE_SYMLINK_CACHE */
2364 .setattr = afs_notify_change,
2368 afs_fill_inode(struct inode *ip, struct vattr *vattr)
2372 vattr2inode(ip, vattr);
2374 ip->i_mapping->backing_dev_info = &afs_backing_dev_info;
2375 /* Reset ops if symlink or directory. */
2376 if (S_ISREG(ip->i_mode)) {
2377 ip->i_op = &afs_file_iops;
2378 ip->i_fop = &afs_file_fops;
2379 ip->i_data.a_ops = &afs_file_aops;
2381 } else if (S_ISDIR(ip->i_mode)) {
2382 ip->i_op = &afs_dir_iops;
2383 ip->i_fop = &afs_dir_fops;
2385 } else if (S_ISLNK(ip->i_mode)) {
2386 ip->i_op = &afs_symlink_iops;
2387 #if defined(USABLE_KERNEL_PAGE_SYMLINK_CACHE)
2388 ip->i_data.a_ops = &afs_symlink_aops;
2389 ip->i_mapping = &ip->i_data;