/*
 * Copyright 2000, International Business Machines Corporation and others.
 * All Rights Reserved.
 *
 * This software has been released under the terms of the IBM Public
 * License. For details, see the LICENSE file in the top-level source
 * directory or online at http://www.openafs.org/dl/license10.html
 */

/*
 * Linux specific vnodeops. Also includes the glue routines required to call
 * AFS vnodeops.
 *
 * So far the only truly scary part is that Linux relies on the inode cache
 * to be up to date. Don't you dare break a callback and expect an fstat
 * to give you meaningful information. This appears to be fixed in the 2.1
 * development kernels. As it is we can fix this now by intercepting the
 * stat calls.
 */
#include <afsconfig.h>
#include "afs/param.h"

#include "afs/sysincludes.h"
#include "afsincludes.h"
#include "afs/afs_stats.h"

#ifdef HAVE_MM_INLINE_H
#include <linux/mm_inline.h>
#endif
#include <linux/pagemap.h>
#include <linux/smp_lock.h>
#include <linux/writeback.h>
#include <linux/pagevec.h>
#if defined(AFS_CACHE_BYPASS)
#include "afs/afs_bypasscache.h"
#endif

#include "osi_compat.h"
#include "osi_pagecopy.h"

#ifndef HAVE_PAGEVEC_LRU_ADD_FILE
#define __pagevec_lru_add_file __pagevec_lru_add
#endif
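/* afs_convert_code() below clamps error codes to this bound before negating
 * them for the VFS; provide a fallback definition for kernels whose headers
 * don't export one. */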
#ifndef MAX_ERRNO
#define MAX_ERRNO 1000L
#endif
extern struct backing_dev_info afs_backing_dev_info;

extern struct vcache *afs_globalVp;
extern int afs_notify_change(struct dentry *dp, struct iattr *iattrp);

/* Some uses of BKL are perhaps not needed for bypass or memcache--
 * why don't we try it out? */
extern struct afs_cacheOps afs_UfsCacheOps;
static inline void
afs_maybe_lock_kernel(void) {
    if(afs_cacheType == &afs_UfsCacheOps)
        lock_kernel();
}

static inline void
afs_maybe_unlock_kernel(void) {
    if(afs_cacheType == &afs_UfsCacheOps)
        unlock_kernel();
}
/* This function converts a positive error code from AFS into a negative
 * code suitable for passing into the Linux VFS layer. It checks that the
 * error code is within the permissible bounds for the ERR_PTR mechanism.
 *
 * _All_ error codes which come from the AFS layer should be passed through
 * this function before being returned to the kernel.
 */
static inline int
afs_convert_code(int code) {
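    /* e.g. a positive EACCES from the cache manager becomes -EACCES, which
     * the VFS and the ERR_PTR() machinery can both represent; out-of-range
     * values are folded to -EIO rather than leaking to userspace. */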
    if ((code >= 0) && (code <= MAX_ERRNO))
        return -code;
    else
        return -EIO;
}
/* Linux doesn't require a credp for many functions, and crref is an expensive
 * operation. This helper function avoids obtaining it for VerifyVCache calls
 */
static inline int
afs_linux_VerifyVCache(struct vcache *avc, cred_t **retcred) {
    cred_t *credp = NULL;
    struct vrequest treq;
    int code = 0;

    if (avc->f.states & CStatd) {
        if (retcred)
            *retcred = crref();
        return 0;
    }

    credp = crref();
    code = afs_InitReq(&treq, credp);
    if (code == 0)
        code = afs_VerifyVCache2(avc, &treq);

    if (retcred)
        *retcred = credp;
    else
        crfree(credp);

    return afs_convert_code(code);
}
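/* afs_linux_read
 * Wrapper for the generic read path: revalidate the vcache and flush any
 * stale pages before letting do_sync_read() do the real work. */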
static ssize_t
afs_linux_read(struct file *fp, char *buf, size_t count, loff_t * offp)
{
    ssize_t code = 0;
    struct vcache *vcp = VTOAFS(fp->f_dentry->d_inode);

    afs_Trace4(afs_iclSetp, CM_TRACE_READOP, ICL_TYPE_POINTER, vcp,
               ICL_TYPE_OFFSET, offp, ICL_TYPE_INT32, count, ICL_TYPE_INT32,
               99999);
    code = afs_linux_VerifyVCache(vcp, NULL);

    if (code == 0) {
        /* Linux's FlushPages implementation doesn't ever use credp,
         * so we optimise by not using it */
        osi_FlushPages(vcp, NULL);	/* ensure stale pages are gone */
        code = do_sync_read(fp, buf, count, offp);
    }

    afs_Trace4(afs_iclSetp, CM_TRACE_READOP, ICL_TYPE_POINTER, vcp,
               ICL_TYPE_OFFSET, offp, ICL_TYPE_INT32, count, ICL_TYPE_INT32,
               code);
    return code;
}
/* Now we have integrated VM for writes as well as reads. generic_file_write
 * also takes care of re-positioning the pointer if file is open in append
 * mode. Call fake open/close to ensure we do writes of core dumps.
 */
static ssize_t
afs_linux_write(struct file *fp, const char *buf, size_t count, loff_t * offp)
{
    ssize_t code = 0;
    struct vcache *vcp = VTOAFS(fp->f_dentry->d_inode);
    cred_t *credp;

    afs_Trace4(afs_iclSetp, CM_TRACE_WRITEOP, ICL_TYPE_POINTER, vcp,
               ICL_TYPE_OFFSET, offp, ICL_TYPE_INT32, count, ICL_TYPE_INT32,
               (fp->f_flags & O_APPEND) ? 99998 : 99999);

    code = afs_linux_VerifyVCache(vcp, &credp);

    ObtainWriteLock(&vcp->lock, 529);
    afs_FakeOpen(vcp);
    ReleaseWriteLock(&vcp->lock);

    code = do_sync_write(fp, buf, count, offp);

    ObtainWriteLock(&vcp->lock, 530);

    if (vcp->execsOrWriters == 1 && !credp)
        credp = crref();

    afs_FakeClose(vcp, credp);
    ReleaseWriteLock(&vcp->lock);

    afs_Trace4(afs_iclSetp, CM_TRACE_WRITEOP, ICL_TYPE_POINTER, vcp,
               ICL_TYPE_OFFSET, offp, ICL_TYPE_INT32, count, ICL_TYPE_INT32,
               code);
    return code;
}
extern int BlobScan(struct dcache * afile, afs_int32 ablob);

/* This is a complete rewrite of afs_readdir, since we can make use of
 * filldir instead of afs_readdir_move. Note that changes to vcache/dcache
 * handling and use of bulkstats will need to be reflected here as well.
 */
static int
afs_linux_readdir(struct file *fp, void *dirbuf, filldir_t filldir)
{
    struct vcache *avc = VTOAFS(FILE_INODE(fp));
    struct vrequest treq;
    register struct dcache *tdc;
    int code;
    int offset;
    int dirpos;
    struct DirEntry *de;
    ino_t ino;
    int len;
    afs_size_t origOffset, tlen;
    cred_t *credp = crref();
    struct afs_fakestat_state fakestat;

    afs_maybe_lock_kernel();
    AFS_STATCNT(afs_readdir);

    code = afs_convert_code(afs_InitReq(&treq, credp));

    afs_InitFakeStat(&fakestat);
    code = afs_convert_code(afs_EvalFakeStat(&avc, &fakestat, &treq));

    /* update the cache entry */
    code = afs_convert_code(afs_VerifyVCache2(avc, &treq));

    /* get a reference to the entire directory */
    tdc = afs_GetDCache(avc, (afs_size_t) 0, &treq, &origOffset, &tlen, 1);

    ObtainSharedLock(&avc->lock, 810);
    UpgradeSToWLock(&avc->lock, 811);
    ObtainReadLock(&tdc->lock);
    /*
     * Make sure that the data in the cache is current. There are two
     * cases we need to worry about:
     * 1. The cache data is being fetched by another process.
     * 2. The cache data is no longer valid
     */
    while ((avc->f.states & CStatd)
           && (tdc->dflags & DFFetching)
           && hsame(avc->f.m.DataVersion, tdc->f.versionNo)) {
        ReleaseReadLock(&tdc->lock);
        ReleaseSharedLock(&avc->lock);
        afs_osi_Sleep(&tdc->validPos);
        ObtainSharedLock(&avc->lock, 812);
        ObtainReadLock(&tdc->lock);
    }
    if (!(avc->f.states & CStatd)
        || !hsame(avc->f.m.DataVersion, tdc->f.versionNo)) {
        ReleaseReadLock(&tdc->lock);
        ReleaseSharedLock(&avc->lock);
        afs_PutDCache(tdc);
        goto out;
    }

    /* Set the readdir-in-progress flag, and downgrade the lock
     * to shared so others will be able to acquire a read lock.
     */
    avc->f.states |= CReadDir;
    avc->dcreaddir = tdc;
    avc->readdir_pid = MyPidxx2Pid(MyPidxx);
    ConvertWToSLock(&avc->lock);
    /* Fill in until we get an error or we're done. This implementation
     * takes an offset in units of blobs, rather than bytes.
     */
    offset = (int) fp->f_pos;
    while (1) {
        dirpos = BlobScan(tdc, offset);
        if (!dirpos)
            break;

        de = afs_dir_GetBlob(tdc, dirpos);
        if (!de)
            break;

        ino = afs_calc_inum(avc->f.fid.Fid.Volume, ntohl(de->fid.vnode));

        if (de->name)
            len = strlen(de->name);
        else {
            printf("afs_linux_readdir: afs_dir_GetBlob failed, null name (inode %lx, dirpos %d)\n",
                   (unsigned long)&tdc->f.inode, dirpos);
            DRelease(de, 0);
            ReleaseSharedLock(&avc->lock);
            afs_PutDCache(tdc);
            code = -ENOENT;
            goto out;
        }
        /* filldir returns -EINVAL when the buffer is full. */
        {
            unsigned int type = DT_UNKNOWN;
            struct VenusFid afid;
            struct vcache *tvc;
            int vtype;

            afid.Cell = avc->f.fid.Cell;
            afid.Fid.Volume = avc->f.fid.Fid.Volume;
            afid.Fid.Vnode = ntohl(de->fid.vnode);
            afid.Fid.Unique = ntohl(de->fid.vunique);
            if ((avc->f.states & CForeign) == 0 && (ntohl(de->fid.vnode) & 1)) {
                type = DT_DIR;
            } else if ((tvc = afs_FindVCache(&afid, 0, 0))) {
                if (tvc->mvstat) {
                    /* a mount point */
                } else if (((tvc->f.states) & (CStatd | CTruth))) {
                    /* CTruth will be set if the object has
                     * been statd */
                    vtype = vType(tvc);
                    if (vtype == VDIR)
                        type = DT_DIR;
                    else if (vtype == VREG)
                        type = DT_REG;
                    /* Don't do this until we're sure it can't be a mtpt */
                    /* else if (vtype == VLNK)
                     *     type = DT_LNK; */
                    /* what other types does AFS support? */
                }
                /* clean up from afs_FindVCache */
                afs_PutVCache(tvc);
            }
            /*
             * If this is NFS readdirplus, then the filler is going to
             * call getattr on this inode, which will deadlock if we're
             * holding the GLOCK.
             */
            code = (*filldir) (dirbuf, de->name, len, offset, ino, type);
        }
        DRelease(de, 0);
        if (code)
            break;
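        /* Each directory blob is 32 bytes; an entry occupies one blob plus
         * however many extra blobs a long name spills into, which is what
         * the "1 + ((len + 16) >> 5)" advance below accounts for. */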
        offset = dirpos + 1 + ((len + 16) >> 5);
    }
    /* If filldir didn't fill in the last one this is still pointing to that
     * entry. */
    fp->f_pos = (loff_t) offset;

    ReleaseReadLock(&tdc->lock);
    afs_PutDCache(tdc);
    UpgradeSToWLock(&avc->lock, 813);
    avc->f.states &= ~CReadDir;
    avc->dcreaddir = 0;
    avc->readdir_pid = 0;
    ReleaseSharedLock(&avc->lock);

out:
    afs_PutFakeStat(&fakestat);

    afs_maybe_unlock_kernel();
    crfree(credp);
    return code;
}
/* in afs_pioctl.c */
extern int afs_xioctl(struct inode *ip, struct file *fp, unsigned int com,
                      unsigned long arg);

#if defined(HAVE_UNLOCKED_IOCTL) || defined(HAVE_COMPAT_IOCTL)
static long afs_unlocked_xioctl(struct file *fp, unsigned int com,
                                unsigned long arg) {
    return afs_xioctl(FILE_INODE(fp), fp, com, arg);
}
#endif
static int
afs_linux_mmap(struct file *fp, struct vm_area_struct *vmap)
{
    struct vcache *vcp = VTOAFS(FILE_INODE(fp));
    int code;

    afs_Trace3(afs_iclSetp, CM_TRACE_GMAP, ICL_TYPE_POINTER, vcp,
               ICL_TYPE_POINTER, vmap->vm_start, ICL_TYPE_INT32,
               vmap->vm_end - vmap->vm_start);

    /* get a validated vcache entry */
    code = afs_linux_VerifyVCache(vcp, NULL);

    /* Linux's Flushpage implementation doesn't use credp, so optimise
     * our code to not need to crref() it */
    osi_FlushPages(vcp, NULL);	/* ensure stale pages are gone */

    code = generic_file_mmap(fp, vmap);
    if (!code)
        vcp->f.states |= CMAPPED;
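    /* CMAPPED records that this file has been memory-mapped, so pages may
     * now be dirtied behind the normal write() path; other parts of the
     * cache manager consult this flag before trusting write counts. */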
    return code;
}

static int
afs_linux_open(struct inode *ip, struct file *fp)
{
    struct vcache *vcp = VTOAFS(ip);
    cred_t *credp = crref();
    int code;

    afs_maybe_lock_kernel();
    code = afs_open(&vcp, fp->f_flags, credp);
    afs_maybe_unlock_kernel();

    crfree(credp);
    return afs_convert_code(code);
}

static int
afs_linux_release(struct inode *ip, struct file *fp)
{
    struct vcache *vcp = VTOAFS(ip);
    cred_t *credp = crref();
    int code = 0;

    afs_maybe_lock_kernel();
    code = afs_close(vcp, fp->f_flags, credp);
    afs_maybe_unlock_kernel();

    crfree(credp);
    return afs_convert_code(code);
}

static int
afs_linux_fsync(struct file *fp, struct dentry *dp, int datasync)
{
    int code;
    struct inode *ip = FILE_INODE(fp);
    cred_t *credp = crref();

    afs_maybe_lock_kernel();
    code = afs_fsync(VTOAFS(ip), credp);
    afs_maybe_unlock_kernel();

    crfree(credp);
    return afs_convert_code(code);
}
static int
afs_linux_lock(struct file *fp, int cmd, struct file_lock *flp)
{
    int code = 0;
    struct vcache *vcp = VTOAFS(FILE_INODE(fp));
    cred_t *credp = crref();
    struct AFS_FLOCK flock;

    /* Convert to a lock format afs_lockctl understands. */
    memset((char *)&flock, 0, sizeof(flock));
    flock.l_type = flp->fl_type;
    flock.l_pid = flp->fl_pid;
    flock.l_whence = 0;
    flock.l_start = flp->fl_start;
    flock.l_len = flp->fl_end - flp->fl_start + 1;

    /* Safe because there are no large files, yet */
#if defined(F_GETLK64) && (F_GETLK != F_GETLK64)
    if (cmd == F_GETLK64)
        cmd = F_GETLK;
    else if (cmd == F_SETLK64)
        cmd = F_SETLK;
    else if (cmd == F_SETLKW64)
        cmd = F_SETLKW;
#endif /* F_GETLK64 && F_GETLK != F_GETLK64 */

    code = afs_lockctl(vcp, &flock, cmd, credp);

    if ((code == 0 || flp->fl_type == F_UNLCK) &&
        (cmd == F_SETLK || cmd == F_SETLKW)) {
        code = afs_posix_lock_file(fp, flp);
        if (code && flp->fl_type != F_UNLCK) {
            struct AFS_FLOCK flock2;
            flock2 = flock;
            flock2.l_type = F_UNLCK;
            afs_lockctl(vcp, &flock2, F_SETLK, credp);
        }
    }
    /* If lockctl says there are no conflicting locks, then also check with the
     * kernel, as lockctl knows nothing about byte range locks
     */
    if (code == 0 && cmd == F_GETLK && flock.l_type == F_UNLCK) {
        afs_posix_test_lock(fp, flp);
        /* If we found a lock in the kernel's structure, return it */
        if (flp->fl_type != F_UNLCK) {
            crfree(credp);
            return 0;
        }
    }

    /* Convert flock back to Linux's file_lock */
    flp->fl_type = flock.l_type;
    flp->fl_pid = flock.l_pid;
    flp->fl_start = flock.l_start;
    flp->fl_end = flock.l_start + flock.l_len - 1;

    crfree(credp);
    return afs_convert_code(code);
}
#ifdef STRUCT_FILE_OPERATIONS_HAS_FLOCK
static int
afs_linux_flock(struct file *fp, int cmd, struct file_lock *flp) {
    int code = 0;
    struct vcache *vcp = VTOAFS(FILE_INODE(fp));
    cred_t *credp = crref();
    struct AFS_FLOCK flock;

    /* Convert to a lock format afs_lockctl understands. */
    memset((char *)&flock, 0, sizeof(flock));
    flock.l_type = flp->fl_type;
    flock.l_pid = flp->fl_pid;
    flock.l_whence = 0;
    flock.l_start = 0;
    flock.l_len = OFFSET_MAX;

    /* Safe because there are no large files, yet */
#if defined(F_GETLK64) && (F_GETLK != F_GETLK64)
    if (cmd == F_GETLK64)
        cmd = F_GETLK;
    else if (cmd == F_SETLK64)
        cmd = F_SETLK;
    else if (cmd == F_SETLKW64)
        cmd = F_SETLKW;
#endif /* F_GETLK64 && F_GETLK != F_GETLK64 */

    code = afs_lockctl(vcp, &flock, cmd, credp);

    if ((code == 0 || flp->fl_type == F_UNLCK) &&
        (cmd == F_SETLK || cmd == F_SETLKW)) {
        flp->fl_flags &= ~FL_SLEEP;
        code = flock_lock_file_wait(fp, flp);
        if (code && flp->fl_type != F_UNLCK) {
            struct AFS_FLOCK flock2;
            flock2 = flock;
            flock2.l_type = F_UNLCK;
            afs_lockctl(vcp, &flock2, F_SETLK, credp);
        }
    }

    /* Convert flock back to Linux's file_lock */
    flp->fl_type = flock.l_type;
    flp->fl_pid = flock.l_pid;

    crfree(credp);
    return afs_convert_code(code);
}
#endif
/*
 * essentially the same as afs_fsync() but we need to get the return
 * code for the sys_close() here, not afs_linux_release(), so call
 * afs_StoreAllSegments() with AFS_LASTSTORE
 */
static int
#if defined(FOP_FLUSH_TAKES_FL_OWNER_T)
afs_linux_flush(struct file *fp, fl_owner_t id)
#else
afs_linux_flush(struct file *fp)
#endif
{
    struct vrequest treq;
    struct vcache *vcp;
    cred_t *credp;
    int code = 0;
#if defined(AFS_CACHE_BYPASS)
    int bypasscache = 0;
#endif

    if ((fp->f_flags & O_ACCMODE) == O_RDONLY) { /* readers don't flush */
        return 0;
    }

    credp = crref();
    vcp = VTOAFS(FILE_INODE(fp));

    code = afs_InitReq(&treq, credp);
    if (code)
        goto out;
#if defined(AFS_CACHE_BYPASS)
    /* If caching is bypassed for this file, or globally, just return 0 */
    if (cache_bypass_strategy == ALWAYS_BYPASS_CACHE)
        bypasscache = 1;
    else {
        ObtainReadLock(&vcp->lock);
        if (vcp->cachingStates & FCSBypass)
            bypasscache = 1;
        ReleaseReadLock(&vcp->lock);
    }
    if (bypasscache) {
        /* future proof: don't rely on 0 return from afs_InitReq */
        code = 0;
        goto out;
    }
#endif

    ObtainSharedLock(&vcp->lock, 535);
    if ((vcp->execsOrWriters > 0) && (file_count(fp) == 1)) {
        UpgradeSToWLock(&vcp->lock, 536);
        if (!AFS_IS_DISCONNECTED) {
            code = afs_StoreAllSegments(vcp, &treq,
                                        AFS_SYNC | AFS_LASTSTORE);
        } else {
            afs_DisconAddDirty(vcp, VDisconWriteOsiFlush, 1);
        }
        ConvertWToSLock(&vcp->lock);
    }
    code = afs_CheckCode(code, &treq, 54);
    ReleaseSharedLock(&vcp->lock);

out:
    crfree(credp);
    return afs_convert_code(code);
}
struct file_operations afs_dir_fops = {
    .read = generic_read_dir,
    .readdir = afs_linux_readdir,
#ifdef HAVE_UNLOCKED_IOCTL
    .unlocked_ioctl = afs_unlocked_xioctl,
#else
    .ioctl = afs_xioctl,
#endif
#ifdef HAVE_COMPAT_IOCTL
    .compat_ioctl = afs_unlocked_xioctl,
#endif
    .open = afs_linux_open,
    .release = afs_linux_release,
};

struct file_operations afs_file_fops = {
    .read = afs_linux_read,
    .write = afs_linux_write,
#ifdef GENERIC_FILE_AIO_READ
    .aio_read = generic_file_aio_read,
    .aio_write = generic_file_aio_write,
#endif
#ifdef HAVE_UNLOCKED_IOCTL
    .unlocked_ioctl = afs_unlocked_xioctl,
#else
    .ioctl = afs_xioctl,
#endif
#ifdef HAVE_COMPAT_IOCTL
    .compat_ioctl = afs_unlocked_xioctl,
#endif
    .mmap = afs_linux_mmap,
    .open = afs_linux_open,
    .flush = afs_linux_flush,
#if defined(STRUCT_FILE_OPERATIONS_HAS_SENDFILE)
    .sendfile = generic_file_sendfile,
#endif
#if defined(STRUCT_FILE_OPERATIONS_HAS_SPLICE)
    .splice_write = generic_file_splice_write,
    .splice_read = generic_file_splice_read,
#endif
    .release = afs_linux_release,
    .fsync = afs_linux_fsync,
    .lock = afs_linux_lock,
#ifdef STRUCT_FILE_OPERATIONS_HAS_FLOCK
    .flock = afs_linux_flock,
#endif
};
/**********************************************************************
 * AFS Linux dentry operations
 **********************************************************************/

/* check_bad_parent() : Checks if this dentry's vcache is a root vcache
 * that has its mvid (parent dir's fid) pointer set to the wrong directory
 * due to being mounted in multiple points at once. If so, check_bad_parent()
 * calls afs_lookup() to correct the vcache's mvid, as well as the volume's
 * dotdotfid and mtpoint fid members.
 *
 * dp - dentry to be checked.
 *
 * This dentry's vcache's mvid will be set to the correct parent directory's
 * fid.
 * This root vnode's volume will have its dotdotfid and mtpoint fids set
 * to the correct parent and mountpoint fids.
 */
static void
check_bad_parent(struct dentry *dp)
{
    cred_t *credp;
    struct vcache *vcp = VTOAFS(dp->d_inode), *avc = NULL;
    struct vcache *pvc = VTOAFS(dp->d_parent->d_inode);

    if (vcp->mvid->Fid.Volume != pvc->f.fid.Fid.Volume) {	/* bad parent */
        credp = crref();

        /* force a lookup, so vcp->mvid is fixed up */
        afs_lookup(pvc, (char *)dp->d_name.name, &avc, credp);
        if (!avc || vcp != avc) {	/* bad, very bad.. */
            afs_Trace4(afs_iclSetp, CM_TRACE_TMP_1S3L, ICL_TYPE_STRING,
                       "check_bad_parent: bad pointer returned from afs_lookup origvc newvc dentry",
                       ICL_TYPE_POINTER, vcp, ICL_TYPE_POINTER, avc,
                       ICL_TYPE_POINTER, dp);
        }
        if (avc)
            AFS_RELE(AFSTOV(avc));
        crfree(credp);
    }
}
/* afs_linux_revalidate
 * Ensure vcache is stat'd before use. Return 0 if entry is valid.
 */
static int
afs_linux_revalidate(struct dentry *dp)
{
    struct vattr vattr;
    struct vcache *vcp = VTOAFS(dp->d_inode);
    cred_t *credp = NULL;
    int code;

    if (afs_shuttingdown)
        return EIO;

    afs_maybe_lock_kernel();

    /* Make this a fast path (no crref), since it's called so often. */
    if (vcp->f.states & CStatd) {
        if (*dp->d_name.name != '/' && vcp->mvstat == 2)	/* root vnode */
            check_bad_parent(dp);	/* check and correct mvid */
    }

    /* This avoids the crref when we don't have to do it. Watch for
     * changes in afs_getattr that don't get replicated here!
     */
    if (vcp->f.states & CStatd &&
        (!afs_fakestat_enable || vcp->mvstat != 1) &&
        !afs_shuttingdown) {
        code = afs_CopyOutAttrs(vcp, &vattr);
    } else {
        credp = crref();
        code = afs_getattr(vcp, &vattr, credp);
    }
    if (!code)
        afs_fill_inode(AFSTOV(vcp), &vattr);

    afs_maybe_unlock_kernel();
    if (credp)
        crfree(credp);

    return afs_convert_code(code);
}
static int
afs_linux_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat)
{
    int err = afs_linux_revalidate(dentry);
    if (!err) {
        generic_fillattr(dentry->d_inode, stat);
    }
    return err;
}
/* Validate a dentry. Return 1 if unchanged, 0 if VFS layer should re-evaluate.
 * In kernels 2.2.10 and above, we are passed an additional flags var which
 * may have either the LOOKUP_FOLLOW OR LOOKUP_DIRECTORY set in which case
 * we are advised to follow the entry if it is a link or to make sure that
 * it is a directory. But since the kernel itself checks these possibilities
 * later on, we shouldn't have to do it until later. Perhaps in the future..
 */
static int
#ifdef DOP_REVALIDATE_TAKES_NAMEIDATA
afs_linux_dentry_revalidate(struct dentry *dp, struct nameidata *nd)
#else
afs_linux_dentry_revalidate(struct dentry *dp, int flags)
#endif
{
    struct vattr vattr;
    cred_t *credp = NULL;
    struct vcache *vcp, *pvcp, *tvc = NULL;
    int valid;
    struct afs_fakestat_state fakestate;

    afs_maybe_lock_kernel();
    afs_InitFakeStat(&fakestate);

    if (dp->d_inode) {
        vcp = VTOAFS(dp->d_inode);
        pvcp = VTOAFS(dp->d_parent->d_inode);	/* dget_parent()? */

        if (vcp == afs_globalVp)
            goto good_dentry;

        if (vcp->mvstat == 1) {	/* mount point */
            if (vcp->mvid && (vcp->f.states & CMValid)) {
                int tryEvalOnly = 0;
                int code = 0;
                struct vrequest treq;

                credp = crref();
                code = afs_InitReq(&treq, credp);
                if ((strcmp(dp->d_name.name, ".directory") == 0)) {
                    tryEvalOnly = 1;
                }
                if (tryEvalOnly)
                    code = afs_TryEvalFakeStat(&vcp, &fakestate, &treq);
                else
                    code = afs_EvalFakeStat(&vcp, &fakestate, &treq);
                if ((tryEvalOnly && vcp->mvstat == 1) || code) {
                    /* a mount point, not yet replaced by its directory */
                    goto bad_dentry;
                }
            }
        } else if (*dp->d_name.name != '/' && vcp->mvstat == 2)	/* root vnode */
            check_bad_parent(dp);	/* check and correct mvid */

#ifdef notdef
        /* If the last looker changes, we should make sure the current
         * looker still has permission to examine this file. This would
         * always require a crref() which would be "slow".
         */
        if (vcp->last_looker != treq.uid) {
            if (!afs_AccessOK(vcp, (vType(vcp) == VREG) ? PRSFS_READ : PRSFS_LOOKUP, &treq, CHECK_MODE_BITS))
                goto bad_dentry;

            vcp->last_looker = treq.uid;
        }
#endif

        /* If the parent's DataVersion has changed or the vnode
         * is no longer valid, we need to do a full lookup. VerifyVCache
         * isn't enough since the vnode may have been renamed.
         */
        if (hgetlo(pvcp->f.m.DataVersion) > dp->d_time || !(vcp->f.states & CStatd)) {
            credp = crref();
            afs_lookup(pvcp, (char *)dp->d_name.name, &tvc, credp);
            if (!tvc || tvc != vcp)
                goto bad_dentry;

            if (afs_getattr(vcp, &vattr, credp))
                goto bad_dentry;

            vattr2inode(AFSTOV(vcp), &vattr);
            dp->d_time = hgetlo(pvcp->f.m.DataVersion);
        }

        /* should we always update the attributes at this point? */
        /* unlikely--the vcache entry hasn't changed */

    } else {
        pvcp = VTOAFS(dp->d_parent->d_inode);	/* dget_parent()? */
        if (hgetlo(pvcp->f.m.DataVersion) > dp->d_time)
            goto bad_dentry;

        /* No change in parent's DataVersion so this negative
         * lookup is still valid. BUT, if a server is down a
         * negative lookup can result so there should be a
         * lifetime as well. For now, always expire.
         */
        goto bad_dentry;
    }

good_dentry:
    valid = 1;

done:
    /* Clean up */
    afs_PutFakeStat(&fakestate);
    if (credp)
        crfree(credp);

    if (!valid) {
        shrink_dcache_parent(dp);
        d_drop(dp);
    }
    afs_maybe_unlock_kernel();
    return valid;

bad_dentry:
    if (have_submounts(dp))
        valid = 1;
    else
        valid = 0;
    goto done;
}
static void
afs_dentry_iput(struct dentry *dp, struct inode *ip)
{
    struct vcache *vcp = VTOAFS(ip);

    if (!AFS_IS_DISCONNECTED || (vcp->f.states & CUnlinked)) {
        (void) afs_InactiveVCache(vcp, NULL);
    }
    afs_linux_clear_nfsfs_renamed(dp);

    iput(ip);
}
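/* afs_dentry_delete
 * Returning 1 tells the dcache to destroy this dentry as soon as its
 * refcount drops, so silly-renamed (CUnlinked) files are finalized
 * promptly instead of lingering in the dentry cache. */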
static int
afs_dentry_delete(struct dentry *dp)
{
    if (dp->d_inode && (VTOAFS(dp->d_inode)->f.states & CUnlinked))
        return 1;		/* bad inode? */

    return 0;
}
struct dentry_operations afs_dentry_operations = {
    .d_revalidate = afs_linux_dentry_revalidate,
    .d_delete = afs_dentry_delete,
    .d_iput = afs_dentry_iput,
};

/**********************************************************************
 * AFS Linux inode operations
 **********************************************************************/
/* afs_linux_create
 * Merely need to set enough of vattr to get us through the create. Note
 * that the higher level code (open_namei) will take care of any truncation
 * explicitly. Exclusive open is also taken care of in open_namei.
 *
 * name is in kernel space at this point.
 */
static int
#ifdef IOP_CREATE_TAKES_NAMEIDATA
afs_linux_create(struct inode *dip, struct dentry *dp, int mode,
                 struct nameidata *nd)
#else
afs_linux_create(struct inode *dip, struct dentry *dp, int mode)
#endif
{
    struct vattr vattr;
    cred_t *credp = crref();
    const char *name = dp->d_name.name;
    struct vcache *vcp;
    int code;

    VATTR_NULL(&vattr);
    vattr.va_mode = mode;
    vattr.va_type = mode & S_IFMT;

    afs_maybe_lock_kernel();
    code = afs_create(VTOAFS(dip), (char *)name, &vattr, NONEXCL, mode,
                      &vcp, credp);

    if (vcp) {
        struct inode *ip = AFSTOV(vcp);

        afs_getattr(vcp, &vattr, credp);
        afs_fill_inode(ip, &vattr);
        insert_inode_hash(ip);
        dp->d_op = &afs_dentry_operations;
        dp->d_time = hgetlo(VTOAFS(dip)->f.m.DataVersion);
        d_instantiate(dp, ip);
    }

    afs_maybe_unlock_kernel();
    crfree(credp);
    return afs_convert_code(code);
}
/* afs_linux_lookup */
static struct dentry *
#ifdef IOP_LOOKUP_TAKES_NAMEIDATA
afs_linux_lookup(struct inode *dip, struct dentry *dp,
                 struct nameidata *nd)
#else
afs_linux_lookup(struct inode *dip, struct dentry *dp)
#endif
{
    cred_t *credp = crref();
    struct vcache *vcp = NULL;
    const char *comp = dp->d_name.name;
    struct inode *ip = NULL;
    struct dentry *newdp = NULL;
    int code;

    afs_maybe_lock_kernel();
    code = afs_lookup(VTOAFS(dip), (char *)comp, &vcp, credp);

    if (vcp) {
        struct vattr vattr;

        ip = AFSTOV(vcp);
        afs_getattr(vcp, &vattr, credp);
        afs_fill_inode(ip, &vattr);
        if (hlist_unhashed(&ip->i_hash))
            insert_inode_hash(ip);
    }
    dp->d_op = &afs_dentry_operations;
    dp->d_time = hgetlo(VTOAFS(dip)->f.m.DataVersion);

    if (ip && S_ISDIR(ip->i_mode)) {
        struct dentry *alias;

        /* Try to invalidate an existing alias in favor of our new one */
        alias = d_find_alias(ip);
        /* But not if it's disconnected; then we want d_splice_alias below */
        if (alias && !(alias->d_flags & DCACHE_DISCONNECTED)) {
            if (d_invalidate(alias) == 0) {
                dput(alias);
            } else {
                iput(ip);
                crfree(credp);
                return alias;
            }
        }
    }
    newdp = d_splice_alias(ip, dp);

    afs_maybe_unlock_kernel();
    crfree(credp);

    /* It's ok for the file to not be found. That's noted by the caller by
     * seeing that the dp->d_inode field is NULL.
     */
    if (!code || code == ENOENT)
        return newdp;
    else
        return ERR_PTR(afs_convert_code(code));
}
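/* afs_linux_link */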
static int
afs_linux_link(struct dentry *olddp, struct inode *dip, struct dentry *newdp)
{
    int code;
    cred_t *credp = crref();
    const char *name = newdp->d_name.name;
    struct inode *oldip = olddp->d_inode;

    /* If afs_link returned the vnode, we could instantiate the
     * dentry. Since it's not, we drop this one and do a new lookup.
     */
    d_drop(newdp);

    code = afs_link(VTOAFS(oldip), VTOAFS(dip), (char *)name, credp);

    crfree(credp);
    return afs_convert_code(code);
}
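/* afs_linux_unlink
 * If the file is still in use (extra refs and opens), removing the name
 * outright would yank data out from under those users, so the code below
 * silly-renames it to a fresh name from afs_newname() and marks the vcache
 * CUnlinked; the renamed file is cleaned up once the last reference dies
 * (see afs_dentry_iput/afs_dentry_delete above). */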
static int
afs_linux_unlink(struct inode *dip, struct dentry *dp)
{
    int code = EBUSY;
    cred_t *credp = crref();
    const char *name = dp->d_name.name;
    struct vcache *tvc = VTOAFS(dp->d_inode);

    afs_maybe_lock_kernel();
    if (VREFCOUNT(tvc) > 1 && tvc->opens > 0
        && !(tvc->f.states & CUnlinked)) {

        struct dentry *__dp = NULL;
        char *__name = NULL;

        do {
            dput(__dp);

            if (__name)
                osi_FreeSmallSpace(__name);
            __name = afs_newname();

            __dp = lookup_one_len(__name, dp->d_parent, strlen(__name));

        } while (__dp->d_inode != NULL);

        code = afs_rename(VTOAFS(dip), (char *)dp->d_name.name, VTOAFS(dip), (char *)__dp->d_name.name, credp);
        if (!code) {
            tvc->mvid = (void *) __name;
            crhold(credp);
            if (tvc->uncred)
                crfree(tvc->uncred);
            tvc->uncred = credp;
            tvc->f.states |= CUnlinked;
            afs_linux_set_nfsfs_renamed(dp);
        } else {
            osi_FreeSmallSpace(__name);
        }

        if (!code) {
            __dp->d_time = hgetlo(VTOAFS(dip)->f.m.DataVersion);
            d_move(dp, __dp);
        }
        dput(__dp);
    } else
        code = afs_remove(VTOAFS(dip), (char *)name, credp);

    afs_maybe_unlock_kernel();
    crfree(credp);
    return afs_convert_code(code);
}
static int
afs_linux_symlink(struct inode *dip, struct dentry *dp, const char *target)
{
    int code;
    cred_t *credp = crref();
    struct vattr vattr;
    const char *name = dp->d_name.name;

    /* If afs_symlink returned the vnode, we could instantiate the
     * dentry. Since it's not, we drop this one and do a new lookup.
     */
    d_drop(dp);

    VATTR_NULL(&vattr);
    code = afs_symlink(VTOAFS(dip), (char *)name, &vattr, (char *)target, credp);

    crfree(credp);
    return afs_convert_code(code);
}
static int
afs_linux_mkdir(struct inode *dip, struct dentry *dp, int mode)
{
    int code;
    cred_t *credp = crref();
    struct vcache *tvcp = NULL;
    struct vattr vattr;
    const char *name = dp->d_name.name;

    afs_maybe_lock_kernel();
    VATTR_NULL(&vattr);
    vattr.va_mask = ATTR_MODE;
    vattr.va_mode = mode;

    code = afs_mkdir(VTOAFS(dip), (char *)name, &vattr, &tvcp, credp);

    if (tvcp) {
        struct inode *ip = AFSTOV(tvcp);

        afs_getattr(tvcp, &vattr, credp);
        afs_fill_inode(ip, &vattr);

        dp->d_op = &afs_dentry_operations;
        dp->d_time = hgetlo(VTOAFS(dip)->f.m.DataVersion);
        d_instantiate(dp, ip);
    }

    afs_maybe_unlock_kernel();
    crfree(credp);
    return afs_convert_code(code);
}
static int
afs_linux_rmdir(struct inode *dip, struct dentry *dp)
{
    int code;
    cred_t *credp = crref();
    const char *name = dp->d_name.name;

    /* locking kernel conflicts with glock? */

    code = afs_rmdir(VTOAFS(dip), (char *)name, credp);

    /* Linux likes to see ENOTEMPTY returned from an rmdir() syscall
     * that failed because a directory is not empty. So, we map
     * EEXIST to ENOTEMPTY on linux.
     */
    if (code == EEXIST) {
        code = ENOTEMPTY;
    }

    if (!code) {
        d_drop(dp);
    }

    crfree(credp);
    return afs_convert_code(code);
}
static int
afs_linux_rename(struct inode *oldip, struct dentry *olddp,
                 struct inode *newip, struct dentry *newdp)
{
    int code = 0;
    cred_t *credp = crref();
    const char *oldname = olddp->d_name.name;
    const char *newname = newdp->d_name.name;
    struct dentry *rehash = NULL;

    /* Prevent any new references during rename operation. */
    afs_maybe_lock_kernel();

    if (!d_unhashed(newdp)) {
        d_drop(newdp);
        rehash = newdp;
    }

    if (atomic_read(&olddp->d_count) > 1)
        shrink_dcache_parent(olddp);

    code = afs_rename(VTOAFS(oldip), (char *)oldname, VTOAFS(newip), (char *)newname, credp);

    if (!code)
        olddp->d_time = 0;	/* force to revalidate */

    if (rehash)
        d_rehash(rehash);

    afs_maybe_unlock_kernel();

    crfree(credp);
    return afs_convert_code(code);
}
/* afs_linux_ireadlink
 * Internal readlink which can return link contents to user or kernel space.
 * Note that the buffer is NOT supposed to be null-terminated.
 */
static int
afs_linux_ireadlink(struct inode *ip, char *target, int maxlen, uio_seg_t seg)
{
    int code;
    cred_t *credp = crref();
    uio_t tuio;
    struct iovec iov;

    setup_uio(&tuio, &iov, target, (afs_offs_t) 0, maxlen, UIO_READ, seg);
    code = afs_readlink(VTOAFS(ip), &tuio, credp);
    crfree(credp);

    if (!code)
        return maxlen - tuio.uio_resid;
    else
        return afs_convert_code(code);
}
#if !defined(USABLE_KERNEL_PAGE_SYMLINK_CACHE)
/* afs_linux_readlink
 * Fill target (which is in user space) with contents of symlink.
 */
static int
afs_linux_readlink(struct dentry *dp, char *target, int maxlen)
{
    int code;
    struct inode *ip = dp->d_inode;

    code = afs_linux_ireadlink(ip, target, maxlen, AFS_UIOUSER);
    return code;
}

/* afs_linux_follow_link
 * a file system dependent link following routine.
 */
static int afs_linux_follow_link(struct dentry *dentry, struct nameidata *nd)
{
    int code;
    char *name;

    name = osi_Alloc(PATH_MAX);
    if (!name)
        return -EIO;

    code = afs_linux_ireadlink(dentry->d_inode, name, PATH_MAX - 1, AFS_UIOSYS);
    if (code < 0) {
        osi_Free(name, PATH_MAX);
        return code;
    }

    name[code] = '\0';
    code = vfs_follow_link(nd, name);

    osi_Free(name, PATH_MAX);
    return code;
}
#endif /* USABLE_KERNEL_PAGE_SYMLINK_CACHE */
#if defined(AFS_CACHE_BYPASS)
#endif /* defined(AFS_CACHE_BYPASS) */
/* Populate a page by filling it from the cache file pointed at by cachefp
 * (which contains indicated chunk)
 * If task is NULL, the page copy occurs synchronously, and the routine
 * returns with page still locked. If task is non-NULL, then page copies
 * may occur in the background, and the page will be unlocked when it is
 * done.
 */
static int
afs_linux_read_cache(struct file *cachefp, struct page *page,
                     int chunk, struct pagevec *lrupv,
                     struct afs_pagecopy_task *task) {
    loff_t offset = page_offset(page);
    struct page *newpage, *cachepage;
    struct address_space *cachemapping;
    int pageindex;
    int code = 0;

    cachemapping = cachefp->f_dentry->d_inode->i_mapping;
    newpage = NULL;
    cachepage = NULL;

    /* From our offset, we now need to work out which page in the disk
     * file it corresponds to. This will be fun ... */
    pageindex = (offset - AFS_CHUNKTOBASE(chunk)) >> PAGE_CACHE_SHIFT;

    while (cachepage == NULL) {
        cachepage = find_get_page(cachemapping, pageindex);
        if (!cachepage) {
            if (!newpage)
                newpage = page_cache_alloc_cold(cachemapping);
            if (!newpage) {
                code = -ENOMEM;
                goto out;
            }

            code = add_to_page_cache(newpage, cachemapping,
                                     pageindex, GFP_KERNEL);
            if (code == 0) {
                cachepage = newpage;
                newpage = NULL;

                page_cache_get(cachepage);
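                /* pagevec_add() returns the number of slots left, so a
                 * zero return means the pagevec is full and should be
                 * drained onto the file LRU list in one batch. */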
                if (!pagevec_add(lrupv, cachepage))
                    __pagevec_lru_add_file(lrupv);
            } else {
                page_cache_release(newpage);
                newpage = NULL;
                if (code != -EEXIST)
                    goto out;
            }
        } else {
            lock_page(cachepage);
        }
    }

    if (!PageUptodate(cachepage)) {
        ClearPageError(cachepage);
        code = cachemapping->a_ops->readpage(NULL, cachepage);
        if (!code && !task) {
            wait_on_page_locked(cachepage);
        }
    } else {
        unlock_page(cachepage);
    }

    if (!code) {
        if (PageUptodate(cachepage)) {
            copy_highpage(page, cachepage);
            flush_dcache_page(page);
            SetPageUptodate(page);

            if (task)
                unlock_page(page);
        } else if (task) {
            afs_pagecopy_queue_page(task, cachepage, page);
        } else {
            code = -EIO;
        }
    }

out:
    if (code && task)
        unlock_page(page);

    if (cachepage)
        page_cache_release(cachepage);

    return code;
}
static int
afs_linux_readpage_fastpath(struct file *fp, struct page *pp, int *codep)
{
    loff_t offset = page_offset(pp);
    struct inode *ip = FILE_INODE(fp);
    struct vcache *avc = VTOAFS(ip);
    struct dcache *tdc;
    struct file *cacheFp = NULL;
    int code;
    int dcLocked = 0;
    struct pagevec lrupv;

    /* Not a UFS cache, don't do anything */
    if (cacheDiskType != AFS_FCACHE_TYPE_UFS)
        return 0;

    /* Can't do anything if the vcache isn't statd, or if the read
     * crosses a chunk boundary.
     */
    if (!(avc->f.states & CStatd) ||
        AFS_CHUNK(offset) != AFS_CHUNK(offset + PAGE_SIZE)) {
        return 0;
    }

    ObtainWriteLock(&avc->lock, 911);

    /* XXX - See if hinting actually makes things faster !!! */

    /* See if we have a suitable entry already cached */
    tdc = avc->dchint;

    if (tdc) {
        /* We need to lock xdcache, then dcache, to handle situations where
         * the hint is on the free list. However, we can't safely do this
         * according to the locking hierarchy. So, use a non blocking lock.
         */
        ObtainReadLock(&afs_xdcache);
        dcLocked = (0 == NBObtainReadLock(&tdc->lock));

        if (dcLocked && (tdc->index != NULLIDX)
            && !FidCmp(&tdc->f.fid, &avc->f.fid)
            && tdc->f.chunk == AFS_CHUNK(offset)
            && !(afs_indexFlags[tdc->index] & (IFFree | IFDiscarded))) {
            /* Bonus - the hint was correct */
            afs_RefDCache(tdc);
        } else {
            /* Only destroy the hint if it's actually invalid, not if there's
             * just been a locking failure */
            if (dcLocked) {
                ReleaseReadLock(&tdc->lock);
                avc->dchint = NULL;
            }

            tdc = NULL;
            dcLocked = 0;
        }
        ReleaseReadLock(&afs_xdcache);
    }

    /* No hint, or hint is no longer valid - see if we can get something
     * directly from the dcache
     */
    if (!tdc)
        tdc = afs_FindDCache(avc, offset);

    if (!tdc) {
        ReleaseWriteLock(&avc->lock);
        return 0;
    }

    if (!dcLocked)
        ObtainReadLock(&tdc->lock);

    /* Is the dcache we've been given currently up to date */
    if (!hsame(avc->f.m.DataVersion, tdc->f.versionNo) ||
        (tdc->dflags & DFFetching)) {
        ReleaseWriteLock(&avc->lock);
        ReleaseReadLock(&tdc->lock);
        afs_PutDCache(tdc);
        return 0;
    }

    /* Update our hint for future abuse */
    avc->dchint = tdc;

    /* Okay, so we've now got a cache file that is up to date */

    /* XXX - I suspect we should be locking the inodes before we use them! */
    cacheFp = afs_linux_raw_open(&tdc->f.inode, NULL);
    pagevec_init(&lrupv, 0);

    code = afs_linux_read_cache(cacheFp, pp, tdc->f.chunk, &lrupv, NULL);

    if (pagevec_count(&lrupv))
        __pagevec_lru_add_file(&lrupv);

    filp_close(cacheFp, NULL);

    ReleaseReadLock(&tdc->lock);
    ReleaseWriteLock(&avc->lock);
    afs_PutDCache(tdc);

    *codep = code;
    return 1;
}
/* afs_linux_readpage
 *
 * This function is split into two, because prepare_write/begin_write
 * require a readpage call which doesn't unlock the resulting page upon
 * success.
 */
static int
afs_linux_fillpage(struct file *fp, struct page *pp)
{
    afs_int32 code;
    char *address;
    uio_t *auio;
    struct iovec *iovecp;
    struct inode *ip = FILE_INODE(fp);
    afs_int32 cnt = page_count(pp);
    struct vcache *avc = VTOAFS(ip);
    afs_offs_t offset = page_offset(pp);
    cred_t *credp;

    if (afs_linux_readpage_fastpath(fp, pp, &code)) {
        return code;
    }

    credp = crref();
    address = kmap(pp);
    ClearPageError(pp);

    auio = osi_Alloc(sizeof(uio_t));
    iovecp = osi_Alloc(sizeof(struct iovec));

    setup_uio(auio, iovecp, (char *)address, offset, PAGE_SIZE, UIO_READ,
              AFS_UIOSYS);

    afs_maybe_lock_kernel();

    afs_Trace4(afs_iclSetp, CM_TRACE_READPAGE, ICL_TYPE_POINTER, ip,
               ICL_TYPE_POINTER, pp, ICL_TYPE_INT32, cnt, ICL_TYPE_INT32,
               99999);	/* not a possible code value */

    code = afs_rdwr(avc, auio, UIO_READ, 0, credp);

    afs_Trace4(afs_iclSetp, CM_TRACE_READPAGE, ICL_TYPE_POINTER, ip,
               ICL_TYPE_POINTER, pp, ICL_TYPE_INT32, cnt, ICL_TYPE_INT32,
               code);
    AFS_DISCON_UNLOCK();

    afs_maybe_unlock_kernel();
    if (!code) {
        /* XXX valid for no-cache also? Check last bits of files... :)
         * Cognate code goes in afs_NoCacheFetchProc. */
        if (auio->uio_resid)	/* zero remainder of page */
            memset((void *)(address + (PAGE_SIZE - auio->uio_resid)), 0,
                   auio->uio_resid);

        flush_dcache_page(pp);
        SetPageUptodate(pp);
    }

    kunmap(pp);

    osi_Free(auio, sizeof(uio_t));
    osi_Free(iovecp, sizeof(struct iovec));

    crfree(credp);
    return afs_convert_code(code);
}
static int
afs_linux_prefetch(struct file *fp, struct page *pp)
{
    int code = 0;
    struct vcache *avc = VTOAFS(FILE_INODE(fp));
    afs_offs_t offset = page_offset(pp);
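    /* A page that starts exactly on a chunk boundary suggests a sequential
     * reader, so this is the cheap point at which to start a background
     * fetch of the following chunk. */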
    if (AFS_CHUNKOFFSET(offset) == 0) {
        struct dcache *tdc;
        struct vrequest treq;
        cred_t *credp;

        credp = crref();
        code = afs_InitReq(&treq, credp);
        if (!code && !NBObtainWriteLock(&avc->lock, 534)) {
            tdc = afs_FindDCache(avc, offset);
            if (tdc) {
                if (!(tdc->mflags & DFNextStarted))
                    afs_PrefetchChunk(avc, tdc, credp, &treq);
                afs_PutDCache(tdc);
            }
            ReleaseWriteLock(&avc->lock);
        }
        crfree(credp);
    }

    return afs_convert_code(code);
}
#if defined(AFS_CACHE_BYPASS)
static int
afs_linux_bypass_readpages(struct file *fp, struct address_space *mapping,
                           struct list_head *page_list, unsigned num_pages)
{
    afs_int32 page_ix;
    uio_t *auio;
    afs_offs_t offset;
    struct iovec* iovecp;
    struct nocache_read_request *ancr;
    struct page *pp, *ppt;
    struct pagevec lrupv;
    afs_int32 code = 0;
    cred_t *credp;

    struct inode *ip = FILE_INODE(fp);
    struct vcache *avc = VTOAFS(ip);
    afs_int32 base_index = 0;
    afs_int32 page_count = 0;
    afs_int32 isize;

    /* background thread must free: iovecp, auio, ancr */
    iovecp = osi_Alloc(num_pages * sizeof(struct iovec));

    auio = osi_Alloc(sizeof(uio_t));
    auio->uio_iov = iovecp;
    auio->uio_iovcnt = num_pages;
    auio->uio_flag = UIO_READ;
    auio->uio_seg = AFS_UIOSYS;
    auio->uio_resid = num_pages * PAGE_SIZE;

    ancr = osi_Alloc(sizeof(struct nocache_read_request));
    ancr->auio = auio;
    ancr->offset = auio->uio_offset;
    ancr->length = auio->uio_resid;

    pagevec_init(&lrupv, 0);

    for(page_ix = 0; page_ix < num_pages; ++page_ix) {

        if(list_empty(page_list))
            break;

        pp = list_entry(page_list->prev, struct page, lru);
        /* If we allocate a page and don't remove it from page_list,
         * the page cache gets upset. */
        list_del(&pp->lru);
        isize = (i_size_read(fp->f_mapping->host) - 1) >> PAGE_CACHE_SHIFT;
        if(pp->index > isize) {
            if(PageLocked(pp))
                unlock_page(pp);
            continue;
        }

        if(page_ix == 0) {
            offset = page_offset(pp);
            auio->uio_offset = offset;
            base_index = pp->index;
        }
        iovecp[page_ix].iov_len = PAGE_SIZE;
        code = add_to_page_cache(pp, mapping, pp->index, GFP_KERNEL);
        if(base_index != pp->index) {
            if(PageLocked(pp))
                unlock_page(pp);
            page_cache_release(pp);
            iovecp[page_ix].iov_base = (void *) 0;
            base_index++;
            continue;
        }
        base_index++;
        if(code) {
            if(PageLocked(pp))
                unlock_page(pp);
            page_cache_release(pp);
            iovecp[page_ix].iov_base = (void *) 0;
        } else {
            page_count++;
            if(!PageLocked(pp)) {
                lock_page(pp);
            }

            /* save the page for background map */
            iovecp[page_ix].iov_base = (void*) pp;

            /* and put it on the LRU cache */
            if (!pagevec_add(&lrupv, pp))
                __pagevec_lru_add(&lrupv);
        }
    }

    /* If there were useful pages in the page list, make sure all pages
     * are in the LRU cache, then schedule the read */
    if(page_count) {
        pagevec_lru_add(&lrupv);
        credp = crref();
        code = afs_ReadNoCache(avc, ancr, credp);
        crfree(credp);
    } else {
        /* If there is nothing for the background thread to handle,
         * it won't be freeing the things that we never gave it */
        osi_Free(iovecp, num_pages * sizeof(struct iovec));
        osi_Free(auio, sizeof(uio_t));
        osi_Free(ancr, sizeof(struct nocache_read_request));
    }
    /* we do not flush, release, or unmap pages--that will be
     * done for us by the background thread as each page comes in
     * from the fileserver */
    return afs_convert_code(code);
}
static int
afs_linux_bypass_readpage(struct file *fp, struct page *pp)
{
    cred_t *credp = NULL;
    uio_t *auio;
    struct iovec *iovecp;
    struct nocache_read_request *ancr;
    loff_t offset = page_offset(pp);
    afs_int32 code;

    /* receiver frees */
    auio = osi_Alloc(sizeof(uio_t));
    iovecp = osi_Alloc(sizeof(struct iovec));

    /* address can be NULL, because we overwrite it with 'pp', below */
    setup_uio(auio, iovecp, NULL, page_offset(pp),
              PAGE_SIZE, UIO_READ, AFS_UIOSYS);

    /* save the page for background map */
    /* XXX - Shouldn't we get a reference count here? */
    auio->uio_iov->iov_base = (void*) pp;
    /* the background thread will free this */
    ancr = osi_Alloc(sizeof(struct nocache_read_request));
    ancr->auio = auio;
    ancr->offset = offset;
    ancr->length = PAGE_SIZE;

    credp = crref();
    afs_maybe_lock_kernel();
    code = afs_ReadNoCache(VTOAFS(FILE_INODE(fp)), ancr, credp);
    afs_maybe_unlock_kernel();
    crfree(credp);

    return afs_convert_code(code);
}
static int
afs_linux_can_bypass(struct inode *ip) {
    switch(cache_bypass_strategy) {
        case NEVER_BYPASS_CACHE:
            return 0;
        case ALWAYS_BYPASS_CACHE:
            return 1;
        case LARGE_FILES_BYPASS_CACHE:
            if(i_size_read(ip) > cache_bypass_threshold)
                return 1;
        default:
            return 0;
    }
}

/* Check if a file is permitted to bypass the cache by policy, and modify
 * the cache bypass state recorded for that file */
static int
afs_linux_bypass_check(struct inode *ip) {
    cred_t* credp = crref();
    int bypass = afs_linux_can_bypass(ip);

    trydo_cache_transition(VTOAFS(ip), credp, bypass);
    crfree(credp);

    return bypass;
}
#else
static int
afs_linux_bypass_check(struct inode *ip) {
    return 0;
}
static int
afs_linux_bypass_readpage(struct file *fp, struct page *pp) {
    return 0;
}
static int
afs_linux_bypass_readpages(struct file *fp, struct address_space *mapping,
                           struct list_head *page_list, unsigned int num_pages) {
    return 0;
}
#endif

static int
afs_linux_readpage(struct file *fp, struct page *pp)
{
    int code;

    if (afs_linux_bypass_check(FILE_INODE(fp))) {
        code = afs_linux_bypass_readpage(fp, pp);
    } else {
        code = afs_linux_fillpage(fp, pp);
        if (!code)
            code = afs_linux_prefetch(fp, pp);
        unlock_page(pp);
    }

    return code;
}
/* Readpages reads a number of pages for a particular file. We use
 * this to optimise the reading, by limiting the number of times upon which
 * we have to lookup, lock and open vcaches and dcaches
 */
static int
afs_linux_readpages(struct file *fp, struct address_space *mapping,
                    struct list_head *page_list, unsigned int num_pages)
{
    struct inode *inode = mapping->host;
    struct vcache *avc = VTOAFS(inode);
    struct dcache *tdc;
    struct file *cacheFp = NULL;
    int code;
    unsigned int page_idx;
    loff_t offset;
    struct pagevec lrupv;
    struct afs_pagecopy_task *task;

    if (afs_linux_bypass_check(inode))
        return afs_linux_bypass_readpages(fp, mapping, page_list, num_pages);

    if ((code = afs_linux_VerifyVCache(avc, NULL))) {
        return code;
    }

    ObtainWriteLock(&avc->lock, 912);

    task = afs_pagecopy_init_task();

    tdc = NULL;
    pagevec_init(&lrupv, 0);
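    /* Pages are pulled off the tail of page_list; the current dcache and
     * open cache file are reused for as long as successive pages fall in
     * the same chunk, and only re-opened on a chunk boundary. */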
    for (page_idx = 0; page_idx < num_pages; page_idx++) {
        struct page *page = list_entry(page_list->prev, struct page, lru);
        list_del(&page->lru);
        offset = page_offset(page);

        if (tdc && tdc->f.chunk != AFS_CHUNK(offset)) {
            ReleaseReadLock(&tdc->lock);
            afs_PutDCache(tdc);
            tdc = NULL;
            if (cacheFp)
                filp_close(cacheFp, NULL);
        }

        if (!tdc) {
            if ((tdc = afs_FindDCache(avc, offset))) {
                ObtainReadLock(&tdc->lock);
                if (!hsame(avc->f.m.DataVersion, tdc->f.versionNo) ||
                    (tdc->dflags & DFFetching)) {
                    ReleaseReadLock(&tdc->lock);
                    afs_PutDCache(tdc);
                    tdc = NULL;
                }
            }
            if (tdc)
                cacheFp = afs_linux_raw_open(&tdc->f.inode, NULL);
        }

        if (tdc && !add_to_page_cache(page, mapping, page->index,
                                      GFP_KERNEL)) {
            page_cache_get(page);
            if (!pagevec_add(&lrupv, page))
                __pagevec_lru_add_file(&lrupv);

            afs_linux_read_cache(cacheFp, page, tdc->f.chunk, &lrupv, task);
        }
        page_cache_release(page);
    }
    if (pagevec_count(&lrupv))
        __pagevec_lru_add_file(&lrupv);

    if (cacheFp)
        filp_close(cacheFp, NULL);

    afs_pagecopy_put_task(task);

    if (tdc) {
        ReleaseReadLock(&tdc->lock);
        afs_PutDCache(tdc);
    }

    ReleaseWriteLock(&avc->lock);
    return 0;
}
static int
afs_linux_writepage_sync(struct inode *ip, struct page *pp,
                         unsigned long offset, unsigned int count)
{
    struct vcache *vcp = VTOAFS(ip);
    char *buffer;
    afs_offs_t base;
    int code = 0;
    cred_t *credp;
    uio_t tuio;
    struct iovec iovec;
    int f_flags = 0;

    buffer = kmap(pp) + offset;
    base = page_offset(pp) + offset;

    credp = crref();
    afs_maybe_lock_kernel();
    afs_Trace4(afs_iclSetp, CM_TRACE_UPDATEPAGE, ICL_TYPE_POINTER, vcp,
               ICL_TYPE_POINTER, pp, ICL_TYPE_INT32, page_count(pp),
               ICL_TYPE_INT32, 99999);

    ObtainWriteLock(&vcp->lock, 532);
    if (vcp->f.states & CPageWrite) {
        ReleaseWriteLock(&vcp->lock);
        crfree(credp);
        afs_maybe_unlock_kernel();
        kunmap(pp);
        return AOP_WRITEPAGE_ACTIVATE;
    }
    vcp->f.states |= CPageWrite;
    ReleaseWriteLock(&vcp->lock);

    setup_uio(&tuio, &iovec, buffer, base, count, UIO_WRITE, AFS_UIOSYS);

    code = afs_write(vcp, &tuio, f_flags, credp, 0);

    i_size_write(ip, vcp->f.m.Length);
    ip->i_blocks = ((vcp->f.m.Length + 1023) >> 10) << 1;

    ObtainWriteLock(&vcp->lock, 533);
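    /* afs_DoPartialWrite may push some dirty chunks back to the fileserver
     * early when a lot of dirty cache has accumulated, so heavy writers
     * can't fill the client cache with unstored data. */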
    {
        struct vrequest treq;

        if (!afs_InitReq(&treq, credp))
            code = afs_DoPartialWrite(vcp, &treq);
    }
    code = code ? afs_convert_code(code) : count - tuio.uio_resid;

    vcp->f.states &= ~CPageWrite;
    ReleaseWriteLock(&vcp->lock);

    afs_Trace4(afs_iclSetp, CM_TRACE_UPDATEPAGE, ICL_TYPE_POINTER, vcp,
               ICL_TYPE_POINTER, pp, ICL_TYPE_INT32, page_count(pp),
               ICL_TYPE_INT32, code);

    afs_maybe_unlock_kernel();
    crfree(credp);
    kunmap(pp);

    return code;
}
static int
#ifdef AOP_WRITEPAGE_TAKES_WRITEBACK_CONTROL
afs_linux_writepage(struct page *pp, struct writeback_control *wbc)
#else
afs_linux_writepage(struct page *pp)
#endif
{
    struct address_space *mapping = pp->mapping;
    struct inode *inode;
    unsigned int to = PAGE_CACHE_SIZE;
    loff_t isize;
    int status = 0;

    if (PageReclaim(pp)) {
        return AOP_WRITEPAGE_ACTIVATE;
        /* XXX - Do we need to redirty the page here? */
    }

    page_cache_get(pp);

    inode = (struct inode *)mapping->host;
    isize = i_size_read(inode);

    /* Don't defeat an earlier truncate */
    if (page_offset(pp) > isize) {
        unlock_page(pp);
        goto done;
    }

    /* If this is the final page, then just write the number of bytes that
     * are actually in it */
    if ((isize - page_offset(pp)) < to)
        to = isize - page_offset(pp);

    status = afs_linux_writepage_sync(inode, pp, 0, to);

    SetPageUptodate(pp);
    if (status != AOP_WRITEPAGE_ACTIVATE) {
        /* XXX - do we need to redirty the page here? */
        unlock_page(pp);
    }

done:
    page_cache_release(pp);

    return status;
}
/* afs_linux_permission
 * Check access rights - returns error if can't check or permission denied.
 */
static int
#ifdef IOP_PERMISSION_TAKES_NAMEIDATA
afs_linux_permission(struct inode *ip, int mode, struct nameidata *nd)
#else
afs_linux_permission(struct inode *ip, int mode)
#endif
{
    int code;
    cred_t *credp = crref();
    int tmp = 0;

    if (mode & MAY_EXEC)
        tmp |= VEXEC;
    if (mode & MAY_READ)
        tmp |= VREAD;
    if (mode & MAY_WRITE)
        tmp |= VWRITE;
    code = afs_access(VTOAFS(ip), tmp, credp);

    crfree(credp);
    return afs_convert_code(code);
}
#if !defined(HAVE_WRITE_BEGIN)
static int
afs_linux_commit_write(struct file *file, struct page *page, unsigned offset,
                       unsigned to)
{
    int code;

    code = afs_linux_writepage_sync(file->f_dentry->d_inode, page,
                                    offset, to - offset);

    return code;
}

static int
afs_linux_prepare_write(struct file *file, struct page *page, unsigned from,
                        unsigned to)
{
    return 0;
}
#else

static int
afs_linux_write_end(struct file *file, struct address_space *mapping,
                    loff_t pos, unsigned len, unsigned copied,
                    struct page *page, void *fsdata)
{
    int code;
    unsigned from = pos & (PAGE_CACHE_SIZE - 1);

    code = afs_linux_writepage_sync(file->f_dentry->d_inode, page,
                                    from, copied);
    unlock_page(page);
    page_cache_release(page);
    return code;
}

static int
afs_linux_write_begin(struct file *file, struct address_space *mapping,
                      loff_t pos, unsigned len, unsigned flags,
                      struct page **pagep, void **fsdata)
{
    struct page *page;
    pgoff_t index = pos >> PAGE_CACHE_SHIFT;

    page = grab_cache_page_write_begin(mapping, index, flags);
    *pagep = page;

    return 0;
}
#endif
static struct inode_operations afs_file_iops = {
    .permission = afs_linux_permission,
    .getattr = afs_linux_getattr,
    .setattr = afs_notify_change,
};

static struct address_space_operations afs_file_aops = {
    .readpage = afs_linux_readpage,
    .readpages = afs_linux_readpages,
    .writepage = afs_linux_writepage,
#if defined (HAVE_WRITE_BEGIN)
    .write_begin = afs_linux_write_begin,
    .write_end = afs_linux_write_end,
#else
    .commit_write = afs_linux_commit_write,
    .prepare_write = afs_linux_prepare_write,
#endif
};

/* Separate ops vector for directories. Linux 2.2 tests type of inode
 * by what sort of operation is allowed.....
 */
static struct inode_operations afs_dir_iops = {
    .setattr = afs_notify_change,
    .create = afs_linux_create,
    .lookup = afs_linux_lookup,
    .link = afs_linux_link,
    .unlink = afs_linux_unlink,
    .symlink = afs_linux_symlink,
    .mkdir = afs_linux_mkdir,
    .rmdir = afs_linux_rmdir,
    .rename = afs_linux_rename,
    .getattr = afs_linux_getattr,
    .permission = afs_linux_permission,
};
/* We really need a separate symlink set of ops, since do_follow_link()
 * determines if it _is_ a link by checking if the follow_link op is set.
 */
#if defined(USABLE_KERNEL_PAGE_SYMLINK_CACHE)
static int
afs_symlink_filler(struct file *file, struct page *page)
{
    struct inode *ip = (struct inode *)page->mapping->host;
    char *p = (char *)kmap(page);
    int code;

    afs_maybe_lock_kernel();
    code = afs_linux_ireadlink(ip, p, PAGE_SIZE, AFS_UIOSYS);

    if (code < 0)
        goto fail;
    p[code] = '\0';		/* null terminate? */
    afs_maybe_unlock_kernel();

    SetPageUptodate(page);
    kunmap(page);
    unlock_page(page);
    return 0;

fail:
    afs_maybe_unlock_kernel();

    SetPageError(page);
    kunmap(page);
    unlock_page(page);
    return code;
}

static struct address_space_operations afs_symlink_aops = {
    .readpage = afs_symlink_filler
};
#endif /* USABLE_KERNEL_PAGE_SYMLINK_CACHE */
static struct inode_operations afs_symlink_iops = {
#if defined(USABLE_KERNEL_PAGE_SYMLINK_CACHE)
    .readlink = page_readlink,
# if defined(HAVE_KERNEL_PAGE_FOLLOW_LINK)
    .follow_link = page_follow_link,
# else
    .follow_link = page_follow_link_light,
    .put_link = page_put_link,
# endif
#else /* !defined(USABLE_KERNEL_PAGE_SYMLINK_CACHE) */
    .readlink = afs_linux_readlink,
    .follow_link = afs_linux_follow_link,
#endif /* USABLE_KERNEL_PAGE_SYMLINK_CACHE */
    .setattr = afs_notify_change,
};
void
afs_fill_inode(struct inode *ip, struct vattr *vattr)
{
    if (vattr)
        vattr2inode(ip, vattr);

    ip->i_mapping->backing_dev_info = &afs_backing_dev_info;
    /* Reset ops if symlink or directory. */
    if (S_ISREG(ip->i_mode)) {
        ip->i_op = &afs_file_iops;
        ip->i_fop = &afs_file_fops;
        ip->i_data.a_ops = &afs_file_aops;

    } else if (S_ISDIR(ip->i_mode)) {
        ip->i_op = &afs_dir_iops;
        ip->i_fop = &afs_dir_fops;

    } else if (S_ISLNK(ip->i_mode)) {
        ip->i_op = &afs_symlink_iops;
#if defined(USABLE_KERNEL_PAGE_SYMLINK_CACHE)
        ip->i_data.a_ops = &afs_symlink_aops;
        ip->i_mapping = &ip->i_data;