2 * Copyright 2000, International Business Machines Corporation and others.
5 * This software has been released under the terms of the IBM Public
6 * License. For details, see the LICENSE file in the top-level source
7 * directory or online at http://www.openafs.org/dl/license10.html
11 * Linux specific vnodeops. Also includes the glue routines required to call
14 * So far the only truly scary part is that Linux relies on the inode cache
15 * to be up to date. Don't you dare break a callback and expect an fstat
16 * to give you meaningful information. This appears to be fixed in the 2.1
17 * development kernels. As it is we can fix this now by intercepting the
21 #include <afsconfig.h>
22 #include "afs/param.h"
25 #include "afs/sysincludes.h"
26 #include "afsincludes.h"
27 #include "afs/afs_stats.h"
29 #ifdef HAVE_MM_INLINE_H
30 #include <linux/mm_inline.h>
32 #include <linux/pagemap.h>
33 #include <linux/smp_lock.h>
34 #include <linux/writeback.h>
35 #include <linux/pagevec.h>
37 #include "afs/afs_bypasscache.h"
39 #include "osi_compat.h"
40 #include "osi_pagecopy.h"
42 #ifndef HAVE_LINUX_PAGEVEC_LRU_ADD_FILE
43 #define __pagevec_lru_add_file __pagevec_lru_add
47 #define MAX_ERRNO 1000L
50 extern struct backing_dev_info *afs_backing_dev_info;
52 extern struct vcache *afs_globalVp;
53 extern int afs_notify_change(struct dentry *dp, struct iattr *iattrp);
55 /* This function converts a positive error code from AFS into a negative
56 * code suitable for passing into the Linux VFS layer. It checks that the
57 * error code is within the permissible bounds for the ERR_PTR mechanism.
59 * _All_ error codes which come from the AFS layer should be passed through
60 * this function before being returned to the kernel.
64 afs_convert_code(int code) {
65 if ((code >= 0) && (code <= MAX_ERRNO))
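/*
 * Illustrative sketch, not part of the original source (kept under #if 0):
 * the typical calling pattern for afs_convert_code().  A vnode operation
 * calls into the AFS layer under the global lock, gets back a positive
 * AFS error code, and converts it exactly once at the boundary to the
 * VFS, so the kernel only ever sees 0 or a negative errno.  The function
 * name below is a hypothetical example, not an existing entry point.
 */
#if 0
static int
afs_linux_example_op(struct inode *ip, cred_t *credp)
{
    int code;

    AFS_GLOCK();
    code = afs_access(VTOAFS(ip), PRSFS_READ, credp); /* positive AFS code */
    AFS_GUNLOCK();

    return afs_convert_code(code);      /* 0 stays 0, errors become -Exxx */
}
#endif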
71 /* Linux doesn't require a credp for many functions, and crref is an expensive
72 * operation. This helper function avoids obtaining it for VerifyVCache calls
76 afs_linux_VerifyVCache(struct vcache *avc, cred_t **retcred) {
81 if (avc->f.states & CStatd) {
89 code = afs_InitReq(&treq, credp);
91 code = afs_VerifyVCache2(avc, &treq);
98 return afs_convert_code(code);
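/*
 * Sketch of the intended caller pattern (assumption, not original code,
 * kept under #if 0): when the vcache is already statd no credential is
 * taken at all; otherwise the helper performs the crref() itself and, if
 * retcred is non-NULL, hands the reference back so the caller can reuse
 * it and crfree() it exactly once.
 */
#if 0
static int
afs_linux_example_caller(struct vcache *vcp)    /* hypothetical caller */
{
    cred_t *credp = NULL;
    int code;

    code = afs_linux_VerifyVCache(vcp, &credp); /* cheap if CStatd is set */
    if (code == 0) {
        /* ... work that may need credp ... */
    }
    if (credp)
        crfree(credp);
    return code;
}
#endif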
102 afs_linux_read(struct file *fp, char *buf, size_t count, loff_t * offp)
105 struct vcache *vcp = VTOAFS(fp->f_dentry->d_inode);
108 afs_Trace4(afs_iclSetp, CM_TRACE_READOP, ICL_TYPE_POINTER, vcp,
109 ICL_TYPE_OFFSET, offp, ICL_TYPE_INT32, count, ICL_TYPE_INT32,
111 code = afs_linux_VerifyVCache(vcp, NULL);
114 /* Linux's FlushPages implementation doesn't ever use credp,
115 * so we optimise by not using it */
116 osi_FlushPages(vcp, NULL); /* ensure stale pages are gone */
118 code = do_sync_read(fp, buf, count, offp);
122 afs_Trace4(afs_iclSetp, CM_TRACE_READOP, ICL_TYPE_POINTER, vcp,
123 ICL_TYPE_OFFSET, offp, ICL_TYPE_INT32, count, ICL_TYPE_INT32,
130 /* Now we have integrated VM for writes as well as reads. generic_file_write
131 * also takes care of re-positioning the pointer if the file is open in append
132 * mode. Call fake open/close to ensure we do writes of core dumps.
135 afs_linux_write(struct file *fp, const char *buf, size_t count, loff_t * offp)
138 struct vcache *vcp = VTOAFS(fp->f_dentry->d_inode);
143 afs_Trace4(afs_iclSetp, CM_TRACE_WRITEOP, ICL_TYPE_POINTER, vcp,
144 ICL_TYPE_OFFSET, offp, ICL_TYPE_INT32, count, ICL_TYPE_INT32,
145 (fp->f_flags & O_APPEND) ? 99998 : 99999);
147 code = afs_linux_VerifyVCache(vcp, &credp);
149 ObtainWriteLock(&vcp->lock, 529);
151 ReleaseWriteLock(&vcp->lock);
154 code = do_sync_write(fp, buf, count, offp);
158 ObtainWriteLock(&vcp->lock, 530);
160 if (vcp->execsOrWriters == 1 && !credp)
163 afs_FakeClose(vcp, credp);
164 ReleaseWriteLock(&vcp->lock);
166 afs_Trace4(afs_iclSetp, CM_TRACE_WRITEOP, ICL_TYPE_POINTER, vcp,
167 ICL_TYPE_OFFSET, offp, ICL_TYPE_INT32, count, ICL_TYPE_INT32,
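/*
 * Sketch of the fake open/close bracket referred to above (assumption,
 * kept under #if 0; afs_FakeOpen() is not shown in this excerpt).  The
 * writer count is bumped before the generic write and dropped afterwards
 * so that writes arriving without a real open -- core dumps being the
 * classic case -- are still flushed back to the fileserver on the fake
 * close of the last writer.
 */
#if 0
    ObtainWriteLock(&vcp->lock, 529);
    afs_FakeOpen(vcp);                  /* pretend there is an open writer */
    ReleaseWriteLock(&vcp->lock);

    code = do_sync_write(fp, buf, count, offp);

    ObtainWriteLock(&vcp->lock, 530);
    afs_FakeClose(vcp, credp);          /* store dirty data if last writer */
    ReleaseWriteLock(&vcp->lock);
#endif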
176 extern int BlobScan(struct dcache * afile, afs_int32 ablob);
178 /* This is a complete rewrite of afs_readdir, since we can make use of
179 * filldir instead of afs_readdir_move. Note that changes to vcache/dcache
180 * handling and use of bulkstats will need to be reflected here as well.
183 afs_linux_readdir(struct file *fp, void *dirbuf, filldir_t filldir)
185 struct vcache *avc = VTOAFS(FILE_INODE(fp));
186 struct vrequest treq;
194 afs_size_t origOffset, tlen;
195 cred_t *credp = crref();
196 struct afs_fakestat_state fakestat;
199 AFS_STATCNT(afs_readdir);
201 code = afs_convert_code(afs_InitReq(&treq, credp));
206 afs_InitFakeStat(&fakestat);
207 code = afs_convert_code(afs_EvalFakeStat(&avc, &fakestat, &treq));
211 /* update the cache entry */
213 code = afs_convert_code(afs_VerifyVCache2(avc, &treq));
217 /* get a reference to the entire directory */
218 tdc = afs_GetDCache(avc, (afs_size_t) 0, &treq, &origOffset, &tlen, 1);
224 ObtainSharedLock(&avc->lock, 810);
225 UpgradeSToWLock(&avc->lock, 811);
226 ObtainReadLock(&tdc->lock);
228 * Make sure that the data in the cache is current. There are two
229 * cases we need to worry about:
230 * 1. The cache data is being fetched by another process.
231 * 2. The cache data is no longer valid
233 while ((avc->f.states & CStatd)
234 && (tdc->dflags & DFFetching)
235 && hsame(avc->f.m.DataVersion, tdc->f.versionNo)) {
236 ReleaseReadLock(&tdc->lock);
237 ReleaseSharedLock(&avc->lock);
238 afs_osi_Sleep(&tdc->validPos);
239 ObtainSharedLock(&avc->lock, 812);
240 ObtainReadLock(&tdc->lock);
242 if (!(avc->f.states & CStatd)
243 || !hsame(avc->f.m.DataVersion, tdc->f.versionNo)) {
244 ReleaseReadLock(&tdc->lock);
245 ReleaseSharedLock(&avc->lock);
250 /* Set the readdir-in-progress flag, and downgrade the lock
251 * to shared so others will be able to acquire a read lock.
253 avc->f.states |= CReadDir;
254 avc->dcreaddir = tdc;
255 avc->readdir_pid = MyPidxx2Pid(MyPidxx);
256 ConvertWToSLock(&avc->lock);
258 /* Fill in until we get an error or we're done. This implementation
259 * takes an offset in units of blobs, rather than bytes.
262 offset = (int) fp->f_pos;
264 dirpos = BlobScan(tdc, offset);
268 de = afs_dir_GetBlob(tdc, dirpos);
272 ino = afs_calc_inum (avc->f.fid.Fid.Volume, ntohl(de->fid.vnode));
275 len = strlen(de->name);
277 printf("afs_linux_readdir: afs_dir_GetBlob failed, null name (inode %lx, dirpos %d)\n",
278 (unsigned long)&tdc->f.inode, dirpos);
280 ReleaseSharedLock(&avc->lock);
286 /* filldir returns -EINVAL when the buffer is full. */
288 unsigned int type = DT_UNKNOWN;
289 struct VenusFid afid;
292 afid.Cell = avc->f.fid.Cell;
293 afid.Fid.Volume = avc->f.fid.Fid.Volume;
294 afid.Fid.Vnode = ntohl(de->fid.vnode);
295 afid.Fid.Unique = ntohl(de->fid.vunique);
296 if ((avc->f.states & CForeign) == 0 && (ntohl(de->fid.vnode) & 1)) {
298 } else if ((tvc = afs_FindVCache(&afid, 0, 0))) {
301 } else if (((tvc->f.states) & (CStatd | CTruth))) {
302 /* CTruth will be set if the object has
307 else if (vtype == VREG)
309 /* Don't do this until we're sure it can't be a mtpt */
310 /* else if (vtype == VLNK)
312 /* what other types does AFS support? */
314 /* clean up from afs_FindVCache */
318 * If this is NFS readdirplus, then the filler is going to
319 * call getattr on this inode, which will deadlock if we're
323 code = (*filldir) (dirbuf, de->name, len, offset, ino, type);
329 offset = dirpos + 1 + ((len + 16) >> 5);
331 /* If filldir didn't fill in the last one this is still pointing to that
334 fp->f_pos = (loff_t) offset;
336 ReleaseReadLock(&tdc->lock);
338 UpgradeSToWLock(&avc->lock, 813);
339 avc->f.states &= ~CReadDir;
341 avc->readdir_pid = 0;
342 ReleaseSharedLock(&avc->lock);
346 afs_PutFakeStat(&fakestat);
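/*
 * Sketch of the blob arithmetic used above (assumption, kept under #if 0).
 * AFS directories are arrays of 32-byte blobs, and f_pos for this readdir
 * implementation counts blobs, not bytes: the fixed entry header shares
 * the first blob with the start of the name, and longer names spill into
 * further blobs, which is what the shift by 5 (divide by 32) accounts for.
 */
#if 0
static int
afs_example_next_blob(int dirpos, int name_len) /* hypothetical helper */
{
    /* the entry's own blob, plus however many extra 32-byte blobs the
     * name needs beyond the space left in the first one */
    return dirpos + 1 + ((name_len + 16) >> 5);
}
#endif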
353 /* in afs_pioctl.c */
354 extern int afs_xioctl(struct inode *ip, struct file *fp, unsigned int com,
357 #if defined(HAVE_UNLOCKED_IOCTL) || defined(HAVE_COMPAT_IOCTL)
358 static long afs_unlocked_xioctl(struct file *fp, unsigned int com,
360 return afs_xioctl(FILE_INODE(fp), fp, com, arg);
367 afs_linux_mmap(struct file *fp, struct vm_area_struct *vmap)
369 struct vcache *vcp = VTOAFS(FILE_INODE(fp));
373 afs_Trace3(afs_iclSetp, CM_TRACE_GMAP, ICL_TYPE_POINTER, vcp,
374 ICL_TYPE_POINTER, vmap->vm_start, ICL_TYPE_INT32,
375 vmap->vm_end - vmap->vm_start);
377 /* get a validated vcache entry */
378 code = afs_linux_VerifyVCache(vcp, NULL);
381 /* Linux's Flushpage implementation doesn't use credp, so optimise
382 * our code to not need to crref() it */
383 osi_FlushPages(vcp, NULL); /* ensure stale pages are gone */
385 code = generic_file_mmap(fp, vmap);
388 vcp->f.states |= CMAPPED;
396 afs_linux_open(struct inode *ip, struct file *fp)
398 struct vcache *vcp = VTOAFS(ip);
399 cred_t *credp = crref();
403 code = afs_open(&vcp, fp->f_flags, credp);
407 return afs_convert_code(code);
411 afs_linux_release(struct inode *ip, struct file *fp)
413 struct vcache *vcp = VTOAFS(ip);
414 cred_t *credp = crref();
418 code = afs_close(vcp, fp->f_flags, credp);
419 ObtainWriteLock(&vcp->lock, 807);
424 ReleaseWriteLock(&vcp->lock);
428 return afs_convert_code(code);
432 #if defined(FOP_FSYNC_TAKES_DENTRY)
433 afs_linux_fsync(struct file *fp, struct dentry *dp, int datasync)
435 afs_linux_fsync(struct file *fp, int datasync)
439 struct inode *ip = FILE_INODE(fp);
440 cred_t *credp = crref();
443 code = afs_fsync(VTOAFS(ip), credp);
446 return afs_convert_code(code);
452 afs_linux_lock(struct file *fp, int cmd, struct file_lock *flp)
455 struct vcache *vcp = VTOAFS(FILE_INODE(fp));
456 cred_t *credp = crref();
457 struct AFS_FLOCK flock;
459 /* Convert to a lock format afs_lockctl understands. */
460 memset(&flock, 0, sizeof(flock));
461 flock.l_type = flp->fl_type;
462 flock.l_pid = flp->fl_pid;
464 flock.l_start = flp->fl_start;
465 if (flp->fl_end == OFFSET_MAX)
466 flock.l_len = 0; /* Lock to end of file */
468 flock.l_len = flp->fl_end - flp->fl_start + 1;
470 /* Safe because there are no large files, yet */
471 #if defined(F_GETLK64) && (F_GETLK != F_GETLK64)
472 if (cmd == F_GETLK64)
474 else if (cmd == F_SETLK64)
476 else if (cmd == F_SETLKW64)
478 #endif /* F_GETLK64 && F_GETLK != F_GETLK64 */
481 code = afs_convert_code(afs_lockctl(vcp, &flock, cmd, credp));
484 if ((code == 0 || flp->fl_type == F_UNLCK) &&
485 (cmd == F_SETLK || cmd == F_SETLKW)) {
486 code = afs_posix_lock_file(fp, flp);
487 if (code && flp->fl_type != F_UNLCK) {
488 struct AFS_FLOCK flock2;
490 flock2.l_type = F_UNLCK;
492 afs_lockctl(vcp, &flock2, F_SETLK, credp);
496 /* If lockctl says there are no conflicting locks, then also check with the
497 * kernel, as lockctl knows nothing about byte range locks
499 if (code == 0 && cmd == F_GETLK && flock.l_type == F_UNLCK) {
500 afs_posix_test_lock(fp, flp);
501 /* If we found a lock in the kernel's structure, return it */
502 if (flp->fl_type != F_UNLCK) {
508 /* Convert flock back to Linux's file_lock */
509 flp->fl_type = flock.l_type;
510 flp->fl_pid = flock.l_pid;
511 flp->fl_start = flock.l_start;
512 if (flock.l_len == 0)
513 flp->fl_end = OFFSET_MAX; /* Lock to end of file */
515 flp->fl_end = flock.l_start + flock.l_len - 1;
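/*
 * Sketch of the two lock representations converted above (assumption,
 * kept under #if 0).  Linux describes a byte-range lock as an inclusive
 * [fl_start, fl_end] pair with fl_end == OFFSET_MAX meaning "to end of
 * file"; AFS_FLOCK uses start + length with l_len == 0 meaning the same
 * thing, so the round trip is:
 */
#if 0
static void
afs_example_flock_from_linux(struct AFS_FLOCK *af, struct file_lock *flp)
{
    af->l_start = flp->fl_start;
    af->l_len = (flp->fl_end == OFFSET_MAX)
                    ? 0 : flp->fl_end - flp->fl_start + 1;
}

static void
afs_example_flock_to_linux(struct file_lock *flp, struct AFS_FLOCK *af)
{
    flp->fl_start = af->l_start;
    flp->fl_end = (af->l_len == 0)
                      ? OFFSET_MAX : af->l_start + af->l_len - 1;
}
#endif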
521 #ifdef STRUCT_FILE_OPERATIONS_HAS_FLOCK
523 afs_linux_flock(struct file *fp, int cmd, struct file_lock *flp) {
525 struct vcache *vcp = VTOAFS(FILE_INODE(fp));
526 cred_t *credp = crref();
527 struct AFS_FLOCK flock;
528 /* Convert to a lock format afs_lockctl understands. */
529 memset(&flock, 0, sizeof(flock));
530 flock.l_type = flp->fl_type;
531 flock.l_pid = flp->fl_pid;
536 /* Safe because there are no large files, yet */
537 #if defined(F_GETLK64) && (F_GETLK != F_GETLK64)
538 if (cmd == F_GETLK64)
540 else if (cmd == F_SETLK64)
542 else if (cmd == F_SETLKW64)
544 #endif /* F_GETLK64 && F_GETLK != F_GETLK64 */
547 code = afs_convert_code(afs_lockctl(vcp, &flock, cmd, credp));
550 if ((code == 0 || flp->fl_type == F_UNLCK) &&
551 (cmd == F_SETLK || cmd == F_SETLKW)) {
552 flp->fl_flags &=~ FL_SLEEP;
553 code = flock_lock_file_wait(fp, flp);
554 if (code && flp->fl_type != F_UNLCK) {
555 struct AFS_FLOCK flock2;
557 flock2.l_type = F_UNLCK;
559 afs_lockctl(vcp, &flock2, F_SETLK, credp);
563 /* Convert flock back to Linux's file_lock */
564 flp->fl_type = flock.l_type;
565 flp->fl_pid = flock.l_pid;
573 * essentially the same as afs_fsync() but we need to get the return
574 * code for the sys_close() here, not afs_linux_release(), so call
575 * afs_StoreAllSegments() with AFS_LASTSTORE
578 #if defined(FOP_FLUSH_TAKES_FL_OWNER_T)
579 afs_linux_flush(struct file *fp, fl_owner_t id)
581 afs_linux_flush(struct file *fp)
584 struct vrequest treq;
592 if ((fp->f_flags & O_ACCMODE) == O_RDONLY) { /* readers don't flush */
600 vcp = VTOAFS(FILE_INODE(fp));
602 code = afs_InitReq(&treq, credp);
605 /* If caching is bypassed for this file, or globally, just return 0 */
606 if (cache_bypass_strategy == ALWAYS_BYPASS_CACHE)
609 ObtainReadLock(&vcp->lock);
610 if (vcp->cachingStates & FCSBypass)
612 ReleaseReadLock(&vcp->lock);
615 /* future proof: don't rely on 0 return from afs_InitReq */
620 ObtainSharedLock(&vcp->lock, 535);
621 if ((vcp->execsOrWriters > 0) && (file_count(fp) == 1)) {
622 UpgradeSToWLock(&vcp->lock, 536);
623 if (!AFS_IS_DISCONNECTED) {
624 code = afs_StoreAllSegments(vcp,
626 AFS_SYNC | AFS_LASTSTORE);
628 afs_DisconAddDirty(vcp, VDisconWriteOsiFlush, 1);
630 ConvertWToSLock(&vcp->lock);
632 code = afs_CheckCode(code, &treq, 54);
633 ReleaseSharedLock(&vcp->lock);
640 return afs_convert_code(code);
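/*
 * Sketch of the flush path described above (assumption, kept under #if 0;
 * the middle argument to afs_StoreAllSegments() is assumed to be the
 * vrequest initialised earlier in the function).  sys_close() only
 * propagates the return value of ->flush, so the last writer's data is
 * pushed here with AFS_LASTSTORE; ->release runs later from __fput(),
 * where any store error would be lost.
 */
#if 0
    if ((vcp->execsOrWriters > 0) && (file_count(fp) == 1)) {
        code = afs_StoreAllSegments(vcp, &treq, AFS_SYNC | AFS_LASTSTORE);
        code = afs_CheckCode(code, &treq, 54); /* map to a user-visible error */
    }
    return afs_convert_code(code);
#endif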
643 struct file_operations afs_dir_fops = {
644 .read = generic_read_dir,
645 .readdir = afs_linux_readdir,
646 #ifdef HAVE_UNLOCKED_IOCTL
647 .unlocked_ioctl = afs_unlocked_xioctl,
651 #ifdef HAVE_COMPAT_IOCTL
652 .compat_ioctl = afs_unlocked_xioctl,
654 .open = afs_linux_open,
655 .release = afs_linux_release,
656 .llseek = default_llseek,
659 struct file_operations afs_file_fops = {
660 .read = afs_linux_read,
661 .write = afs_linux_write,
662 #ifdef HAVE_LINUX_GENERIC_FILE_AIO_READ
663 .aio_read = generic_file_aio_read,
664 .aio_write = generic_file_aio_write,
666 #ifdef HAVE_UNLOCKED_IOCTL
667 .unlocked_ioctl = afs_unlocked_xioctl,
671 #ifdef HAVE_COMPAT_IOCTL
672 .compat_ioctl = afs_unlocked_xioctl,
674 .mmap = afs_linux_mmap,
675 .open = afs_linux_open,
676 .flush = afs_linux_flush,
677 #if defined(STRUCT_FILE_OPERATIONS_HAS_SENDFILE)
678 .sendfile = generic_file_sendfile,
680 #if defined(STRUCT_FILE_OPERATIONS_HAS_SPLICE)
681 .splice_write = generic_file_splice_write,
682 .splice_read = generic_file_splice_read,
684 .release = afs_linux_release,
685 .fsync = afs_linux_fsync,
686 .lock = afs_linux_lock,
687 #ifdef STRUCT_FILE_OPERATIONS_HAS_FLOCK
688 .flock = afs_linux_flock,
690 .llseek = default_llseek,
694 /**********************************************************************
695 * AFS Linux dentry operations
696 **********************************************************************/
698 /* fix_bad_parent() : called if this dentry's vcache is a root vcache
699 * that has its mvid (parent dir's fid) pointer set to the wrong directory
700 * due to being mounted at multiple points at once. fix_bad_parent()
701 * calls afs_lookup() to correct the vcache's mvid, as well as the volume's
702 * dotdotfid and mtpoint fid members.
704 * dp - dentry to be checked.
705 * credp - credentials
706 * vcp, pvc - item's and parent's vcache pointer
710 * This dentry's vcache's mvid will be set to the correct parent directory's
712 * This root vnode's volume will have its dotdotfid and mtpoint fids set
713 * to the correct parent and mountpoint fids.
717 fix_bad_parent(struct dentry *dp, cred_t *credp, struct vcache *vcp, struct vcache *pvc)
719 struct vcache *avc = NULL;
721 /* force a lookup, so vcp->mvid is fixed up */
722 afs_lookup(pvc, (char *)dp->d_name.name, &avc, credp);
723 if (!avc || vcp != avc) { /* bad, very bad.. */
724 afs_Trace4(afs_iclSetp, CM_TRACE_TMP_1S3L, ICL_TYPE_STRING,
725 "check_bad_parent: bad pointer returned from afs_lookup origvc newvc dentry",
726 ICL_TYPE_POINTER, vcp, ICL_TYPE_POINTER, avc,
727 ICL_TYPE_POINTER, dp);
730 AFS_RELE(AFSTOV(avc));
735 /* afs_linux_revalidate
736 * Ensure vcache is stat'd before use. Return 0 if entry is valid.
739 afs_linux_revalidate(struct dentry *dp)
742 struct vcache *vcp = VTOAFS(dp->d_inode);
746 if (afs_shuttingdown)
752 /* Make this a fast path (no crref), since it's called so often. */
753 if (vcp->states & CStatd) {
754 struct vcache *pvc = VTOAFS(dp->d_parent->d_inode);
756 if (*dp->d_name.name != '/' && vcp->mvstat == 2) { /* root vnode */
757 if (vcp->mvid->Fid.Volume != pvc->fid.Fid.Volume) { /* bad parent */
760 fix_bad_parent(dp); /* check and correct mvid */
769 /* This avoids the crref when we don't have to do it. Watch for
770 * changes in afs_getattr that don't get replicated here!
772 if (vcp->f.states & CStatd &&
773 (!afs_fakestat_enable || vcp->mvstat != 1) &&
775 (vType(vcp) == VDIR || vType(vcp) == VLNK)) {
776 code = afs_CopyOutAttrs(vcp, &vattr);
779 code = afs_getattr(vcp, &vattr, credp);
784 afs_fill_inode(AFSTOV(vcp), &vattr);
788 return afs_convert_code(code);
792 afs_linux_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat)
794 int err = afs_linux_revalidate(dentry);
796 generic_fillattr(dentry->d_inode, stat);
801 /* Validate a dentry. Return 1 if unchanged, 0 if VFS layer should re-evaluate.
802 * In kernels 2.2.10 and above, we are passed an additional flags var which
803 * may have either the LOOKUP_FOLLOW or LOOKUP_DIRECTORY flag set, in which case
804 * we are advised to follow the entry if it is a link, or to make sure that
805 * it is a directory. But since the kernel itself checks these possibilities
806 * later on, we shouldn't have to do it until later. Perhaps in the future...
808 * The code here assumes that on entry the global lock is not held
811 #ifdef DOP_REVALIDATE_TAKES_NAMEIDATA
812 afs_linux_dentry_revalidate(struct dentry *dp, struct nameidata *nd)
814 afs_linux_dentry_revalidate(struct dentry *dp, int flags)
818 cred_t *credp = NULL;
819 struct vcache *vcp, *pvcp, *tvc = NULL;
820 struct dentry *parent;
822 struct afs_fakestat_state fakestate;
825 afs_InitFakeStat(&fakestate);
828 vcp = VTOAFS(dp->d_inode);
830 if (vcp == afs_globalVp)
833 parent = dget_parent(dp);
834 pvcp = VTOAFS(parent->d_inode);
836 if ((vcp->mvstat == 1) || (vcp->mvstat == 2)) { /* need to lock */
842 if (locked && vcp->mvstat == 1) { /* mount point */
843 if (vcp->mvid && (vcp->f.states & CMValid)) {
846 struct vrequest treq;
848 code = afs_InitReq(&treq, credp);
850 (strcmp(dp->d_name.name, ".directory") == 0)) {
854 code = afs_TryEvalFakeStat(&vcp, &fakestate, &treq);
856 code = afs_EvalFakeStat(&vcp, &fakestate, &treq);
857 if ((tryEvalOnly && vcp->mvstat == 1) || code) {
858 /* a mount point, not yet replaced by its directory */
863 if (locked && *dp->d_name.name != '/' && vcp->mvstat == 2) { /* root vnode */
864 if (vcp->mvid->Fid.Volume != pvcp->f.fid.Fid.Volume) { /* bad parent */
865 fix_bad_parent(dp, credp, vcp, pvcp); /* check and correct mvid */
870 /* If the last looker changes, we should make sure the current
871 * looker still has permission to examine this file. This would
872 * always require a crref() which would be "slow".
874 if (vcp->last_looker != treq.uid) {
875 if (!afs_AccessOK(vcp, (vType(vcp) == VREG) ? PRSFS_READ : PRSFS_LOOKUP, &treq, CHECK_MODE_BITS))
878 vcp->last_looker = treq.uid;
883 /* If the parent's DataVersion has changed or the vnode
884 * is no longer valid, we need to do a full lookup. VerifyVCache
885 * isn't enough since the vnode may have been renamed.
888 if ((!locked) && (hgetlo(pvcp->f.m.DataVersion) > dp->d_time || !(vcp->f.states & CStatd)) ) {
894 if (locked && (hgetlo(pvcp->f.m.DataVersion) > dp->d_time || !(vcp->f.states & CStatd))) {
895 afs_lookup(pvcp, (char *)dp->d_name.name, &tvc, credp);
896 if (!tvc || tvc != vcp) {
901 if (afs_getattr(vcp, &vattr, credp)) {
906 vattr2inode(AFSTOV(vcp), &vattr);
907 dp->d_time = hgetlo(pvcp->f.m.DataVersion);
910 /* should we always update the attributes at this point? */
911 /* unlikely--the vcache entry hasn't changed */
916 /* If this code is ever enabled, we should use dget_parent to handle
917 * getting the parent, and dput() to dispose of it. See above for an
919 pvcp = VTOAFS(dp->d_parent->d_inode);
920 if (hgetlo(pvcp->f.m.DataVersion) > dp->d_time)
924 /* No change in parent's DataVersion so this negative
925 * lookup is still valid. BUT, if a server is down a
926 * negative lookup can result, so there should be a
927 * lifetime as well. For now, always expire.
940 afs_PutFakeStat(&fakestate); /* from here on vcp may be no longer valid */
942 /* we hold the global lock if we evaluated a mount point */
949 shrink_dcache_parent(dp);
955 if (have_submounts(dp))
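/*
 * Sketch of the freshness test used by the revalidation above (assumption,
 * kept under #if 0).  dp->d_time caches the low word of the parent
 * directory's DataVersion as of the last (re)validation; if the parent
 * has changed since then, or the child's vcache has lost CStatd, the
 * cached name-to-vnode binding cannot be trusted and a fresh afs_lookup()
 * is required.
 */
#if 0
static int
afs_example_dentry_stale(struct dentry *dp, struct vcache *vcp,
                         struct vcache *pvcp)  /* hypothetical helper */
{
    return hgetlo(pvcp->f.m.DataVersion) > dp->d_time
           || !(vcp->f.states & CStatd);
}
#endif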
963 afs_dentry_iput(struct dentry *dp, struct inode *ip)
965 struct vcache *vcp = VTOAFS(ip);
968 if (!AFS_IS_DISCONNECTED || (vcp->f.states & CUnlinked)) {
969 (void) afs_InactiveVCache(vcp, NULL);
972 afs_linux_clear_nfsfs_renamed(dp);
978 afs_dentry_delete(struct dentry *dp)
980 if (dp->d_inode && (VTOAFS(dp->d_inode)->f.states & CUnlinked))
981 return 1; /* bad inode? */
986 struct dentry_operations afs_dentry_operations = {
987 .d_revalidate = afs_linux_dentry_revalidate,
988 .d_delete = afs_dentry_delete,
989 .d_iput = afs_dentry_iput,
992 /**********************************************************************
993 * AFS Linux inode operations
994 **********************************************************************/
998 * Merely need to set enough of vattr to get us through the create. Note
999 * that the higher level code (open_namei) will take care of any truncation
1000 * explicitly. Exclusive open is also taken care of in open_namei.
1002 * name is in kernel space at this point.
1005 #ifdef IOP_CREATE_TAKES_NAMEIDATA
1006 afs_linux_create(struct inode *dip, struct dentry *dp, int mode,
1007 struct nameidata *nd)
1009 afs_linux_create(struct inode *dip, struct dentry *dp, int mode)
1013 cred_t *credp = crref();
1014 const char *name = dp->d_name.name;
1019 vattr.va_mode = mode;
1020 vattr.va_type = mode & S_IFMT;
1023 code = afs_create(VTOAFS(dip), (char *)name, &vattr, NONEXCL, mode,
1027 struct inode *ip = AFSTOV(vcp);
1029 afs_getattr(vcp, &vattr, credp);
1030 afs_fill_inode(ip, &vattr);
1031 insert_inode_hash(ip);
1032 #if !defined(STRUCT_SUPER_BLOCK_HAS_S_D_OP)
1033 dp->d_op = &afs_dentry_operations;
1035 dp->d_time = hgetlo(VTOAFS(dip)->f.m.DataVersion);
1036 d_instantiate(dp, ip);
1041 return afs_convert_code(code);
1044 /* afs_linux_lookup */
1045 static struct dentry *
1046 #ifdef IOP_LOOKUP_TAKES_NAMEIDATA
1047 afs_linux_lookup(struct inode *dip, struct dentry *dp,
1048 struct nameidata *nd)
1050 afs_linux_lookup(struct inode *dip, struct dentry *dp)
1053 cred_t *credp = crref();
1054 struct vcache *vcp = NULL;
1055 const char *comp = dp->d_name.name;
1056 struct inode *ip = NULL;
1057 struct dentry *newdp = NULL;
1061 code = afs_lookup(VTOAFS(dip), (char *)comp, &vcp, credp);
1067 afs_getattr(vcp, &vattr, credp);
1068 afs_fill_inode(ip, &vattr);
1069 if (hlist_unhashed(&ip->i_hash))
1070 insert_inode_hash(ip);
1072 #if !defined(STRUCT_SUPER_BLOCK_HAS_S_D_OP)
1073 dp->d_op = &afs_dentry_operations;
1075 dp->d_time = hgetlo(VTOAFS(dip)->f.m.DataVersion);
1078 if (ip && S_ISDIR(ip->i_mode)) {
1079 struct dentry *alias;
1081 /* Try to invalidate an existing alias in favor of our new one */
1082 alias = d_find_alias(ip);
1083 /* But not if it's disconnected; then we want d_splice_alias below */
1084 if (alias && !(alias->d_flags & DCACHE_DISCONNECTED)) {
1085 if (d_invalidate(alias) == 0) {
1094 newdp = d_splice_alias(ip, dp);
1098 /* It's ok for the file to not be found. That's noted by the caller by
1099 * seeing that the dp->d_inode field is NULL.
1101 if (!code || code == ENOENT)
1104 return ERR_PTR(afs_convert_code(code));
1108 afs_linux_link(struct dentry *olddp, struct inode *dip, struct dentry *newdp)
1111 cred_t *credp = crref();
1112 const char *name = newdp->d_name.name;
1113 struct inode *oldip = olddp->d_inode;
1115 /* If afs_link returned the vnode, we could instantiate the
1116 * dentry. Since it's not, we drop this one and do a new lookup.
1121 code = afs_link(VTOAFS(oldip), VTOAFS(dip), (char *)name, credp);
1125 return afs_convert_code(code);
1128 /* We have to have a Linux specific sillyrename function, because we
1129 * also have to keep the dcache up to date when we're doing a silly
1130 * rename - so we don't want the generic vnodeops doing this behind our
1135 afs_linux_sillyrename(struct inode *dir, struct dentry *dentry,
1138 struct vcache *tvc = VTOAFS(dentry->d_inode);
1139 struct dentry *__dp = NULL;
1140 char *__name = NULL;
1143 if (afs_linux_nfsfs_renamed(dentry))
1151 osi_FreeSmallSpace(__name);
1152 __name = afs_newname();
1155 __dp = lookup_one_len(__name, dentry->d_parent, strlen(__name));
1158 osi_FreeSmallSpace(__name);
1161 } while (__dp->d_inode != NULL);
1164 code = afs_rename(VTOAFS(dir), (char *)dentry->d_name.name,
1165 VTOAFS(dir), (char *)__dp->d_name.name,
1168 tvc->mvid = (void *) __name;
1171 crfree(tvc->uncred);
1173 tvc->uncred = credp;
1174 tvc->f.states |= CUnlinked;
1175 afs_linux_set_nfsfs_renamed(dentry);
1177 osi_FreeSmallSpace(__name);
1182 __dp->d_time = hgetlo(VTOAFS(dir)->f.m.DataVersion);
1183 d_move(dentry, __dp);
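/*
 * Sketch of the overall silly-rename dance performed above (assumption,
 * kept under #if 0; the trailing credp argument to afs_rename() is elided
 * in this excerpt and assumed here).  The still-open file is renamed to a
 * fresh private name from afs_newname(), the name is remembered in
 * tvc->mvid so the final close can remove it, CUnlinked is set, and
 * d_move() keeps the Linux dcache in step with the rename we just did
 * behind its back.
 */
#if 0
    do {
        __name = afs_newname();
        __dp = lookup_one_len(__name, dentry->d_parent, strlen(__name));
    } while (__dp->d_inode != NULL);        /* keep trying until unused */

    code = afs_rename(VTOAFS(dir), (char *)dentry->d_name.name,
                      VTOAFS(dir), (char *)__dp->d_name.name, credp);
    if (!code) {
        tvc->mvid = (void *) __name;        /* remembered for the last close */
        tvc->f.states |= CUnlinked;
        d_move(dentry, __dp);               /* keep the dcache current */
    }
#endif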
1192 afs_linux_unlink(struct inode *dip, struct dentry *dp)
1195 cred_t *credp = crref();
1196 const char *name = dp->d_name.name;
1197 struct vcache *tvc = VTOAFS(dp->d_inode);
1199 if (VREFCOUNT(tvc) > 1 && tvc->opens > 0
1200 && !(tvc->f.states & CUnlinked)) {
1202 code = afs_linux_sillyrename(dip, dp, credp);
1205 code = afs_remove(VTOAFS(dip), (char *)name, credp);
1212 return afs_convert_code(code);
1217 afs_linux_symlink(struct inode *dip, struct dentry *dp, const char *target)
1220 cred_t *credp = crref();
1222 const char *name = dp->d_name.name;
1224 /* If afs_symlink returned the vnode, we could instantiate the
1225 * dentry. Since it's not, we drop this one and do a new lookup.
1231 code = afs_symlink(VTOAFS(dip), (char *)name, &vattr, (char *)target, credp);
1234 return afs_convert_code(code);
1238 afs_linux_mkdir(struct inode *dip, struct dentry *dp, int mode)
1241 cred_t *credp = crref();
1242 struct vcache *tvcp = NULL;
1244 const char *name = dp->d_name.name;
1247 vattr.va_mask = ATTR_MODE;
1248 vattr.va_mode = mode;
1250 code = afs_mkdir(VTOAFS(dip), (char *)name, &vattr, &tvcp, credp);
1253 struct inode *ip = AFSTOV(tvcp);
1255 afs_getattr(tvcp, &vattr, credp);
1256 afs_fill_inode(ip, &vattr);
1258 #if !defined(STRUCT_SUPER_BLOCK_HAS_S_D_OP)
1259 dp->d_op = &afs_dentry_operations;
1261 dp->d_time = hgetlo(VTOAFS(dip)->f.m.DataVersion);
1262 d_instantiate(dp, ip);
1267 return afs_convert_code(code);
1271 afs_linux_rmdir(struct inode *dip, struct dentry *dp)
1274 cred_t *credp = crref();
1275 const char *name = dp->d_name.name;
1277 /* locking kernel conflicts with glock? */
1280 code = afs_rmdir(VTOAFS(dip), (char *)name, credp);
1283 /* Linux likes to see ENOTEMPTY returned from an rmdir() syscall
1284 * that failed because a directory is not empty. So, we map
1285 * EEXIST to ENOTEMPTY on Linux.
1287 if (code == EEXIST) {
1296 return afs_convert_code(code);
1301 afs_linux_rename(struct inode *oldip, struct dentry *olddp,
1302 struct inode *newip, struct dentry *newdp)
1305 cred_t *credp = crref();
1306 const char *oldname = olddp->d_name.name;
1307 const char *newname = newdp->d_name.name;
1308 struct dentry *rehash = NULL;
1310 /* Prevent any new references during rename operation. */
1312 if (!d_unhashed(newdp)) {
1317 if (atomic_read(&olddp->d_count) > 1)
1318 shrink_dcache_parent(olddp);
1321 code = afs_rename(VTOAFS(oldip), (char *)oldname, VTOAFS(newip), (char *)newname, credp);
1325 olddp->d_time = 0; /* force to revalidate */
1331 return afs_convert_code(code);
1335 /* afs_linux_ireadlink
1336 * Internal readlink which can return link contents to user or kernel space.
1337 * Note that the buffer is NOT supposed to be null-terminated.
1340 afs_linux_ireadlink(struct inode *ip, char *target, int maxlen, uio_seg_t seg)
1343 cred_t *credp = crref();
1347 setup_uio(&tuio, &iov, target, (afs_offs_t) 0, maxlen, UIO_READ, seg);
1348 code = afs_readlink(VTOAFS(ip), &tuio, credp);
1352 return maxlen - tuio.uio_resid;
1354 return afs_convert_code(code);
1357 #if !defined(USABLE_KERNEL_PAGE_SYMLINK_CACHE)
1358 /* afs_linux_readlink
1359 * Fill target (which is in user space) with contents of symlink.
1362 afs_linux_readlink(struct dentry *dp, char *target, int maxlen)
1365 struct inode *ip = dp->d_inode;
1368 code = afs_linux_ireadlink(ip, target, maxlen, AFS_UIOUSER);
1374 /* afs_linux_follow_link
1375 * a file system dependent link following routine.
1377 static int afs_linux_follow_link(struct dentry *dentry, struct nameidata *nd)
1382 name = osi_Alloc(PATH_MAX);
1388 code = afs_linux_ireadlink(dentry->d_inode, name, PATH_MAX - 1, AFS_UIOSYS);
1396 nd_set_link(nd, name);
1401 afs_linux_put_link(struct dentry *dentry, struct nameidata *nd)
1403 char *name = nd_get_link(nd);
1404 if (name && !IS_ERR(name)) {
1405 osi_Free(name, PATH_MAX);
1409 #endif /* USABLE_KERNEL_PAGE_SYMLINK_CACHE */
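/*
 * Sketch of the allocation contract between the two hooks above
 * (assumption, kept under #if 0; the NUL-termination of the buffer is
 * assumed, as that line is not shown in this excerpt).  follow_link
 * allocates a PATH_MAX buffer, reads the link target into it and
 * publishes it with nd_set_link(); after the VFS has walked the target,
 * put_link retrieves the same pointer with nd_get_link() and frees it,
 * so the buffer lives exactly as long as the name lookup needs it.
 */
#if 0
    /* in afs_linux_follow_link() */
    name = osi_Alloc(PATH_MAX);
    code = afs_linux_ireadlink(dentry->d_inode, name, PATH_MAX - 1, AFS_UIOSYS);
    name[code] = '\0';                      /* assumed */
    nd_set_link(nd, name);

    /* in afs_linux_put_link() */
    name = nd_get_link(nd);
    if (name && !IS_ERR(name))
        osi_Free(name, PATH_MAX);
#endif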
1411 /* Populate a page by filling it from the cache file pointed at by cachefp
1412 * (which contains indicated chunk)
1413 * If task is NULL, the page copy occurs synchronously, and the routine
1414 * returns with page still locked. If task is non-NULL, then page copies
1415 * may occur in the background, and the page will be unlocked when it is
1419 afs_linux_read_cache(struct file *cachefp, struct page *page,
1420 int chunk, struct pagevec *lrupv,
1421 struct afs_pagecopy_task *task) {
1422 loff_t offset = page_offset(page);
1423 struct page *newpage, *cachepage;
1424 struct address_space *cachemapping;
1428 cachemapping = cachefp->f_dentry->d_inode->i_mapping;
1432 /* From our offset, we now need to work out which page in the disk
1433 * file it corresponds to. This will be fun ... */
1434 pageindex = (offset - AFS_CHUNKTOBASE(chunk)) >> PAGE_CACHE_SHIFT;
1436 while (cachepage == NULL) {
1437 cachepage = find_get_page(cachemapping, pageindex);
1440 newpage = page_cache_alloc_cold(cachemapping);
1446 code = add_to_page_cache(newpage, cachemapping,
1447 pageindex, GFP_KERNEL);
1449 cachepage = newpage;
1452 page_cache_get(cachepage);
1453 if (!pagevec_add(lrupv, cachepage))
1454 __pagevec_lru_add_file(lrupv);
1457 page_cache_release(newpage);
1459 if (code != -EEXIST)
1463 lock_page(cachepage);
1467 if (!PageUptodate(cachepage)) {
1468 ClearPageError(cachepage);
1469 code = cachemapping->a_ops->readpage(NULL, cachepage);
1470 if (!code && !task) {
1471 wait_on_page_locked(cachepage);
1474 unlock_page(cachepage);
1478 if (PageUptodate(cachepage)) {
1479 copy_highpage(page, cachepage);
1480 flush_dcache_page(page);
1481 SetPageUptodate(page);
1486 afs_pagecopy_queue_page(task, cachepage, page);
1498 page_cache_release(cachepage);
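/*
 * Sketch of the two calling modes described above (assumption, kept under
 * #if 0).  The single-page readpage path passes task == NULL and gets a
 * synchronous copy with the target page still locked on return; the
 * readpages path creates a pagecopy task so copies can complete in the
 * background and each page is unlocked as its data arrives.
 */
#if 0
    /* synchronous, one page (readpage fastpath) */
    code = afs_linux_read_cache(cacheFp, pp, tdc->f.chunk, &lrupv, NULL);

    /* asynchronous, many pages (readpages) */
    task = afs_pagecopy_init_task();
    code = afs_linux_read_cache(cacheFp, page, tdc->f.chunk, &lrupv, task);
    afs_pagecopy_put_task(task);
#endif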
1504 afs_linux_readpage_fastpath(struct file *fp, struct page *pp, int *codep)
1506 loff_t offset = page_offset(pp);
1507 struct inode *ip = FILE_INODE(fp);
1508 struct vcache *avc = VTOAFS(ip);
1510 struct file *cacheFp = NULL;
1513 struct pagevec lrupv;
1515 /* Not a UFS cache, don't do anything */
1516 if (cacheDiskType != AFS_FCACHE_TYPE_UFS)
1519 /* Can't do anything if the vcache isn't statd, or if the read
1520 * crosses a chunk boundary.
1522 if (!(avc->f.states & CStatd) ||
1523 AFS_CHUNK(offset) != AFS_CHUNK(offset + PAGE_SIZE)) {
1527 ObtainWriteLock(&avc->lock, 911);
1529 /* XXX - See if hinting actually makes things faster !!! */
1531 /* See if we have a suitable entry already cached */
1535 /* We need to lock xdcache, then dcache, to handle situations where
1536 * the hint is on the free list. However, we can't safely do this
1537 * according to the locking hierarchy. So, use a non blocking lock.
1539 ObtainReadLock(&afs_xdcache);
1540 dcLocked = ( 0 == NBObtainReadLock(&tdc->lock));
1542 if (dcLocked && (tdc->index != NULLIDX)
1543 && !FidCmp(&tdc->f.fid, &avc->f.fid)
1544 && tdc->f.chunk == AFS_CHUNK(offset)
1545 && !(afs_indexFlags[tdc->index] & (IFFree | IFDiscarded))) {
1546 /* Bonus - the hint was correct */
1549 /* Only destroy the hint if it's actually invalid, not if there's
1550 * just been a locking failure */
1552 ReleaseReadLock(&tdc->lock);
1559 ReleaseReadLock(&afs_xdcache);
1562 /* No hint, or hint is no longer valid - see if we can get something
1563 * directly from the dcache
1566 tdc = afs_FindDCache(avc, offset);
1569 ReleaseWriteLock(&avc->lock);
1574 ObtainReadLock(&tdc->lock);
1576 /* Is the dcache we've been given currently up to date */
1577 if (!hsame(avc->f.m.DataVersion, tdc->f.versionNo) ||
1578 (tdc->dflags & DFFetching)) {
1579 ReleaseWriteLock(&avc->lock);
1580 ReleaseReadLock(&tdc->lock);
1585 /* Update our hint for future abuse */
1588 /* Okay, so we've now got a cache file that is up to date */
1590 /* XXX - I suspect we should be locking the inodes before we use them! */
1592 cacheFp = afs_linux_raw_open(&tdc->f.inode);
1593 pagevec_init(&lrupv, 0);
1595 code = afs_linux_read_cache(cacheFp, pp, tdc->f.chunk, &lrupv, NULL);
1597 if (pagevec_count(&lrupv))
1598 __pagevec_lru_add_file(&lrupv);
1600 filp_close(cacheFp, NULL);
1603 ReleaseReadLock(&tdc->lock);
1604 ReleaseWriteLock(&avc->lock);
1611 /* afs_linux_readpage
1613 * This function is split into two, because prepare_write/begin_write
1614 * require a readpage call which doesn't unlock the resulting page upon
1618 afs_linux_fillpage(struct file *fp, struct page *pp)
1623 struct iovec *iovecp;
1624 struct inode *ip = FILE_INODE(fp);
1625 afs_int32 cnt = page_count(pp);
1626 struct vcache *avc = VTOAFS(ip);
1627 afs_offs_t offset = page_offset(pp);
1631 if (afs_linux_readpage_fastpath(fp, pp, &code)) {
1641 auio = osi_Alloc(sizeof(uio_t));
1642 iovecp = osi_Alloc(sizeof(struct iovec));
1644 setup_uio(auio, iovecp, (char *)address, offset, PAGE_SIZE, UIO_READ,
1649 afs_Trace4(afs_iclSetp, CM_TRACE_READPAGE, ICL_TYPE_POINTER, ip,
1650 ICL_TYPE_POINTER, pp, ICL_TYPE_INT32, cnt, ICL_TYPE_INT32,
1651 99999); /* not a possible code value */
1653 code = afs_rdwr(avc, auio, UIO_READ, 0, credp);
1655 afs_Trace4(afs_iclSetp, CM_TRACE_READPAGE, ICL_TYPE_POINTER, ip,
1656 ICL_TYPE_POINTER, pp, ICL_TYPE_INT32, cnt, ICL_TYPE_INT32,
1658 AFS_DISCON_UNLOCK();
1661 /* XXX valid for no-cache also? Check last bits of files... :)
1662 * Cognate code goes in afs_NoCacheFetchProc. */
1663 if (auio->uio_resid) /* zero remainder of page */
1664 memset((void *)(address + (PAGE_SIZE - auio->uio_resid)), 0,
1667 flush_dcache_page(pp);
1668 SetPageUptodate(pp);
1673 osi_Free(auio, sizeof(uio_t));
1674 osi_Free(iovecp, sizeof(struct iovec));
1677 return afs_convert_code(code);
1681 afs_linux_prefetch(struct file *fp, struct page *pp)
1684 struct vcache *avc = VTOAFS(FILE_INODE(fp));
1685 afs_offs_t offset = page_offset(pp);
1687 if (AFS_CHUNKOFFSET(offset) == 0) {
1689 struct vrequest treq;
1694 code = afs_InitReq(&treq, credp);
1695 if (!code && !NBObtainWriteLock(&avc->lock, 534)) {
1696 tdc = afs_FindDCache(avc, offset);
1698 if (!(tdc->mflags & DFNextStarted))
1699 afs_PrefetchChunk(avc, tdc, credp, &treq);
1702 ReleaseWriteLock(&avc->lock);
1707 return afs_convert_code(code);
1712 afs_linux_bypass_readpages(struct file *fp, struct address_space *mapping,
1713 struct list_head *page_list, unsigned num_pages)
1718 struct iovec* iovecp;
1719 struct nocache_read_request *ancr;
1721 struct pagevec lrupv;
1725 struct inode *ip = FILE_INODE(fp);
1726 struct vcache *avc = VTOAFS(ip);
1727 afs_int32 base_index = 0;
1728 afs_int32 page_count = 0;
1731 /* background thread must free: iovecp, auio, ancr */
1732 iovecp = osi_Alloc(num_pages * sizeof(struct iovec));
1734 auio = osi_Alloc(sizeof(uio_t));
1735 auio->uio_iov = iovecp;
1736 auio->uio_iovcnt = num_pages;
1737 auio->uio_flag = UIO_READ;
1738 auio->uio_seg = AFS_UIOSYS;
1739 auio->uio_resid = num_pages * PAGE_SIZE;
1741 ancr = osi_Alloc(sizeof(struct nocache_read_request));
1743 ancr->offset = auio->uio_offset;
1744 ancr->length = auio->uio_resid;
1746 pagevec_init(&lrupv, 0);
1748 for(page_ix = 0; page_ix < num_pages; ++page_ix) {
1750 if(list_empty(page_list))
1753 pp = list_entry(page_list->prev, struct page, lru);
1754 /* If we allocate a page and don't remove it from page_list,
1755 * the page cache gets upset. */
1757 isize = (i_size_read(fp->f_mapping->host) - 1) >> PAGE_CACHE_SHIFT;
1758 if(pp->index > isize) {
1765 offset = page_offset(pp);
1766 auio->uio_offset = offset;
1767 base_index = pp->index;
1769 iovecp[page_ix].iov_len = PAGE_SIZE;
1770 code = add_to_page_cache(pp, mapping, pp->index, GFP_KERNEL);
1771 if(base_index != pp->index) {
1774 page_cache_release(pp);
1775 iovecp[page_ix].iov_base = (void *) 0;
1777 ancr->length -= PAGE_SIZE;
1784 page_cache_release(pp);
1785 iovecp[page_ix].iov_base = (void *) 0;
1788 if(!PageLocked(pp)) {
1792 /* increment page refcount--our original design assumed
1793 * that locking it would effectively pin it; protect
1794 * ourselves from the possibility that this assumption
1795 * is faulty, at low cost (provided we do not fail to
1796 * do the corresponding decref on the other side) */
1799 /* save the page for background map */
1800 iovecp[page_ix].iov_base = (void*) pp;
1802 /* and put it on the LRU cache */
1803 if (!pagevec_add(&lrupv, pp))
1804 __pagevec_lru_add_file(&lrupv);
1808 /* If there were useful pages in the page list, make sure all pages
1809 * are in the LRU cache, then schedule the read */
1811 if (pagevec_count(&lrupv))
1812 __pagevec_lru_add_file(&lrupv);
1814 code = afs_ReadNoCache(avc, ancr, credp);
1817 /* If there is nothing for the background thread to handle,
1818 * it won't be freeing the things that we never gave it */
1819 osi_Free(iovecp, num_pages * sizeof(struct iovec));
1820 osi_Free(auio, sizeof(uio_t));
1821 osi_Free(ancr, sizeof(struct nocache_read_request));
1823 /* we do not flush, release, or unmap pages--that will be
1824 * done for us by the background thread as each page comes in
1825 * from the fileserver */
1826 return afs_convert_code(code);
1831 afs_linux_bypass_readpage(struct file *fp, struct page *pp)
1833 cred_t *credp = NULL;
1835 struct iovec *iovecp;
1836 struct nocache_read_request *ancr;
1840 * Special case: if page is at or past end of file, just zero it and set
1843 if (page_offset(pp) >= i_size_read(fp->f_mapping->host)) {
1844 zero_user_segment(pp, 0, PAGE_CACHE_SIZE);
1845 SetPageUptodate(pp);
1852 /* receiver frees */
1853 auio = osi_Alloc(sizeof(uio_t));
1854 iovecp = osi_Alloc(sizeof(struct iovec));
1856 /* address can be NULL, because we overwrite it with 'pp', below */
1857 setup_uio(auio, iovecp, NULL, page_offset(pp),
1858 PAGE_SIZE, UIO_READ, AFS_UIOSYS);
1860 /* save the page for background map */
1861 get_page(pp); /* see above */
1862 auio->uio_iov->iov_base = (void*) pp;
1863 /* the background thread will free this */
1864 ancr = osi_Alloc(sizeof(struct nocache_read_request));
1866 ancr->offset = page_offset(pp);
1867 ancr->length = PAGE_SIZE;
1870 code = afs_ReadNoCache(VTOAFS(FILE_INODE(fp)), ancr, credp);
1873 return afs_convert_code(code);
1877 afs_linux_can_bypass(struct inode *ip) {
1878 switch(cache_bypass_strategy) {
1879 case NEVER_BYPASS_CACHE:
1881 case ALWAYS_BYPASS_CACHE:
1883 case LARGE_FILES_BYPASS_CACHE:
1884 if(i_size_read(ip) > cache_bypass_threshold)
1891 /* Check if a file is permitted to bypass the cache by policy, and modify
1892 * the cache bypass state recorded for that file */
1895 afs_linux_bypass_check(struct inode *ip) {
1898 int bypass = afs_linux_can_bypass(ip);
1901 trydo_cache_transition(VTOAFS(ip), credp, bypass);
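/*
 * Sketch of the bypass policy applied above (assumption, kept under #if 0;
 * the helper name and the exact return values are illustrative, since the
 * body of afs_linux_can_bypass() is only partially shown here).
 */
#if 0
static int
afs_example_should_bypass(struct inode *ip)     /* hypothetical helper */
{
    if (cache_bypass_strategy == ALWAYS_BYPASS_CACHE)
        return 1;
    if (cache_bypass_strategy == LARGE_FILES_BYPASS_CACHE
        && i_size_read(ip) > cache_bypass_threshold)
        return 1;
    return 0;   /* NEVER_BYPASS_CACHE and anything unrecognised */
}
#endif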
1909 afs_linux_readpage(struct file *fp, struct page *pp)
1913 if (afs_linux_bypass_check(FILE_INODE(fp))) {
1914 code = afs_linux_bypass_readpage(fp, pp);
1916 code = afs_linux_fillpage(fp, pp);
1918 code = afs_linux_prefetch(fp, pp);
1925 /* Readpages reads a number of pages for a particular file. We use
1926 * this to optimise the reading, by limiting the number of times we have
1927 * to look up, lock and open vcaches and dcaches.
1931 afs_linux_readpages(struct file *fp, struct address_space *mapping,
1932 struct list_head *page_list, unsigned int num_pages)
1934 struct inode *inode = mapping->host;
1935 struct vcache *avc = VTOAFS(inode);
1937 struct file *cacheFp = NULL;
1939 unsigned int page_idx;
1941 struct pagevec lrupv;
1942 struct afs_pagecopy_task *task;
1944 if (afs_linux_bypass_check(inode))
1945 return afs_linux_bypass_readpages(fp, mapping, page_list, num_pages);
1947 if (cacheDiskType == AFS_FCACHE_TYPE_MEM)
1951 if ((code = afs_linux_VerifyVCache(avc, NULL))) {
1956 ObtainWriteLock(&avc->lock, 912);
1959 task = afs_pagecopy_init_task();
1962 pagevec_init(&lrupv, 0);
1963 for (page_idx = 0; page_idx < num_pages; page_idx++) {
1964 struct page *page = list_entry(page_list->prev, struct page, lru);
1965 list_del(&page->lru);
1966 offset = page_offset(page);
1968 if (tdc && tdc->f.chunk != AFS_CHUNK(offset)) {
1970 ReleaseReadLock(&tdc->lock);
1975 filp_close(cacheFp, NULL);
1980 if ((tdc = afs_FindDCache(avc, offset))) {
1981 ObtainReadLock(&tdc->lock);
1982 if (!hsame(avc->f.m.DataVersion, tdc->f.versionNo) ||
1983 (tdc->dflags & DFFetching)) {
1984 ReleaseReadLock(&tdc->lock);
1991 cacheFp = afs_linux_raw_open(&tdc->f.inode);
1994 if (tdc && !add_to_page_cache(page, mapping, page->index,
1996 page_cache_get(page);
1997 if (!pagevec_add(&lrupv, page))
1998 __pagevec_lru_add_file(&lrupv);
2000 afs_linux_read_cache(cacheFp, page, tdc->f.chunk, &lrupv, task);
2002 page_cache_release(page);
2004 if (pagevec_count(&lrupv))
2005 __pagevec_lru_add_file(&lrupv);
2008 filp_close(cacheFp, NULL);
2010 afs_pagecopy_put_task(task);
2014 ReleaseReadLock(&tdc->lock);
2018 ReleaseWriteLock(&avc->lock);
2023 /* Prepare an AFS vcache for writeback. Should be called with the vcache
2026 afs_linux_prepare_writeback(struct vcache *avc) {
2027 if (avc->f.states & CPageWrite) {
2028 return AOP_WRITEPAGE_ACTIVATE;
2030 avc->f.states |= CPageWrite;
2035 afs_linux_dopartialwrite(struct vcache *avc, cred_t *credp) {
2036 struct vrequest treq;
2039 if (!afs_InitReq(&treq, credp))
2040 code = afs_DoPartialWrite(avc, &treq);
2042 return afs_convert_code(code);
2046 afs_linux_complete_writeback(struct vcache *avc) {
2047 avc->f.states &= ~CPageWrite;
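/*
 * Sketch of the writeback bracket built from the two helpers above
 * (assumption, kept under #if 0).  CPageWrite marks a vcache that is
 * already inside writeback; if the kernel re-enters writepage for the
 * same vcache while we are filling the cache or storing to the server,
 * prepare_writeback notices the flag and the caller backs off with
 * AOP_WRITEPAGE_ACTIVATE instead of deadlocking.
 */
#if 0
    ObtainWriteLock(&vcp->lock, 537);
    code = afs_linux_prepare_writeback(vcp);    /* sets CPageWrite */
    if (code == AOP_WRITEPAGE_ACTIVATE) {
        ReleaseWriteLock(&vcp->lock);
        return code;                            /* writeback already active */
    }
    ReleaseWriteLock(&vcp->lock);

    /* ... write the page out ... */

    ObtainWriteLock(&vcp->lock, 538);
    afs_linux_complete_writeback(vcp);          /* clears CPageWrite */
    ReleaseWriteLock(&vcp->lock);
#endif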
2050 /* Writeback a given page synchronously. Called with no AFS locks held */
2052 afs_linux_page_writeback(struct inode *ip, struct page *pp,
2053 unsigned long offset, unsigned int count,
2056 struct vcache *vcp = VTOAFS(ip);
2064 buffer = kmap(pp) + offset;
2065 base = page_offset(pp) + offset;
2068 afs_Trace4(afs_iclSetp, CM_TRACE_UPDATEPAGE, ICL_TYPE_POINTER, vcp,
2069 ICL_TYPE_POINTER, pp, ICL_TYPE_INT32, page_count(pp),
2070 ICL_TYPE_INT32, 99999);
2072 setup_uio(&tuio, &iovec, buffer, base, count, UIO_WRITE, AFS_UIOSYS);
2074 code = afs_write(vcp, &tuio, f_flags, credp, 0);
2076 i_size_write(ip, vcp->f.m.Length);
2077 ip->i_blocks = ((vcp->f.m.Length + 1023) >> 10) << 1;
2079 code = code ? afs_convert_code(code) : count - tuio.uio_resid;
2081 afs_Trace4(afs_iclSetp, CM_TRACE_UPDATEPAGE, ICL_TYPE_POINTER, vcp,
2082 ICL_TYPE_POINTER, pp, ICL_TYPE_INT32, page_count(pp),
2083 ICL_TYPE_INT32, code);
2092 afs_linux_writepage_sync(struct inode *ip, struct page *pp,
2093 unsigned long offset, unsigned int count)
2097 struct vcache *vcp = VTOAFS(ip);
2100 /* Catch recursive writeback. This occurs if the kernel decides
2101 * writeback is required whilst we are writing to the cache, or
2102 * flushing to the server. When we're running synchronously (as
2103 * opposed to from writepage) we can't actually do anything about
2104 * this case - as we can't return AOP_WRITEPAGE_ACTIVATE to write()
2107 ObtainWriteLock(&vcp->lock, 532);
2108 afs_linux_prepare_writeback(vcp);
2109 ReleaseWriteLock(&vcp->lock);
2113 code = afs_linux_page_writeback(ip, pp, offset, count, credp);
2116 ObtainWriteLock(&vcp->lock, 533);
2118 code1 = afs_linux_dopartialwrite(vcp, credp);
2119 afs_linux_complete_writeback(vcp);
2120 ReleaseWriteLock(&vcp->lock);
2131 #ifdef AOP_WRITEPAGE_TAKES_WRITEBACK_CONTROL
2132 afs_linux_writepage(struct page *pp, struct writeback_control *wbc)
2134 afs_linux_writepage(struct page *pp)
2137 struct address_space *mapping = pp->mapping;
2138 struct inode *inode;
2141 unsigned int to = PAGE_CACHE_SIZE;
2146 if (PageReclaim(pp)) {
2147 return AOP_WRITEPAGE_ACTIVATE;
2148 /* XXX - Do we need to redirty the page here? */
2153 inode = mapping->host;
2154 vcp = VTOAFS(inode);
2155 isize = i_size_read(inode);
2157 /* Don't defeat an earlier truncate */
2158 if (page_offset(pp) > isize) {
2159 set_page_writeback(pp);
2165 ObtainWriteLock(&vcp->lock, 537);
2166 code = afs_linux_prepare_writeback(vcp);
2167 if (code == AOP_WRITEPAGE_ACTIVATE) {
2168 /* WRITEPAGE_ACTIVATE is the only return value that permits us
2169 * to return with the page still locked */
2170 ReleaseWriteLock(&vcp->lock);
2175 /* Grab the creds structure currently held in the vnode, and
2176 * get a reference to it, in case it goes away ... */
2182 ReleaseWriteLock(&vcp->lock);
2185 set_page_writeback(pp);
2187 SetPageUptodate(pp);
2189 /* We can unlock the page here, because it's protected by the
2190 * page_writeback flag. This should make us less vulnerable to
2191 * deadlocking in afs_write and afs_DoPartialWrite
2195 /* If this is the final page, then just write the number of bytes that
2196 * are actually in it */
2197 if ((isize - page_offset(pp)) < to )
2198 to = isize - page_offset(pp);
2200 code = afs_linux_page_writeback(inode, pp, 0, to, credp);
2203 ObtainWriteLock(&vcp->lock, 538);
2205 /* As much as we might like to ignore a file server error here,
2206 * and just try again when we close(), unfortunately StoreAllSegments
2207 * will invalidate our chunks if the server returns a permanent error,
2208 * so we need to at least try and get that error back to the user
2211 code1 = afs_linux_dopartialwrite(vcp, credp);
2213 afs_linux_complete_writeback(vcp);
2214 ReleaseWriteLock(&vcp->lock);
2219 end_page_writeback(pp);
2220 page_cache_release(pp);
2231 /* afs_linux_permission
2232 * Check access rights - returns error if can't check or permission denied.
2235 #ifdef IOP_PERMISSION_TAKES_NAMEIDATA
2236 afs_linux_permission(struct inode *ip, int mode, struct nameidata *nd)
2238 afs_linux_permission(struct inode *ip, int mode)
2242 cred_t *credp = crref();
2246 if (mode & MAY_EXEC)
2248 if (mode & MAY_READ)
2250 if (mode & MAY_WRITE)
2252 code = afs_access(VTOAFS(ip), tmp, credp);
2256 return afs_convert_code(code);
2260 afs_linux_commit_write(struct file *file, struct page *page, unsigned offset,
2264 struct inode *inode = FILE_INODE(file);
2265 loff_t pagebase = page_offset(page);
2267 if (i_size_read(inode) < (pagebase + offset))
2268 i_size_write(inode, pagebase + offset);
2270 if (PageChecked(page)) {
2271 SetPageUptodate(page);
2272 ClearPageChecked(page);
2275 code = afs_linux_writepage_sync(inode, page, offset, to - offset);
2281 afs_linux_prepare_write(struct file *file, struct page *page, unsigned from,
2285 /* http://kerneltrap.org/node/4941 details the expected behaviour of
2286 * prepare_write. Essentially, if the page exists within the file,
2287 * and is not being fully written, then we should populate it.
2290 if (!PageUptodate(page)) {
2291 loff_t pagebase = page_offset(page);
2292 loff_t isize = i_size_read(page->mapping->host);
2294 /* Is the location we are writing to beyond the end of the file? */
2295 if (pagebase >= isize ||
2296 ((from == 0) && (pagebase + to) >= isize)) {
2297 zero_user_segments(page, 0, from, to, PAGE_CACHE_SIZE);
2298 SetPageChecked(page);
2299 /* Are we writing a full page? */
2300 } else if (from == 0 && to == PAGE_CACHE_SIZE) {
2301 SetPageChecked(page);
2302 /* Is the page readable? If it's write-only (O_WRONLY), we don't care, because we're
2303 * not actually going to read from it ... */
2304 } else if ((file->f_flags & O_ACCMODE) != O_WRONLY) {
2305 /* We don't care if fillpage fails, because if it does the page
2306 * won't be marked as up to date
2308 afs_linux_fillpage(file, page);
2314 #if defined(STRUCT_ADDRESS_SPACE_OPERATIONS_HAS_WRITE_BEGIN)
2316 afs_linux_write_end(struct file *file, struct address_space *mapping,
2317 loff_t pos, unsigned len, unsigned copied,
2318 struct page *page, void *fsdata)
2321 unsigned int from = pos & (PAGE_CACHE_SIZE - 1);
2323 code = afs_linux_commit_write(file, page, from, from + len);
2326 page_cache_release(page);
2331 afs_linux_write_begin(struct file *file, struct address_space *mapping,
2332 loff_t pos, unsigned len, unsigned flags,
2333 struct page **pagep, void **fsdata)
2336 pgoff_t index = pos >> PAGE_CACHE_SHIFT;
2337 unsigned int from = pos & (PAGE_CACHE_SIZE - 1);
2340 page = grab_cache_page_write_begin(mapping, index, flags);
2343 code = afs_linux_prepare_write(file, page, from, from + len);
2346 page_cache_release(page);
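/*
 * Sketch of how the newer write_begin/write_end entry points above map
 * onto the older prepare_write/commit_write pair (assumption, kept under
 * #if 0).  "pos" is an absolute file offset, so the page index and the
 * in-page range handed to the old helpers are derived from it directly.
 */
#if 0
    index = pos >> PAGE_CACHE_SHIFT;            /* which page */
    from  = pos & (PAGE_CACHE_SIZE - 1);        /* offset within that page */

    /* write_begin: pin the page and prepare [from, from + len) */
    page = grab_cache_page_write_begin(mapping, index, flags);
    code = afs_linux_prepare_write(file, page, from, from + len);

    /* write_end: commit the range and drop the page reference */
    code = afs_linux_commit_write(file, page, from, from + len);
    page_cache_release(page);
#endif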
2354 static struct inode_operations afs_file_iops = {
2355 .permission = afs_linux_permission,
2356 .getattr = afs_linux_getattr,
2357 .setattr = afs_notify_change,
2360 static struct address_space_operations afs_file_aops = {
2361 .readpage = afs_linux_readpage,
2362 .readpages = afs_linux_readpages,
2363 .writepage = afs_linux_writepage,
2364 #if defined (STRUCT_ADDRESS_SPACE_OPERATIONS_HAS_WRITE_BEGIN)
2365 .write_begin = afs_linux_write_begin,
2366 .write_end = afs_linux_write_end,
2368 .commit_write = afs_linux_commit_write,
2369 .prepare_write = afs_linux_prepare_write,
2374 /* Separate ops vector for directories. Linux 2.2 tests type of inode
2375 * by what sort of operation is allowed.
2378 static struct inode_operations afs_dir_iops = {
2379 .setattr = afs_notify_change,
2380 .create = afs_linux_create,
2381 .lookup = afs_linux_lookup,
2382 .link = afs_linux_link,
2383 .unlink = afs_linux_unlink,
2384 .symlink = afs_linux_symlink,
2385 .mkdir = afs_linux_mkdir,
2386 .rmdir = afs_linux_rmdir,
2387 .rename = afs_linux_rename,
2388 .getattr = afs_linux_getattr,
2389 .permission = afs_linux_permission,
2392 /* We really need a separate symlink set of ops, since do_follow_link()
2393 * determines if it _is_ a link by checking if the follow_link op is set.
2395 #if defined(USABLE_KERNEL_PAGE_SYMLINK_CACHE)
2397 afs_symlink_filler(struct file *file, struct page *page)
2399 struct inode *ip = (struct inode *)page->mapping->host;
2400 char *p = (char *)kmap(page);
2404 code = afs_linux_ireadlink(ip, p, PAGE_SIZE, AFS_UIOSYS);
2409 p[code] = '\0'; /* null terminate? */
2411 SetPageUptodate(page);
2423 static struct address_space_operations afs_symlink_aops = {
2424 .readpage = afs_symlink_filler
2426 #endif /* USABLE_KERNEL_PAGE_SYMLINK_CACHE */
2428 static struct inode_operations afs_symlink_iops = {
2429 #if defined(USABLE_KERNEL_PAGE_SYMLINK_CACHE)
2430 .readlink = page_readlink,
2431 # if defined(HAVE_LINUX_PAGE_FOLLOW_LINK)
2432 .follow_link = page_follow_link,
2434 .follow_link = page_follow_link_light,
2435 .put_link = page_put_link,
2437 #else /* !defined(USABLE_KERNEL_PAGE_SYMLINK_CACHE) */
2438 .readlink = afs_linux_readlink,
2439 .follow_link = afs_linux_follow_link,
2440 .put_link = afs_linux_put_link,
2441 #endif /* USABLE_KERNEL_PAGE_SYMLINK_CACHE */
2442 .setattr = afs_notify_change,
2446 afs_fill_inode(struct inode *ip, struct vattr *vattr)
2450 vattr2inode(ip, vattr);
2452 ip->i_mapping->backing_dev_info = afs_backing_dev_info;
2453 /* Reset ops if symlink or directory. */
2454 if (S_ISREG(ip->i_mode)) {
2455 ip->i_op = &afs_file_iops;
2456 ip->i_fop = &afs_file_fops;
2457 ip->i_data.a_ops = &afs_file_aops;
2459 } else if (S_ISDIR(ip->i_mode)) {
2460 ip->i_op = &afs_dir_iops;
2461 ip->i_fop = &afs_dir_fops;
2463 } else if (S_ISLNK(ip->i_mode)) {
2464 ip->i_op = &afs_symlink_iops;
2465 #if defined(USABLE_KERNEL_PAGE_SYMLINK_CACHE)
2466 ip->i_data.a_ops = &afs_symlink_aops;
2467 ip->i_mapping = &ip->i_data;