*/
#include <afsconfig.h>
#include "afs/param.h"

#include "afs/sysincludes.h"	/* Standard vendor system headers */
#include "afsincludes.h"	/* Afs-based standard headers */
#include "afs/afs_stats.h"	/* statistics */
/* Linux VM operations
*
* is not dropped and re-acquired for any platform. It may be that *slept is
* therefore obsolescent.
*/
-int osi_VM_FlushVCache(struct vcache *avc, int *slept)
+int
+osi_VM_FlushVCache(struct vcache *avc, int *slept)
{
- struct inode *ip = (struct inode*)avc;
+ struct inode *ip = AFSTOV(avc);
- if (avc->vrefCount != 0)
+ if (VREFCOUNT(avc) > 1)
return EBUSY;
if (avc->opens != 0)
return EBUSY;
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,4,0)
- truncate_inode_pages(&ip->i_data, 0);
-#elif LINUX_VERSION_CODE >= KERNEL_VERSION(2,2,15)
- truncate_inode_pages(ip, 0);
-#else
- invalidate_inode_pages(ip);
-#endif
+ return vmtruncate(ip, 0);
return 0;
}
* Since we drop and re-obtain the lock, we can't guarantee that there won't
* be some pages around when we return, newly created by concurrent activity.
*/
-void osi_VM_TryToSmush(struct vcache *avc, struct AFS_UCRED *acred, int sync)
+void
+osi_VM_TryToSmush(struct vcache *avc, afs_ucred_t *acred, int sync)
{
- invalidate_inode_pages((struct inode *)avc);
+ struct inode *ip = AFSTOV(avc);
+
+ invalidate_mapping_pages(ip->i_mapping, 0, -1);
}
/* Flush and invalidate pages, for fsync() with INVAL flag
*
* Locking: only the global lock is held.
*/
void
osi_VM_FSyncInval(struct vcache *avc)
{
    /* Intentionally a no-op on Linux. */
}
* Locking: the vcache entry's lock is held. It will usually be dropped and
* re-obtained.
*/
-void osi_VM_StoreAllSegments(struct vcache *avc)
+void
+osi_VM_StoreAllSegments(struct vcache *avc)
{
-
+ struct inode *ip = AFSTOV(avc);
+
+ if (avc->f.states & CPageWrite)
+ return; /* someone already writing */
+
+ /* filemap_fdatasync() only exported in 2.4.5 and above */
+ ReleaseWriteLock(&avc->lock);
+ AFS_GUNLOCK();
+ filemap_fdatawrite(ip->i_mapping);
+ filemap_fdatawait(ip->i_mapping);
+ AFS_GLOCK();
+ ObtainWriteLock(&avc->lock, 121);
}
/* Purge VM for a file when its callback is revoked.
*
* Locking: No lock is held, not even the global lock.
*/
-void osi_VM_FlushPages(struct vcache *avc, struct AFS_UCRED *credp)
-{
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,4,0)
- struct inode *ip = (struct inode*)avc;
+/* Note that for speed some of our Linux vnodeops do not initialise credp
+ * before calling osi_FlushPages(). If credp is ever required on Linux,
+ * then these callers should be updated.
+ */
+void
+osi_VM_FlushPages(struct vcache *avc, afs_ucred_t *credp)
+{
+ struct inode *ip = AFSTOV(avc);
+
truncate_inode_pages(&ip->i_data, 0);
-#elif LINUX_VERSION_CODE >= KERNEL_VERSION(2,2,15)
- struct inode *ip = (struct inode*)avc;
-
- truncate_inode_pages(ip, 0);
-#else
- invalidate_inode_pages((struct inode*)avc);
-#endif
}
/* Purge pages beyond end-of-file, when truncating a file.
* activeV is raised. This is supposed to block pageins, but at present
* it only works on Solaris.
*/
-void osi_VM_Truncate(struct vcache *avc, int alen, struct AFS_UCRED *acred)
+void
+osi_VM_Truncate(struct vcache *avc, int alen, afs_ucred_t *acred)
{
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,4,0)
- struct inode *ip = (struct inode*)avc;
-
- truncate_inode_pages(&ip->i_data, alen);
-#elif LINUX_VERSION_CODE >= KERNEL_VERSION(2,2,15)
- struct inode *ip = (struct inode*)avc;
-
- truncate_inode_pages(ip, alen);
-#else
- invalidate_inode_pages((struct inode*)avc);
-#endif
+ vmtruncate(AFSTOV(avc), alen);
}