afs_maybe_lock_kernel();
AFS_GLOCK();
code = afs_close(vcp, fp->f_flags, credp);
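+ /* Drop any creds stashed on the vcache for writeback; they are no
+ * longer needed once the file has been closed */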
+ ObtainWriteLock(&vcp->lock, 807);
+ if (vcp->cred) {
+ crfree(vcp->cred);
+ vcp->cred = NULL;
+ }
+ ReleaseWriteLock(&vcp->lock);
AFS_GUNLOCK();
afs_maybe_unlock_kernel();
struct AFS_FLOCK flock;
/* Convert to a lock format afs_lockctl understands. */
- memset((char *)&flock, 0, sizeof(flock));
+ memset(&flock, 0, sizeof(flock));
flock.l_type = flp->fl_type;
flock.l_pid = flp->fl_pid;
flock.l_whence = 0;
cred_t *credp = crref();
struct AFS_FLOCK flock;
/* Convert to a lock format afs_lockctl understands. */
- memset((char *)&flock, 0, sizeof(flock));
+ memset(&flock, 0, sizeof(flock));
flock.l_type = flp->fl_type;
flock.l_pid = flp->fl_pid;
flock.l_whence = 0;
dput(alias);
} else {
iput(ip);
- unlock_kernel();
+ afs_maybe_unlock_kernel();
crfree(credp);
return alias;
}
afs_offs_t offset;
struct iovec* iovecp;
struct nocache_read_request *ancr;
- struct page *pp, *ppt;
+ struct page *pp;
struct pagevec lrupv;
afs_int32 code = 0;
/* we do not flush, release, or unmap pages--that will be
* done for us by the background thread as each page comes in
* from the fileserver */
-out:
return afs_convert_code(code);
}
uio_t *auio;
struct iovec *iovecp;
struct nocache_read_request *ancr;
- afs_int32 isize;
+ int code;
ClearPageError(pp);
/* the background thread will free this */
ancr = osi_Alloc(sizeof(struct nocache_read_request));
ancr->auio = auio;
- ancr->offset = offset;
+ ancr->offset = page_offset(pp);
ancr->length = PAGE_SIZE;
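+ /* tell the background thread which region of the file this page
+ * covers, so it can issue the fetch */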
credp = crref();
if(i_size_read(ip) > cache_bypass_threshold)
return 1;
default:
+ return 0;
}
- return 0;
}
/* Check if a file is permitted to bypass the cache by policy, and modify
static inline int
afs_linux_bypass_check(struct inode *ip) {
- struct cred* credp;
+ cred_t* credp;
int bypass = afs_linux_can_bypass(ip);
credp = crref();
- trydo_cache_transition(VTOAFS(ip)), credp, bypass);
+ trydo_cache_transition(VTOAFS(ip), credp, bypass);
crfree(credp);
return bypass;
return 0;
}
+/* Prepare an AFS vcache for writeback. Should be called with the vcache
+ * locked */
+static inline int
+afs_linux_prepare_writeback(struct vcache *avc) {
+ if (avc->f.states & CPageWrite) {
+ return AOP_WRITEPAGE_ACTIVATE;
+ }
+ avc->f.states |= CPageWrite;
+ return 0;
+}
+
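+/* Flush some of the file's dirty chunks back to the fileserver if too
+ * many have built up, converting any error to a Linux error code */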
+static inline int
+afs_linux_dopartialwrite(struct vcache *avc, cred_t *credp) {
+ struct vrequest treq;
+ int code = 0;
+
+ if (!afs_InitReq(&treq, credp))
+ code = afs_DoPartialWrite(avc, &treq);
+
+ return afs_convert_code(code);
+}
+
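+/* Clear the flag set by afs_linux_prepare_writeback. As with prepare,
+ * this should be called with the vcache locked */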
+static inline void
+afs_linux_complete_writeback(struct vcache *avc) {
+ avc->f.states &= ~CPageWrite;
+}
+
+/* Write back a given page synchronously. Called with no AFS locks held */
static int
-afs_linux_writepage_sync(struct inode *ip, struct page *pp,
- unsigned long offset, unsigned int count)
+afs_linux_page_writeback(struct inode *ip, struct page *pp,
+ unsigned long offset, unsigned int count,
+ cred_t *credp)
{
struct vcache *vcp = VTOAFS(ip);
char *buffer;
afs_offs_t base;
int code = 0;
- cred_t *credp;
uio_t tuio;
struct iovec iovec;
int f_flags = 0;
buffer = kmap(pp) + offset;
base = page_offset(pp) + offset;
- credp = crref();
afs_maybe_lock_kernel();
AFS_GLOCK();
afs_Trace4(afs_iclSetp, CM_TRACE_UPDATEPAGE, ICL_TYPE_POINTER, vcp,
ICL_TYPE_POINTER, pp, ICL_TYPE_INT32, page_count(pp),
ICL_TYPE_INT32, 99999);
- ObtainWriteLock(&vcp->lock, 532);
- if (vcp->f.states & CPageWrite) {
- ReleaseWriteLock(&vcp->lock);
- AFS_GUNLOCK();
- afs_maybe_unlock_kernel();
- crfree(credp);
- kunmap(pp);
- return AOP_WRITEPAGE_ACTIVATE;
- }
- vcp->f.states |= CPageWrite;
- ReleaseWriteLock(&vcp->lock);
-
setup_uio(&tuio, &iovec, buffer, base, count, UIO_WRITE, AFS_UIOSYS);
code = afs_write(vcp, &tuio, f_flags, credp, 0);
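+ /* mirror the new AFS length into the Linux inode; i_blocks counts
+ * 512-byte units, hence the round-up to 1K and the doubling */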
i_size_write(ip, vcp->f.m.Length);
ip->i_blocks = ((vcp->f.m.Length + 1023) >> 10) << 1;
- ObtainWriteLock(&vcp->lock, 533);
- if (!code) {
- struct vrequest treq;
-
- if (!afs_InitReq(&treq, credp))
- code = afs_DoPartialWrite(vcp, &treq);
- }
code = code ? afs_convert_code(code) : count - tuio.uio_resid;
- vcp->f.states &= ~CPageWrite;
- ReleaseWriteLock(&vcp->lock);
-
afs_Trace4(afs_iclSetp, CM_TRACE_UPDATEPAGE, ICL_TYPE_POINTER, vcp,
ICL_TYPE_POINTER, pp, ICL_TYPE_INT32, page_count(pp),
ICL_TYPE_INT32, code);
AFS_GUNLOCK();
afs_maybe_unlock_kernel();
- crfree(credp);
kunmap(pp);
return code;
}
+static int
+afs_linux_writepage_sync(struct inode *ip, struct page *pp,
+ unsigned long offset, unsigned int count)
+{
+ int code;
+ int code1 = 0;
+ struct vcache *vcp = VTOAFS(ip);
+ cred_t *credp;
+
+ /* Catch recursive writeback. This occurs if the kernel decides
+ * writeback is required whilst we are writing to the cache, or
+ * flushing to the server. When we're running synchronously (as
+ * opposed to from writepage) we can't actually do anything about
+ * this case - as we can't return AOP_WRITEPAGE_ACTIVATE to write()
+ */
+ AFS_GLOCK();
+ ObtainWriteLock(&vcp->lock, 532);
+ afs_linux_prepare_writeback(vcp);
+ ReleaseWriteLock(&vcp->lock);
+ AFS_GUNLOCK();
+
+ credp = crref();
+ code = afs_linux_page_writeback(ip, pp, offset, count, credp);
+
+ afs_maybe_lock_kernel();
+ AFS_GLOCK();
+ ObtainWriteLock(&vcp->lock, 533);
+ if (code > 0)
+ code1 = afs_linux_dopartialwrite(vcp, credp);
+ afs_linux_complete_writeback(vcp);
+ ReleaseWriteLock(&vcp->lock);
+ AFS_GUNLOCK();
+ afs_maybe_unlock_kernel();
+ crfree(credp);
+
+ if (code1)
+ return code1;
+
+ return code;
+}
static int
#ifdef AOP_WRITEPAGE_TAKES_WRITEBACK_CONTROL
{
struct address_space *mapping = pp->mapping;
struct inode *inode;
+ struct vcache *vcp;
+ cred_t *credp;
unsigned int to = PAGE_CACHE_SIZE;
loff_t isize;
- int status = 0;
+ int code = 0;
+ int code1 = 0;
if (PageReclaim(pp)) {
return AOP_WRITEPAGE_ACTIVATE;
}
page_cache_get(pp);
- inode = (struct inode *)mapping->host;
+ inode = mapping->host;
+ vcp = VTOAFS(inode);
isize = i_size_read(inode);
/* Don't defeat an earlier truncate */
- if (page_offset(pp) > isize)
+ if (page_offset(pp) > isize) {
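+ /* The page is entirely beyond EOF, so there is nothing to store,
+ * but writeback must still be started and the page unlocked so
+ * that the cleanup at 'done' stays balanced */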
+ set_page_writeback(pp);
+ unlock_page(pp);
goto done;
+ }
+
+ AFS_GLOCK();
+ ObtainWriteLock(&vcp->lock, 537);
+ code = afs_linux_prepare_writeback(vcp);
+ if (code == AOP_WRITEPAGE_ACTIVATE) {
+ /* WRITEPAGE_ACTIVATE is the only return value that permits us
+ * to return with the page still locked */
+ ReleaseWriteLock(&vcp->lock);
+ AFS_GUNLOCK();
+ return code;
+ }
+
+ /* Grab the creds structure currently held in the vnode, and
+ * get a reference to it, in case it goes away ... */
+ credp = vcp->cred;
+ crhold(credp);
+ ReleaseWriteLock(&vcp->lock);
+ AFS_GUNLOCK();
+
+ set_page_writeback(pp);
+
+ SetPageUptodate(pp);
+
+ /* We can unlock the page here, because it's protected by the
+ * page_writeback flag. This should make us less vulnerable to
+ * deadlocking in afs_write and afs_DoPartialWrite
+ */
+ unlock_page(pp);
/* If this is the final page, then just write the number of bytes that
* are actually in it */
if ((isize - page_offset(pp)) < to )
to = isize - page_offset(pp);
- status = afs_linux_writepage_sync(inode, pp, 0, to);
+ code = afs_linux_page_writeback(inode, pp, 0, to, credp);
-done:
- SetPageUptodate(pp);
- if ( status != AOP_WRITEPAGE_ACTIVATE ) {
- /* XXX - do we need to redirty the page here? */
- unlock_page(pp);
- }
+ afs_maybe_lock_kernel();
+ AFS_GLOCK();
+ ObtainWriteLock(&vcp->lock, 538);
+
+ /* As much as we might like to ignore a file server error here,
+ * and just try again when we close(), unfortunately StoreAllSegments
+ * will invalidate our chunks if the server returns a permanent error,
+ * so we need to at least try and get that error back to the user
+ */
+ if (code == to)
+ code1 = afs_linux_dopartialwrite(vcp, credp);
+
+ afs_linux_complete_writeback(vcp);
+ ReleaseWriteLock(&vcp->lock);
+ crfree(credp);
+ AFS_GUNLOCK();
+ afs_maybe_unlock_kernel();
+done:
+ end_page_writeback(pp);
page_cache_release(pp);
- if (status == to)
+ if (code1)
+ return code1;
+
+ if (code == to)
return 0;
- else
- return status;
+
+ return code;
}
/* afs_linux_permission
return afs_convert_code(code);
}
-#if !defined(HAVE_WRITE_BEGIN)
static int
afs_linux_commit_write(struct file *file, struct page *page, unsigned offset,
unsigned to)
{
int code;
+ struct inode *inode = FILE_INODE(file);
+ loff_t pagebase = page_offset(page);
+
+ if (i_size_read(inode) < (pagebase + to))
+ i_size_write(inode, pagebase + to);
+
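+ /* prepare_write sets PageChecked when it skips populating a page
+ * (zeroed, or fully written); the data is now in place, so the page
+ * can be marked up to date */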
+ if (PageChecked(page)) {
+ SetPageUptodate(page);
+ ClearPageChecked(page);
+ }
- code = afs_linux_writepage_sync(file->f_dentry->d_inode, page,
- offset, to - offset);
+ code = afs_linux_writepage_sync(inode, page, offset, to - offset);
return code;
}
afs_linux_prepare_write(struct file *file, struct page *page, unsigned from,
unsigned to)
{
+
+ /* http://kerneltrap.org/node/4941 details the expected behaviour of
+ * prepare_write. Essentially, if the page exists within the file,
+ * and is not being fully written, then we should populate it.
+ */
+
+ if (!PageUptodate(page)) {
+ loff_t pagebase = page_offset(page);
+ loff_t isize = i_size_read(page->mapping->host);
+
+ /* Is the location we are writing to beyond the end of the file? */
+ if (pagebase >= isize ||
+ ((from == 0) && (pagebase + to) >= isize)) {
+ zero_user_segments(page, 0, from, to, PAGE_CACHE_SIZE);
+ SetPageChecked(page);
+ /* Are we writing a full page? */
+ } else if (from == 0 && to == PAGE_CACHE_SIZE) {
+ SetPageChecked(page);
+ /* Is the page readable? If it's write-only we don't care, because
+ * we're never actually going to read from it ... */
+ } else if ((file->f_flags & O_ACCMODE) != O_WRONLY) {
+ /* We don't care if fillpage fails, because if it does the page
+ * won't be marked as up to date
+ */
+ afs_linux_fillpage(file, page);
+ }
+ }
return 0;
}
-#else
+#if defined(HAVE_WRITE_BEGIN)
static int
afs_linux_write_end(struct file *file, struct address_space *mapping,
loff_t pos, unsigned len, unsigned copied,
struct page *page, void *fsdata)
{
int code;
- unsigned from = pos & (PAGE_CACHE_SIZE - 1);
+ unsigned int from = pos & (PAGE_CACHE_SIZE - 1);
+
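+ /* funnel through commit_write, so the old and new interfaces share
+ * a single flush path */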
+ code = afs_linux_commit_write(file, page, from, from + len);
- code = afs_linux_writepage_sync(file->f_dentry->d_inode, page,
- from, copied);
unlock_page(page);
page_cache_release(page);
return code;
{
struct page *page;
pgoff_t index = pos >> PAGE_CACHE_SHIFT;
+ unsigned int from = pos & (PAGE_CACHE_SIZE - 1);
+ int code;
+
page = grab_cache_page_write_begin(mapping, index, flags);
+ if (!page)
+ return -ENOMEM;
*pagep = page;
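+ /* the kernel doesn't call prepare_write when write_begin is in use,
+ * so do the page population checks ourselves */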
- return 0;
+ code = afs_linux_prepare_write(file, page, from, from + len);
+ if (code) {
+ unlock_page(page);
+ page_cache_release(page);
+ }
+
+ return code;
}
#endif