author     Miklos Szeredi <miklos@szeredi.hu>       2006-02-01 03:04:40 -0800
committer  Linus Torvalds <torvalds@g5.osdl.org>    2006-02-01 08:53:09 -0800
commit     9cd684551124e71630ab96d238747051463f5b56 (patch)
tree       52de759d09d79ded7ff6746a3e2d5c002c75b2f8 /fs
parent     caf736085f2f0d22a992a855d9caae14973f7ea4 (diff)
download   linux-9cd684551124e71630ab96d238747051463f5b56.tar.gz
[PATCH] fuse: fix async read for legacy filesystems
While asynchronous reads mean a performance improvement in most cases, a filesystem that assumes reads are synchronous may see performance degrade with async reads (the filesystem may receive reads out of order, which can confuse its own readahead logic).  With sshfs a 1.5 to 4 times slowdown can be measured.

There's also a need for userspace filesystems to know whether asynchronous reads are supported by the kernel or not.

To address both, negotiate in the INIT request whether async reads will be used and the maximum readahead value, and update the interface version to 7.6.

If userspace uses a version earlier than 7.6, then disable async reads and set the maximum readahead value to the maximum read size, as done in previous versions.

Signed-off-by: Miklos Szeredi <miklos@szeredi.hu>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
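To make the negotiation concrete, here is a minimal userspace-side sketch of answering INIT under protocol 7.6.  The fields mirror the fuse_init_in/fuse_init_out usage visible in the hunks below, but the exact struct layouts (abridged here), the numeric value of FUSE_ASYNC_READ, and the fill_init_reply() helper are illustrative assumptions, not part of this patch.

#include <stdint.h>
#include <stdio.h>

#define FUSE_ASYNC_READ (1 << 0)       /* assumed flag value (see include/linux/fuse.h) */

struct fuse_init_in {                  /* what the kernel sends (see fuse_send_init below) */
        uint32_t major;
        uint32_t minor;
        uint32_t max_readahead;        /* fc->bdi.ra_pages * PAGE_CACHE_SIZE */
        uint32_t flags;                /* FUSE_ASYNC_READ offered here */
};

struct fuse_init_out {                 /* what the daemon replies (layout abridged) */
        uint32_t major;
        uint32_t minor;
        uint32_t max_readahead;        /* daemon's preferred readahead, in bytes */
        uint32_t flags;                /* echo FUSE_ASYNC_READ to accept async reads */
        uint32_t max_write;
};

/* Hypothetical helper: decide the reply based on whether the
 * filesystem copes with out-of-order reads. */
static void fill_init_reply(const struct fuse_init_in *in,
                            struct fuse_init_out *out,
                            int reads_must_be_ordered)
{
        out->major = 7;
        out->minor = 6;                         /* anything < 6 forces the legacy fallback */
        out->max_readahead = in->max_readahead; /* or a smaller value to shrink readahead */
        out->flags = 0;
        if (!reads_must_be_ordered && (in->flags & FUSE_ASYNC_READ))
                out->flags |= FUSE_ASYNC_READ;  /* kernel then sets fc->async_read */
        out->max_write = 65536;                 /* arbitrary example value */
}

int main(void)
{
        struct fuse_init_in in = { 7, 6, 32 * 4096, FUSE_ASYNC_READ };
        struct fuse_init_out out;

        fill_init_reply(&in, &out, /* reads_must_be_ordered = */ 1);
        printf("async reads %s, max_readahead %u bytes\n",
               (out.flags & FUSE_ASYNC_READ) ? "enabled" : "disabled",
               (unsigned)out.max_readahead);
        return 0;
}

A daemon that declines the flag (as above) keeps the pre-7.6 behaviour: the kernel sends reads synchronously, in order.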
Diffstat (limited to 'fs')
-rw-r--r--  fs/fuse/file.c    9
-rw-r--r--  fs/fuse/fuse_i.h  3
-rw-r--r--  fs/fuse/inode.c   14
3 files changed, 22 insertions(+), 4 deletions(-)
diff --git a/fs/fuse/file.c b/fs/fuse/file.c
index a7ef5e716f3c3d..296351615b0014 100644
--- a/fs/fuse/file.c
+++ b/fs/fuse/file.c
@@ -335,9 +335,14 @@ static void fuse_send_readpages(struct fuse_req *req, struct file *file,
         loff_t pos = page_offset(req->pages[0]);
         size_t count = req->num_pages << PAGE_CACHE_SHIFT;
         req->out.page_zeroing = 1;
-        req->end = fuse_readpages_end;
         fuse_read_fill(req, file, inode, pos, count, FUSE_READ);
-        request_send_background(fc, req);
+        if (fc->async_read) {
+                req->end = fuse_readpages_end;
+                request_send_background(fc, req);
+        } else {
+                request_send(fc, req);
+                fuse_readpages_end(fc, req);
+        }
 }
 
 struct fuse_readpages_data {
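The file.c hunk above routes both the asynchronous path and the legacy fallback through the same completion routine, fuse_readpages_end().  A hedged, self-contained sketch of that shape follows; nothing in it is FUSE API, all names are stand-ins.

#include <stdio.h>

struct request;
typedef void (*end_fn)(struct request *);

struct request {
        int id;
        end_fn end;           /* completion callback, like req->end */
};

/* Stand-ins for the transport; in FUSE the background variant returns
 * before the reply and the callback runs from the reply path. */
static void send_background(struct request *req)
{
        printf("req %d queued in the background\n", req->id);
        req->end(req);        /* here: simulated reply arriving */
}

static void send_and_wait(struct request *req)
{
        printf("req %d sent, reply awaited\n", req->id);
}

static void readpages_end(struct request *req)
{
        printf("req %d done, pages would be unlocked here\n", req->id);
}

static void send_readpages(struct request *req, int async_read)
{
        if (async_read) {
                req->end = readpages_end;   /* completion runs from the reply path */
                send_background(req);
        } else {
                send_and_wait(req);         /* blocks, so reads stay strictly ordered */
                readpages_end(req);         /* same completion routine, run inline */
        }
}

int main(void)
{
        struct request a = { 1, NULL };
        struct request b = { 2, NULL };

        send_readpages(&a, 1);   /* connection negotiated FUSE_ASYNC_READ */
        send_readpages(&b, 0);   /* legacy (< 7.6) daemon: synchronous fallback */
        return 0;
}

Keeping one completion hook means the unlock/cleanup logic does not have to know which path was taken.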
diff --git a/fs/fuse/fuse_i.h b/fs/fuse/fuse_i.h
index 46cf933aa3bf2d..4a83adfec968eb 100644
--- a/fs/fuse/fuse_i.h
+++ b/fs/fuse/fuse_i.h
@@ -272,6 +272,9 @@ struct fuse_conn {
             reply, before any other request, and never cleared */
         unsigned conn_error : 1;
 
+        /** Do readpages asynchronously? Only set in INIT */
+        unsigned async_read : 1;
+
         /*
          * The following bitfields are only for optimization purposes
          * and hence races in setting them will not cause malfunction
diff --git a/fs/fuse/inode.c b/fs/fuse/inode.c
index c755a0440a6640..879e6fba94803e 100644
--- a/fs/fuse/inode.c
+++ b/fs/fuse/inode.c
@@ -473,6 +473,16 @@ static void process_init_reply(struct fuse_conn *fc, struct fuse_req *req)
         if (req->out.h.error || arg->major != FUSE_KERNEL_VERSION)
                 fc->conn_error = 1;
         else {
+                unsigned long ra_pages;
+
+                if (arg->minor >= 6) {
+                        ra_pages = arg->max_readahead / PAGE_CACHE_SIZE;
+                        if (arg->flags & FUSE_ASYNC_READ)
+                                fc->async_read = 1;
+                } else
+                        ra_pages = fc->max_read / PAGE_CACHE_SIZE;
+
+                fc->bdi.ra_pages = min(fc->bdi.ra_pages, ra_pages);
                 fc->minor = arg->minor;
                 fc->max_write = arg->minor < 5 ? 4096 : arg->max_write;
         }
@@ -496,6 +506,8 @@ static void fuse_send_init(struct fuse_conn *fc)
 
         arg->major = FUSE_KERNEL_VERSION;
         arg->minor = FUSE_KERNEL_MINOR_VERSION;
+        arg->max_readahead = fc->bdi.ra_pages * PAGE_CACHE_SIZE;
+        arg->flags |= FUSE_ASYNC_READ;
         req->in.h.opcode = FUSE_INIT;
         req->in.numargs = 1;
         req->in.args[0].size = sizeof(*arg);
@@ -552,8 +564,6 @@ static int fuse_fill_super(struct super_block *sb, void *data, int silent)
         fc->user_id = d.user_id;
         fc->group_id = d.group_id;
         fc->max_read = d.max_read;
-        if (fc->max_read / PAGE_CACHE_SIZE < fc->bdi.ra_pages)
-                fc->bdi.ra_pages = fc->max_read / PAGE_CACHE_SIZE;
 
         /* Used by get_root_inode() */
         sb->s_fs_info = fc;
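The last hunk drops the old mount-time readahead clamp; it is now applied once in process_init_reply() using the value negotiated at INIT.  A small hedged arithmetic sketch of that clamp follows; the 4096-byte page size, the 32-page default, and the 64 KiB reply are assumed example values, not taken from this patch.

#include <stdio.h>

int main(void)
{
        unsigned long page_size = 4096;            /* PAGE_CACHE_SIZE (assumed) */
        unsigned long kernel_ra_pages = 32;        /* fc->bdi.ra_pages before INIT (assumed default) */
        unsigned long advertised = kernel_ra_pages * page_size;  /* sent as fuse_init_in.max_readahead */

        unsigned long daemon_max_readahead = 65536;               /* example reply from a 7.6 daemon */
        unsigned long ra_pages = daemon_max_readahead / page_size;

        /* fc->bdi.ra_pages = min(fc->bdi.ra_pages, ra_pages); */
        unsigned long effective = ra_pages < kernel_ra_pages ? ra_pages : kernel_ra_pages;

        printf("kernel advertises %lu bytes, daemon allows %lu bytes -> %lu readahead pages\n",
               advertised, daemon_max_readahead, effective);
        return 0;
}

With a pre-7.6 daemon there is no max_readahead field to honour, so the kernel falls back to clamping by fc->max_read, matching the removed code in fuse_fill_super().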