@@ -528,6 +528,18 @@ static void fuse_release_user_pages(struct fuse_req *req, int write)
}
}
+static ssize_t fuse_get_res_by_io(struct fuse_io_priv *io)
+{
+ if (io->err)
+ return io->err;
+
+ if (io->bytes >= 0 && io->write)
+ return -EIO;
+
+ return io->bytes < 0 ? io->size : io->bytes;
+
+}
+
/**
* In case of short read, the caller sets 'pos' to the position of
* actual end of fuse request in IO request. Otherwise, if bytes_requested
@@ -547,6 +559,7 @@ static void fuse_release_user_pages(struct fuse_req *req, int write)
static void fuse_aio_complete(struct fuse_io_priv *io, int err, ssize_t pos)
{
int left;
+ bool is_sync = is_sync_kiocb(io->iocb);
spin_lock(&io->lock);
if (err)
@@ -555,27 +568,25 @@ static void fuse_aio_complete(struct fuse_io_priv *io, int err, ssize_t pos)
io->bytes = pos;
left = --io->reqs;
- spin_unlock(&io->lock);
- if (!left) {
- long res;
+ if (!left && is_sync) {
+ if (io->waiter)
+ wake_up_process(io->waiter);
+ }
- if (io->err)
- res = io->err;
- else if (io->bytes >= 0 && io->write)
- res = -EIO;
- else {
- res = io->bytes < 0 ? io->size : io->bytes;
+ spin_unlock(&io->lock);
- if (!is_sync_kiocb(io->iocb)) {
- struct inode *inode = file_inode(io->iocb->ki_filp);
- struct fuse_conn *fc = get_fuse_conn(inode);
- struct fuse_inode *fi = get_fuse_inode(inode);
+ if (!left && !is_sync) {
+ ssize_t res = fuse_get_res_by_io(io);
- spin_lock(&fc->lock);
- fi->attr_version = ++fc->attr_version;
- spin_unlock(&fc->lock);
- }
+ if (res >= 0) {
+ struct inode *inode = file_inode(io->iocb->ki_filp);
+ struct fuse_conn *fc = get_fuse_conn(inode);
+ struct fuse_inode *fi = get_fuse_inode(inode);
+
+ spin_lock(&fc->lock);
+ fi->attr_version = ++fc->attr_version;
+ spin_unlock(&fc->lock);
}
io->iocb->complete(io->iocb, res);
@@ -2798,6 +2809,29 @@ static inline loff_t fuse_round_up(loff_t off)
return round_up(off, FUSE_MAX_PAGES_PER_REQ << PAGE_SHIFT);
}
+static ssize_t fuse_dio_wait(struct fuse_io_priv *io)
+{
+ ssize_t res;
+
+ spin_lock(&io->lock);
+
+ while (io->reqs) {
+ __set_current_state(TASK_UNINTERRUPTIBLE);
+ io->waiter = current;
+ spin_unlock(&io->lock);
+ io_schedule();
+ /* wake_up_process() in fuse_aio_complete() sets us back to TASK_RUNNING */
+ spin_lock(&io->lock);
+ io->waiter = NULL;
+ }
+
+ spin_unlock(&io->lock);
+
+ res = fuse_get_res_by_io(io);
+ kfree(io);
+ return res;
+}
+
static ssize_t
fuse_direct_IO(int rw, struct kiocb *iocb, struct iov_iter *iter,
loff_t offset)
@@ -2841,10 +2875,8 @@ fuse_direct_IO(int rw, struct kiocb *iocb, struct iov_iter *iter,
/*
* By default, we want to optimize all I/Os with async request
* submission to the client filesystem if supported.
- *
- * XXX: need to add back support for this mode..
*/
- io->async = async_dio && !is_sync_kiocb(iocb);
+ io->async = async_dio;
io->iocb = iocb;
/*
@@ -2867,8 +2899,7 @@ fuse_direct_IO(int rw, struct kiocb *iocb, struct iov_iter *iter,
if (!is_sync_kiocb(iocb))
return -EIOCBQUEUED;
- // XXX: need fuse specific replacement
-// ret = wait_on_sync_kiocb(iocb);
+ ret = fuse_dio_wait(io);
} else {
kfree(io);
}
@@ -263,6 +263,7 @@ struct fuse_io_priv {
int err;
struct kiocb *iocb;
struct file *file;
+ struct task_struct *waiter;
};
/**