@@ -622,6 +622,20 @@ static ssize_t cifs_file_aio_write(struct kiocb *iocb, const struct iovec *iov,
 	return written;
 }
+static ssize_t cifs_file_aio_read(struct kiocb *iocb, const struct
iovec *iov,
+ unsigned long nr_segs, loff_t pos)
+{
+ struct inode *inode = iocb->ki_filp->f_path.dentry->d_inode;
+ ssize_t read;
+
+ if (CIFS_I(inode)->clientCanCacheRead)
+ read = generic_file_aio_read(iocb, iov, nr_segs, pos);
+ else
+ read = cifs_user_read(iocb->ki_filp, iov->iov_base,
iov->iov_len, &pos);
+ return read;
+}
+
+
Hello! I found an interesting bug in the cifs driver. When we don't use the
"direct" option during mounting, we read only from the cache. My tests prove
that this isn't right - if the client doesn't hold an oplock and
inode->clientCanCacheRead == false, we should read from the server in order
to get valid data. Here is a patch that allows reading from the cache only
when the cache is valid.

 static loff_t cifs_llseek(struct file *file, loff_t offset, int origin)
 {
 	/* origin == SEEK_END => we must revalidate the cached file length */
@@ -733,7 +747,7 @@ const struct inode_operations cifs_symlink_inode_ops = {
 const struct file_operations cifs_file_ops = {
 	.read = do_sync_read,
 	.write = do_sync_write,
-	.aio_read = generic_file_aio_read,
+	.aio_read = cifs_file_aio_read,
 	.aio_write = cifs_file_aio_write,
 	.open = cifs_open,
 	.release = cifs_close,

Signed-off-by: Pavel Shilovsky <piastryyy@gmail.com>
--
Best regards,
Pavel Shilovsky.