@@ -108,6 +108,40 @@ iomap_write_failed(struct inode *inode, loff_t pos, unsigned len)
truncate_pagecache_range(inode, max(pos, i_size), pos + len);
}
+static void
+iomap_read_inline_data(struct page *page, struct iomap *iomap, loff_t size)
+{
+	void *src = iomap->inline_data;	/* start of the in-inode data area */
+	void *kaddr;
+
+	if (PageUptodate(page))		/* page cache copy already current */
+		return;
+
+	BUG_ON(size > PAGE_SIZE - offset_in_page(src));
+	BUG_ON(page->index);		/* inline data lives in page 0 only */
+
+	kaddr = kmap_atomic(page);
+	memcpy(kaddr, src, size);
+	memset(kaddr + size, 0, PAGE_SIZE - size);	/* zero the tail */
+	kunmap_atomic(kaddr);	/* NOTE(review): no flush_dcache_page here — confirm */
+	SetPageUptodate(page);
+}
+
+static void
+iomap_write_inline_data(struct page *page, struct iomap *iomap, loff_t pos,
+		unsigned copied)
+{
+	void *data = iomap->inline_data;	/* start of the in-inode data area */
+	void *addr;
+
+	BUG_ON(pos + copied > PAGE_SIZE - offset_in_page(data));
+	WARN_ON_ONCE(!PageUptodate(page));	/* write_begin must have filled the page */
+
+	addr = kmap_atomic(page);
+	memcpy(data + pos, addr + pos, copied);	/* page cache -> inline area */
+	kunmap_atomic(addr);
+}
+
static int
iomap_write_begin(struct inode *inode, loff_t pos, unsigned len, unsigned flags,
struct page **pagep, struct iomap *iomap)
@@ -125,6 +159,11 @@ iomap_write_begin(struct inode *inode, loff_t pos, unsigned len, unsigned flags,
if (!page)
return -ENOMEM;
+ if (iomap->type == IOMAP_INLINE) {
+ iomap_read_inline_data(page, iomap, inode->i_size);
+ goto out;
+ }
+
status = __block_write_begin_int(page, pos, len, NULL, iomap);
if (unlikely(status)) {
unlock_page(page);
@@ -134,16 +173,23 @@ iomap_write_begin(struct inode *inode, loff_t pos, unsigned len, unsigned flags,
iomap_write_failed(inode, pos, len);
}
+out:
*pagep = page;
return status;
}
static int
iomap_write_end(struct inode *inode, loff_t pos, unsigned len,
- unsigned copied, struct page *page)
+ unsigned copied, struct page *page, struct iomap *iomap)
{
int ret;
+ if (iomap->type == IOMAP_INLINE) {
+ iomap_write_inline_data(page, iomap, pos, copied);
+ __generic_write_end(inode, pos, copied, page, true);
+ return copied;
+ }
+
ret = generic_write_end(NULL, inode->i_mapping, pos, len,
copied, page, NULL);
if (ret < len)
@@ -200,7 +246,8 @@ iomap_write_actor(struct inode *inode, loff_t pos, loff_t length, void *data,
flush_dcache_page(page);
- status = iomap_write_end(inode, pos, bytes, copied, page);
+ status = iomap_write_end(inode, pos, bytes, copied, page,
+ iomap);
if (unlikely(status < 0))
break;
copied = status;
@@ -294,7 +341,7 @@ iomap_dirty_actor(struct inode *inode, loff_t pos, loff_t length, void *data,
WARN_ON_ONCE(!PageUptodate(page));
- status = iomap_write_end(inode, pos, bytes, bytes, page);
+ status = iomap_write_end(inode, pos, bytes, bytes, page, iomap);
if (unlikely(status <= 0)) {
if (WARN_ON_ONCE(status == 0))
return -EIO;
@@ -346,7 +393,7 @@ static int iomap_zero(struct inode *inode, loff_t pos, unsigned offset,
zero_user(page, offset, bytes);
mark_page_accessed(page);
- return iomap_write_end(inode, pos, bytes, bytes, page);
+ return iomap_write_end(inode, pos, bytes, bytes, page, iomap);
}
static int iomap_dax_zero(loff_t pos, unsigned offset, unsigned bytes,
@@ -47,6 +47,7 @@ struct iomap {
u64 length; /* length of mapping, bytes */
u16 type; /* type of mapping */
u16 flags; /* flags for mapping */
+ void *inline_data; /* inline data buffer */
struct block_device *bdev; /* block device for I/O */
struct dax_device *dax_dev; /* dax_dev for dax operations */
};
Add generic inline data handling by adding a pointer to the inline data
region to struct iomap.  When handling a buffered IOMAP_INLINE write,
iomap_write_begin will copy the current inline data from the inline data
region into the page cache, and iomap_write_end will copy the changes in
the page cache back to the inline data region.

This doesn't cover inline data reads and direct I/O yet because so far,
we have no users.

Signed-off-by: Andreas Gruenbacher <agruenba@redhat.com>
---
 fs/iomap.c            | 55 +++++++++++++++++++++++++++++++++++++++----
 include/linux/iomap.h |  1 +
 2 files changed, 52 insertions(+), 4 deletions(-)