@@ -17,6 +17,6 @@ zuf-y += md.o t1.o t2.o
zuf-y += zuf-core.o zuf-root.o
# Main FS
-zuf-y += rw.o
+zuf-y += rw.o mmap.o
zuf-y += super.o inode.o directory.o namei.o file.o symlink.o
zuf-y += module.o
@@ -46,6 +46,10 @@ bool zuf_dir_emit(struct super_block *sb, struct dir_context *ctx,
uint zuf_prepare_symname(struct zufs_ioc_new_inode *ioc_new_inode,
const char *symname, ulong len, struct page *pages[2]);
+
+/* mmap.c */
+int zuf_file_mmap(struct file *file, struct vm_area_struct *vma);
+
/* rw.c */
int _zuf_get_put_block(struct zuf_sb_info *sbi, struct zuf_inode_info *zii,
enum e_zufs_operation op, int rw, ulong index,
@@ -61,11 +65,17 @@ int zuf_iom_execute_sync(struct super_block *sb, struct inode *inode,
__u64 *iom_e, uint iom_n);
int zuf_iom_execute_async(struct super_block *sb, struct zus_iomap_build *iomb,
__u64 *iom_e_user, uint iom_n);
+/* file.c */
+int zuf_isync(struct inode *inode, loff_t start, loff_t end, int datasync);
+
/* super.c */
int zuf_init_inodecache(void);
void zuf_destroy_inodecache(void);
+void zuf_sync_inc(struct inode *inode);
+void zuf_sync_dec(struct inode *inode, ulong write_unmapped);
+
struct dentry *zuf_mount(struct file_system_type *fs_type, int flags,
const char *dev_name, void *data);
@@ -45,6 +45,7 @@
#define zuf_dbg_t2(s, args ...) zuf_chan_debug("t2dbg", s, ##args)
#define zuf_dbg_t2_rw(s, args ...) zuf_chan_debug("t2grw", s, ##args)
#define zuf_dbg_core(s, args ...) zuf_chan_debug("core ", s, ##args)
+#define zuf_dbg_mmap(s, args ...) zuf_chan_debug("mmap ", s, ##args)
#define zuf_dbg_zus(s, args ...) zuf_chan_debug("zusdg", s, ##args)
#define zuf_dbg_verbose(s, args ...) zuf_chan_debug("d-oto", s, ##args)
@@ -174,6 +174,69 @@ static loff_t zuf_llseek(struct file *file, loff_t offset, int whence)
return err ?: ioc_seek.offset_out;
}
+/* This function is called by both msync() and fsync(). */
+int zuf_isync(struct inode *inode, loff_t start, loff_t end, int datasync)
+{
+ struct zuf_inode_info *zii = ZUII(inode);
+ struct zufs_ioc_range ioc_range = {
+ .hdr.in_len = sizeof(ioc_range),
+ .hdr.operation = ZUFS_OP_SYNC,
+ .zus_ii = zii->zus_ii,
+ .offset = start,
+ .opflags = datasync,
+ };
+ loff_t isize;
+ ulong uend = end + 1;
+ int err = 0;
+
+ zuf_dbg_vfs(
+ "[%ld] start=0x%llx end=0x%llx datasync=%d write_mapped=%d\n",
+ inode->i_ino, start, end, datasync,
+ atomic_read(&zii->write_mapped));
+
+	/* We want to serialize the syncs so they don't fight with each
+	 * other, which is also more efficient. But we do not want to lock
+	 * out reads/writes and page-faults, so we use a special sync
+	 * semaphore.
+	 */
+ zuf_smw_lock(zii);
+
+ isize = i_size_read(inode);
+ if (!isize) {
+ zuf_dbg_mmap("[%ld] file is empty\n", inode->i_ino);
+ goto out;
+ }
+ if (isize < uend)
+ uend = isize;
+ if (uend < start) {
+ zuf_dbg_mmap("[%ld] isize=0x%llx start=0x%llx end=0x%lx\n",
+ inode->i_ino, isize, start, uend);
+ err = -ENODATA;
+ goto out;
+ }
+
+ if (!atomic_read(&zii->write_mapped))
+ goto out; /* Nothing to do on this inode */
+
+ ioc_range.length = uend - start;
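+	/* Unmap the range first: subsequent writes will re-fault and be
+	 * re-accounted (zuf_sync_inc) while the server syncs the data.
+	 */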
+ unmap_mapping_range(inode->i_mapping, start, ioc_range.length, 0);
+
+ err = zufc_dispatch(ZUF_ROOT(SBI(inode->i_sb)), &ioc_range.hdr,
+ NULL, 0);
+ if (unlikely(err))
+ zuf_dbg_err("zufc_dispatch failed => %d\n", err);
+
+ zuf_sync_dec(inode, ioc_range.write_unmapped);
+
+out:
+ zuf_smw_unlock(zii);
+ return err;
+}
+
+static int zuf_fsync(struct file *file, loff_t start, loff_t end, int datasync)
+{
+ return zuf_isync(file_inode(file), start, end, datasync);
+}
+
/* This callback is called when a file is closed */
static int zuf_flush(struct file *file, fl_owner_t id)
{
@@ -439,7 +502,9 @@ const struct file_operations zuf_file_operations = {
.llseek = zuf_llseek,
.read_iter = zuf_read_iter,
.write_iter = zuf_write_iter,
+ .mmap = zuf_file_mmap,
.open = generic_file_open,
+ .fsync = zuf_fsync,
.flush = zuf_flush,
.release = zuf_file_release,
.fallocate = zuf_fallocate,
@@ -270,6 +270,7 @@ void zuf_evict_inode(struct inode *inode)
{
struct super_block *sb = inode->i_sb;
struct zuf_inode_info *zii = ZUII(inode);
+ int write_mapped;
if (!inode->i_nlink) {
if (unlikely(!zii->zi)) {
@@ -312,6 +313,15 @@ void zuf_evict_inode(struct inode *inode)
zii->zero_page = NULL;
}
+	/* By evict time, ZUS should have synced all mmap-dirty pages;
+	 * warn and fix up the accounting if it has not.
+	 */
+ write_mapped = atomic_read(&zii->write_mapped);
+ if (unlikely(write_mapped || !list_empty(&zii->i_mmap_dirty))) {
+ zuf_dbg_mmap("[%ld] !!!! write_mapped=%d list_empty=%d\n",
+ inode->i_ino, write_mapped,
+ list_empty(&zii->i_mmap_dirty));
+ zuf_sync_dec(inode, write_mapped);
+ }
+
clear_inode(inode);
}
new file mode 100644
@@ -0,0 +1,339 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * BRIEF DESCRIPTION
+ *
+ * Memory-mapped file operations (mmap and page-faults).
+ *
+ * Copyright (c) 2018 NetApp Inc. All rights reserved.
+ *
+ * ZUFS-License: GPL-2.0. See module.c for LICENSE details.
+ *
+ * Authors:
+ * Boaz Harrosh <boazh@netapp.com>
+ */
+
+#include <linux/pfn_t.h>
+#include "zuf.h"
+
+/* ~~~ Functions for mmap and page faults ~~~ */
+
+/* MAP_PRIVATE, copy data to user private page (cow_page) */
+static int _cow_private_page(struct vm_area_struct *vma, struct vm_fault *vmf)
+{
+ struct inode *inode = vma->vm_file->f_mapping->host;
+ struct zuf_sb_info *sbi = SBI(inode->i_sb);
+ int err;
+
+ /* Basically a READ into vmf->cow_page */
+ err = zuf_rw_read_page(sbi, inode, vmf->cow_page,
+ md_p2o(vmf->pgoff));
+ if (unlikely(err && err != -EINTR)) {
+ zuf_err("[%ld] read_page failed bn=0x%lx address=0x%lx => %d\n",
+ inode->i_ino, vmf->pgoff, vmf->address, err);
+ /* FIXME: Probably return VM_FAULT_SIGBUS */
+ }
+
+	/* HACK: This is a hack since kernel v4.7, where VM_FAULT_LOCKED with
+	 * vmf->page == NULL is no longer supported. For now this way works
+	 * well. We let mm mess around with unlocking and putting its own
+	 * cow_page.
+	 */
+ vmf->page = vmf->cow_page;
+ get_page(vmf->page);
+ lock_page(vmf->page);
+
+ return VM_FAULT_LOCKED;
+}
+
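+/* A single zero page per inode is inserted into read faults over file
+ * holes; it is replaced by a real block on the first write fault.
+ */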
+int _rw_init_zero_page(struct zuf_inode_info *zii)
+{
+ if (zii->zero_page)
+ return 0;
+
+ zii->zero_page = alloc_page(GFP_KERNEL | __GFP_ZERO);
+ if (unlikely(!zii->zero_page))
+ return -ENOMEM;
+ zii->zero_page->mapping = zii->vfs_inode.i_mapping;
+ return 0;
+}
+
+static int zuf_write_fault(struct vm_area_struct *vma, struct vm_fault *vmf,
+ bool pfn_mkwrite)
+{
+ struct inode *inode = vma->vm_file->f_mapping->host;
+ struct zuf_sb_info *sbi = SBI(inode->i_sb);
+ struct zuf_inode_info *zii = ZUII(inode);
+ struct zus_inode *zi = zii->zi;
+ struct zufs_ioc_IO get_block = {};
+ int fault = VM_FAULT_SIGBUS;
+ ulong addr = vmf->address;
+ pgoff_t size;
+ pfn_t pfnt;
+ ulong pfn;
+ int err;
+
+ zuf_dbg_mmap("[%ld] vm_start=0x%lx vm_end=0x%lx VA=0x%lx "
+ "pgoff=0x%lx vmf_flags=0x%x cow_page=%p page=%p\n",
+ _zi_ino(zi), vma->vm_start, vma->vm_end, addr, vmf->pgoff,
+ vmf->flags, vmf->cow_page, vmf->page);
+
+ if (unlikely(vmf->page && vmf->page != zii->zero_page)) {
+ zuf_err("[%ld] vm_start=0x%lx vm_end=0x%lx VA=0x%lx "
+ "pgoff=0x%lx vmf_flags=0x%x page=%p cow_page=%p\n",
+ _zi_ino(zi), vma->vm_start, vma->vm_end, addr,
+ vmf->pgoff, vmf->flags, vmf->page, vmf->cow_page);
+ return VM_FAULT_SIGBUS;
+ }
+
+ sb_start_pagefault(inode->i_sb);
+ zuf_smr_lock_pagefault(zii);
+
+ size = md_o2p_up(i_size_read(inode));
+ if (unlikely(vmf->pgoff >= size)) {
+ ulong pgoff = vma->vm_pgoff + md_o2p(addr - vma->vm_start);
+
+ zuf_err("[%ld] pgoff(0x%lx)(0x%lx) >= size(0x%lx) => SIGBUS\n",
+ _zi_ino(zi), vmf->pgoff, pgoff, size);
+
+ fault = VM_FAULT_SIGBUS;
+ goto out;
+ }
+
+ if (vmf->cow_page) {
+ fault = _cow_private_page(vma, vmf);
+ goto out;
+ }
+
+ zus_inode_cmtime_now(inode, zi);
+ /* NOTE: zus needs to flush the zi */
+
+ err = _zuf_get_put_block(sbi, zii, ZUFS_OP_GET_BLOCK, WRITE, vmf->pgoff,
+ &get_block);
+ if (unlikely(err)) {
+ zuf_dbg_err("_get_put_block failed => %d\n", err);
+ goto out;
+ }
+
+	if ((get_block.gp_block.ret_flags & ZUFS_GBF_NEW) || !pfn_mkwrite) {
+		inode->i_blocks = le64_to_cpu(zii->zi->i_blocks);
+		/* The block is newly allocated, or this is a regular write
+		 * fault: remove any stale mapping of this range (e.g. the
+		 * zero page) before inserting the writable pfn.
+		 */
+		unmap_mapping_range(inode->i_mapping, vmf->pgoff << PAGE_SHIFT,
+				    PAGE_SIZE, 0);
+	} else {
+		/* pfn_mkwrite on an unchanged block: just tell mm to flip
+		 * the write bit.
+		 */
+		fault = VM_FAULT_WRITE;
+		goto skip_insert;
+	}
+
+ if (unlikely(get_block.gp_block.pmem_bn == 0)) {
+ zuf_err("[%ld] pmem_bn=0 rw=0x%x ret_flags=0x%x priv=0x%lx but no error?\n",
+ _zi_ino(zi), get_block.gp_block.rw,
+ get_block.gp_block.ret_flags,
+ (ulong)get_block.gp_block.priv);
+ fault = VM_FAULT_SIGBUS;
+ goto out;
+ }
+
+ pfn = md_pfn(sbi->md, get_block.gp_block.pmem_bn);
+ pfnt = phys_to_pfn_t(PFN_PHYS(pfn), PFN_MAP | PFN_DEV);
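+	/* Insert the pmem block's pfn directly into the user page table,
+	 * already marked writable.
+	 */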
+ fault = vmf_insert_mixed_mkwrite(vma, addr, pfnt);
+ err = zuf_flt_to_err(fault);
+ if (unlikely(err)) {
+		zuf_err("vmf_insert_mixed_mkwrite failed => %d\n", err);
+ goto put;
+ }
+
+	zuf_dbg_mmap("[%ld] vmf_insert_mixed_mkwrite 0x%lx prot=0x%lx => %d\n",
+ _zi_ino(zi), pfn, vma->vm_page_prot.pgprot, err);
+
+skip_insert:
+ zuf_sync_inc(inode);
+put:
+ _zuf_get_put_block(sbi, zii, ZUFS_OP_PUT_BLOCK, WRITE, vmf->pgoff,
+ &get_block);
+out:
+ zuf_smr_unlock(zii);
+ sb_end_pagefault(inode->i_sb);
+ return fault;
+}
+
+static int zuf_pfn_mkwrite(struct vm_fault *vmf)
+{
+ return zuf_write_fault(vmf->vma, vmf, true);
+}
+
+static int zuf_read_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+{
+ struct inode *inode = vma->vm_file->f_mapping->host;
+ struct zuf_sb_info *sbi = SBI(inode->i_sb);
+ struct zuf_inode_info *zii = ZUII(inode);
+ struct zus_inode *zi = zii->zi;
+ struct zufs_ioc_IO get_block = {};
+ int fault = VM_FAULT_SIGBUS;
+ ulong addr = vmf->address;
+ pgoff_t size;
+ pfn_t pfnt;
+ ulong pfn;
+ int err;
+
+ zuf_dbg_mmap("[%ld] vm_start=0x%lx vm_end=0x%lx VA=0x%lx "
+ "pgoff=0x%lx vmf_flags=0x%x cow_page=%p page=%p\n",
+ _zi_ino(zi), vma->vm_start, vma->vm_end, addr, vmf->pgoff,
+ vmf->flags, vmf->cow_page, vmf->page);
+
+ zuf_smr_lock_pagefault(zii);
+
+ size = md_o2p_up(i_size_read(inode));
+ if (unlikely(vmf->pgoff >= size)) {
+ ulong pgoff = vma->vm_pgoff + md_o2p(addr - vma->vm_start);
+
+ zuf_err("[%ld] pgoff(0x%lx)(0x%lx) >= size(0x%lx) => SIGBUS\n",
+ _zi_ino(zi), vmf->pgoff, pgoff, size);
+ goto out;
+ }
+
+ if (vmf->cow_page) {
+		zuf_warn("read fault on a cow_page\n");
+ fault = _cow_private_page(vma, vmf);
+ goto out;
+ }
+
+ file_accessed(vma->vm_file);
+ /* NOTE: zus needs to flush the zi */
+
+ err = _zuf_get_put_block(sbi, zii, ZUFS_OP_GET_BLOCK, READ, vmf->pgoff,
+ &get_block);
+ if (unlikely(err && err != -EINTR)) {
+ zuf_err("_get_put_block failed => %d\n", err);
+ goto out;
+ }
+
+ if (get_block.gp_block.pmem_bn == 0) {
+ /* Hole in file */
+ err = _rw_init_zero_page(zii);
+ if (unlikely(err))
+ goto out;
+
+		err = vm_insert_page(vma, addr, zii->zero_page);
+		if (unlikely(err && err != -EBUSY))
+			goto out; /* fault is still VM_FAULT_SIGBUS */
+		zuf_dbg_mmap("[%ld] inserted zero\n", _zi_ino(zi));
+
+		/* NOTE: we are fooling mm; we do not need this page
+		 * to be locked or referenced (get_page).
+		 */
+ fault = VM_FAULT_NOPAGE;
+ goto out;
+ }
+
+ /* We have a real page */
+ pfn = md_pfn(sbi->md, get_block.gp_block.pmem_bn);
+ pfnt = phys_to_pfn_t(PFN_PHYS(pfn), PFN_MAP | PFN_DEV);
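+	/* Insert the pfn without write permission; a write will fault
+	 * again and go through pfn_mkwrite.
+	 */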
+ fault = vmf_insert_mixed(vma, addr, pfnt);
+ err = zuf_flt_to_err(fault);
+ if (unlikely(err)) {
+		zuf_err("[%ld] vmf_insert_mixed => %d\n", _zi_ino(zi), err);
+ goto put;
+ }
+
+	zuf_dbg_mmap("[%ld] vmf_insert_mixed 0x%lx prot=0x%lx => %d\n",
+ _zi_ino(zi), pfn, vma->vm_page_prot.pgprot, err);
+
+put:
+ _zuf_get_put_block(sbi, zii, ZUFS_OP_PUT_BLOCK, READ, vmf->pgoff,
+ &get_block);
+out:
+ zuf_smr_unlock(zii);
+ return fault;
+}
+
+static int zuf_fault(struct vm_fault *vmf)
+{
+ bool write_fault = (0 != (vmf->flags & FAULT_FLAG_WRITE));
+
+ if (write_fault)
+ return zuf_write_fault(vmf->vma, vmf, false);
+ else
+ return zuf_read_fault(vmf->vma, vmf);
+}
+
+static int zuf_page_mkwrite(struct vm_fault *vmf)
+{
+ struct vm_area_struct *vma = vmf->vma;
+ struct inode *inode = vma->vm_file->f_mapping->host;
+ ulong addr = vmf->address;
+
+	/* Our zero page doesn't hold the correct file offset in
+	 * page->index, so vmf->pgoff is incorrect; let's fix that.
+	 */
+ vmf->pgoff = vma->vm_pgoff + ((addr - vma->vm_start) >> PAGE_SHIFT);
+
+ zuf_dbg_mmap("[%ld] pgoff=0x%lx\n", inode->i_ino, vmf->pgoff);
+
+ /* call fault handler to get a real page for writing */
+ return zuf_write_fault(vma, vmf, false);
+}
+
+static void zuf_mmap_open(struct vm_area_struct *vma)
+{
+ struct zuf_inode_info *zii = ZUII(file_inode(vma->vm_file));
+
+ atomic_inc(&zii->vma_count);
+}
+
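+/* When the last VMA of the inode goes away, notify the server with a
+ * ZUFS_OP_MMAP_CLOSE dispatch.
+ */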
+static void zuf_mmap_close(struct vm_area_struct *vma)
+{
+ struct inode *inode = file_inode(vma->vm_file);
+ int vma_count = atomic_dec_return(&ZUII(inode)->vma_count);
+
+ if (unlikely(vma_count < 0))
+ zuf_err("[%ld] WHAT??? vma_count=%d\n",
+ inode->i_ino, vma_count);
+ else if (unlikely(vma_count == 0)) {
+ struct zuf_inode_info *zii = ZUII(inode);
+ struct zufs_ioc_mmap_close mmap_close = {};
+ int err;
+
+ mmap_close.hdr.operation = ZUFS_OP_MMAP_CLOSE;
+ mmap_close.hdr.in_len = sizeof(mmap_close);
+
+ mmap_close.zus_ii = zii->zus_ii;
+		mmap_close.rw = 0; /* TODO: Do we need this? */
+
+ zuf_smr_lock(zii);
+
+ err = zufc_dispatch(ZUF_ROOT(SBI(inode->i_sb)), &mmap_close.hdr,
+ NULL, 0);
+ if (unlikely(err))
+ zuf_dbg_err("[%ld] err=%d\n", inode->i_ino, err);
+
+ zuf_smr_unlock(zii);
+ }
+}
+
+static const struct vm_operations_struct zuf_vm_ops = {
+ .fault = zuf_fault,
+ .page_mkwrite = zuf_page_mkwrite,
+ .pfn_mkwrite = zuf_pfn_mkwrite,
+ .open = zuf_mmap_open,
+ .close = zuf_mmap_close,
+};
+
+int zuf_file_mmap(struct file *file, struct vm_area_struct *vma)
+{
+ struct inode *inode = file_inode(file);
+ struct zuf_inode_info *zii = ZUII(inode);
+
+ file_accessed(file);
+
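+	/* VM_MIXEDMAP: this VMA maps raw pmem pfns as well as the
+	 * struct-page backed zero_page.
+	 */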
+ vma->vm_flags |= VM_MIXEDMAP;
+ vma->vm_ops = &zuf_vm_ops;
+
+ atomic_inc(&zii->vma_count);
+
+ zuf_dbg_vfs("[%ld] start=0x%lx end=0x%lx flags=0x%lx page_prot=0x%lx\n",
+		    inode->i_ino, vma->vm_start, vma->vm_end,
+ vma->vm_flags, pgprot_val(vma->vm_page_prot));
+
+ return 0;
+}
@@ -570,6 +570,90 @@ static int zuf_update_s_wtime(struct super_block *sb)
return 0;
}
+static void _sync_add_inode(struct inode *inode)
+{
+ struct zuf_sb_info *sbi = SBI(inode->i_sb);
+ struct zuf_inode_info *zii = ZUII(inode);
+
+ zuf_dbg_mmap("[%ld] write_mapped=%d\n",
+ inode->i_ino, atomic_read(&zii->write_mapped));
+
+ spin_lock(&sbi->s_mmap_dirty_lock);
+
+	/* We are lazy about removing inodes from this list; that only
+	 * happens on an fsync or an evict_inode. So it is fine if we are
+	 * called multiple times.
+	 */
+ if (list_empty(&zii->i_mmap_dirty))
+ list_add(&zii->i_mmap_dirty, &sbi->s_mmap_dirty);
+
+ spin_unlock(&sbi->s_mmap_dirty_lock);
+}
+
+static void _sync_remove_inode(struct inode *inode)
+{
+ struct zuf_sb_info *sbi = SBI(inode->i_sb);
+ struct zuf_inode_info *zii = ZUII(inode);
+
+ zuf_dbg_mmap("[%ld] write_mapped=%d\n",
+ inode->i_ino, atomic_read(&zii->write_mapped));
+
+ spin_lock(&sbi->s_mmap_dirty_lock);
+ list_del_init(&zii->i_mmap_dirty);
+ spin_unlock(&sbi->s_mmap_dirty_lock);
+}
+
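+/* Called from write page-faults; the first write-mapped page puts the
+ * inode on the per-sb s_mmap_dirty list.
+ */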
+void zuf_sync_inc(struct inode *inode)
+{
+ struct zuf_inode_info *zii = ZUII(inode);
+
+ if (1 == atomic_inc_return(&zii->write_mapped))
+ _sync_add_inode(inode);
+}
+
+/* zuf_sync_dec accounts for write-mapped pages that were unmapped, possibly in batches */
+void zuf_sync_dec(struct inode *inode, ulong write_unmapped)
+{
+ struct zuf_inode_info *zii = ZUII(inode);
+
+ if (0 == atomic_sub_return(write_unmapped, &zii->write_mapped))
+ _sync_remove_inode(inode);
+}
+
+/*
+ * We must fsync any mmap-active inodes
+ */
+static int zuf_sync_fs(struct super_block *sb, int wait)
+{
+ struct zuf_sb_info *sbi = SBI(sb);
+ struct zuf_inode_info *zii, *t;
+ enum {to_clean_size = 120};
+ struct zuf_inode_info *zii_to_clean[to_clean_size];
+ uint i, to_clean;
+
+ zuf_dbg_vfs("Syncing wait=%d\n", wait);
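+	/* Drain the dirty list in fixed-size batches so zuf_isync is not
+	 * called while holding the spinlock.
+	 */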
+more_inodes:
+ spin_lock(&sbi->s_mmap_dirty_lock);
+ to_clean = 0;
+ list_for_each_entry_safe(zii, t, &sbi->s_mmap_dirty, i_mmap_dirty) {
+ list_del_init(&zii->i_mmap_dirty);
+ zii_to_clean[to_clean++] = zii;
+ if (to_clean >= to_clean_size)
+ break;
+ }
+ spin_unlock(&sbi->s_mmap_dirty_lock);
+
+ if (!to_clean)
+ return 0;
+
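+	/* end is ~0 - 1 so that uend = end + 1 inside zuf_isync does not
+	 * wrap to zero
+	 */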
+ for (i = 0; i < to_clean; ++i)
+ zuf_isync(&zii_to_clean[i]->vfs_inode, 0, ~0 - 1, 1);
+
+ if (to_clean == to_clean_size)
+ goto more_inodes;
+
+ return 0;
+}
+
static struct inode *zuf_alloc_inode(struct super_block *sb)
{
struct zuf_inode_info *zii;
@@ -592,6 +676,12 @@ static void _init_once(void *foo)
struct zuf_inode_info *zii = foo;
inode_init_once(&zii->vfs_inode);
+ INIT_LIST_HEAD(&zii->i_mmap_dirty);
+ zii->zi = NULL;
+ zii->zero_page = NULL;
+ init_rwsem(&zii->in_sync);
+ atomic_set(&zii->vma_count, 0);
+ atomic_set(&zii->write_mapped, 0);
}
int __init zuf_init_inodecache(void)
@@ -621,6 +711,7 @@ static struct super_operations zuf_sops = {
.put_super = zuf_put_super,
.freeze_fs = zuf_update_s_wtime,
.unfreeze_fs = zuf_update_s_wtime,
+ .sync_fs = zuf_sync_fs,
.statfs = zuf_statfs,
.remount_fs = zuf_remount,
.show_options = zuf_show_options,
@@ -781,8 +781,10 @@ const char *zuf_op_name(enum e_zufs_operation op)
CASE_ENUM_NAME(ZUFS_OP_WRITE );
CASE_ENUM_NAME(ZUFS_OP_GET_BLOCK );
CASE_ENUM_NAME(ZUFS_OP_PUT_BLOCK );
+ CASE_ENUM_NAME(ZUFS_OP_MMAP_CLOSE );
CASE_ENUM_NAME(ZUFS_OP_GET_SYMLINK );
CASE_ENUM_NAME(ZUFS_OP_SETATTR );
+ CASE_ENUM_NAME(ZUFS_OP_SYNC );
CASE_ENUM_NAME(ZUFS_OP_FALLOCATE );
CASE_ENUM_NAME(ZUFS_OP_LLSEEK );
CASE_ENUM_NAME(ZUFS_OP_BREAK );
@@ -158,6 +158,9 @@ struct zuf_inode_info {
/* Stuff for mmap write */
struct rw_semaphore in_sync;
+ struct list_head i_mmap_dirty;
+ atomic_t write_mapped;
+ atomic_t vma_count;
struct page *zero_page; /* TODO: Remove */
/* cookies from Server */
@@ -344,8 +344,10 @@ enum e_zufs_operation {
ZUFS_OP_WRITE,
ZUFS_OP_GET_BLOCK,
ZUFS_OP_PUT_BLOCK,
+ ZUFS_OP_MMAP_CLOSE,
ZUFS_OP_GET_SYMLINK,
ZUFS_OP_SETATTR,
+ ZUFS_OP_SYNC,
ZUFS_OP_FALLOCATE,
ZUFS_OP_LLSEEK,
@@ -516,6 +518,13 @@ static inline bool zufs_zde_emit(struct zufs_readdir_iter *rdi, __u64 ino,
return true;
}
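+
+/* ZUFS_OP_MMAP_CLOSE */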
+struct zufs_ioc_mmap_close {
+ struct zufs_ioc_hdr hdr;
+ /* IN */
+ struct zus_inode_info *zus_ii;
+ __u64 rw; /* Some flags + READ or WRITE */
+};
+
/* ZUFS_OP_GET_SYMLINK */
struct zufs_ioc_get_link {
struct zufs_ioc_hdr hdr;