@@ -4,6 +4,8 @@
* Copyright (C) 2022, Bytedance Inc. All rights reserved.
*/
#include <linux/fscache.h>
+#include <linux/file.h>
+#include <linux/anon_inodes.h>
#include "internal.h"
static DEFINE_MUTEX(erofs_domain_list_lock);
@@ -22,6 +24,11 @@ struct erofs_fscache_request {
refcount_t ref;
};
+struct erofs_fscache_share_file_info {
+ erofs_off_t pa;
+ pgoff_t max_idx;
+};
+
static struct erofs_fscache_request *erofs_fscache_req_alloc(struct address_space *mapping,
loff_t start, size_t len)
{
@@ -316,6 +323,73 @@ const struct address_space_operations erofs_fscache_access_aops = {
.readahead = erofs_fscache_readahead,
};
+static int erofs_fscache_share_meta_release(struct inode *inode, struct file *filp)
+{
+ kfree(filp->private_data);
+ filp->private_data = NULL;
+ return 0;
+}
+
+static const struct file_operations erofs_fscache_share_meta_fops = {
+ .release = erofs_fscache_share_meta_release,
+};
+
+static int erofs_fscache_share_file_release(struct inode *inode, struct file *filp)
+{
+ fput(filp->private_data);
+ filp->private_data = NULL;
+ return 0;
+}
+
+static int erofs_fscache_share_file_open(struct inode *inode, struct file *filp)
+{
+ /* page cache sharing is enabled only when i_size <= chunk_size, hence a single mapping at offset 0 suffices */
+ struct erofs_map_blocks map = {}; /* .m_la = 0 */
+ struct erofs_map_dev mdev;
+ struct inode *realinode;
+ struct file *realfile;
+ struct erofs_fscache_share_file_info *finfo;
+ int ret;
+
+ ret = erofs_map_blocks(inode, &map, EROFS_GET_BLOCKS_RAW);
+ if (ret)
+ return ret;
+
+ mdev = (struct erofs_map_dev) {
+ .m_deviceid = map.m_deviceid,
+ .m_pa = map.m_pa,
+ };
+ ret = erofs_map_dev(inode->i_sb, &mdev);
+ if (ret)
+ return ret;
+
+ finfo = kzalloc(sizeof(*finfo), GFP_KERNEL);
+ if (!finfo)
+ return -ENOMEM;
+ finfo->pa = mdev.m_pa;
+ finfo->max_idx = DIV_ROUND_UP(mdev.m_pa + inode->i_size, PAGE_SIZE);
+
+ realinode = mdev.m_fscache->inode;
+ ihold(realinode);
+ realfile = alloc_file_pseudo(realinode, filp->f_path.mnt, "[erofs]",
+ O_RDONLY, &erofs_fscache_share_meta_fops);
+ if (IS_ERR(realfile)) {
+ iput(realinode);
+ kfree(finfo);
+ return PTR_ERR(realfile);
+ }
+
+ realfile->private_data = finfo;
+ filp->private_data = realfile;
+ return 0;
+}
+
+const struct file_operations erofs_fscache_share_file_fops = {
+ .llseek = generic_file_llseek,
+ .open = erofs_fscache_share_file_open,
+ .release = erofs_fscache_share_file_release,
+};
+
static void erofs_fscache_domain_put(struct erofs_domain *domain)
{
mutex_lock(&erofs_domain_list_lock);
--- a/fs/erofs/internal.h
+++ b/fs/erofs/internal.h
@@ -616,6 +616,7 @@ struct erofs_fscache *erofs_fscache_register_cookie(struct super_block *sb,
void erofs_fscache_unregister_cookie(struct erofs_fscache *fscache);
extern const struct address_space_operations erofs_fscache_access_aops;
+extern const struct file_operations erofs_fscache_share_file_fops;
#else
static inline int erofs_fscache_register_fs(struct super_block *sb)
{
In preparation for the upcoming support for page cache sharing based
mmap, allocate an anonymous file for the corresponding blob, so that
the associated vma can be linked to the blob later.

Signed-off-by: Jingbo Xu <jefflexu@linux.alibaba.com>
---
 fs/erofs/fscache.c  | 74 +++++++++++++++++++++++++++++++++++++++++++++
 fs/erofs/internal.h |  1 +
 2 files changed, 75 insertions(+)
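As a note for reviewers, below is a rough sketch (not part of this patch) of
how a follow-up patch could link a vma to the blob via the anonymous file
stashed in filp->private_data: mmap redirects the vma to the blob file so that
faults are served from the blob's shared page cache, while
erofs_fscache_share_file_info bounds the faultable range. The helper names
(erofs_fscache_share_file_mmap, erofs_fscache_share_fault) and the assumption
that m_pa is page-aligned are illustrative only.

static vm_fault_t erofs_fscache_share_fault(struct vm_fault *vmf)
{
	struct file *realfile = vmf->vma->vm_file;
	struct erofs_fscache_share_file_info *finfo = realfile->private_data;

	/* vmf->pgoff is blob-relative after mmap below; reject faults past EOF */
	if (unlikely(vmf->pgoff >= finfo->max_idx))
		return VM_FAULT_SIGBUS;
	return filemap_fault(vmf);
}

static const struct vm_operations_struct erofs_fscache_share_vm_ops = {
	.fault = erofs_fscache_share_fault,
};

static int erofs_fscache_share_file_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct file *realfile = filp->private_data;
	struct erofs_fscache_share_file_info *finfo = realfile->private_data;

	/* the image is read-only, so refuse shared writable mappings */
	if ((vma->vm_flags & VM_SHARED) && (vma->vm_flags & VM_MAYWRITE))
		return -EINVAL;

	/*
	 * Redirect the vma to the blob file so its page cache is shared
	 * across images; assumes finfo->pa is page-aligned.
	 */
	vma_set_file(vma, realfile);
	vma->vm_pgoff += finfo->pa >> PAGE_SHIFT;
	vma->vm_ops = &erofs_fscache_share_vm_ops;
	return 0;
}

With something along these lines wired into erofs_fscache_share_file_fops as
.mmap, reads through the mapping hit the blob inode's page cache (served by
erofs_fscache_access_aops), which is why open() only needs to stash the
realfile together with the (pa, max_idx) pair.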