@@ -539,7 +539,7 @@ void kernfs_put(struct kernfs_node *kn)
}
kfree(kn->iattr);
spin_lock(&kernfs_idr_lock);
- idr_remove(&root->ino_idr, kn->ino);
+ idr_remove(&root->ino_idr, kn->id.ino);
spin_unlock(&kernfs_idr_lock);
kmem_cache_free(kernfs_node_cache, kn);
@@ -639,8 +639,8 @@ static struct kernfs_node *__kernfs_new_node(struct kernfs_root *root,
idr_preload_end();
if (ret < 0)
goto err_out2;
- kn->ino = ret;
- kn->generation = atomic_inc_return(&root->next_generation);
+ kn->id.ino = ret;
+ kn->id.generation = atomic_inc_return(&root->next_generation);
/* set ino first. Above atomic_inc_return has a barrier */
atomic_set(&kn->count, 1);
@@ -711,7 +711,7 @@ struct kernfs_node *kernfs_find_and_get_node_by_ino(struct kernfs_root *root,
* 'ino' should be uptodate, hence we can use 'ino' to filter stale
* node.
*/
- if (kn->ino != ino)
+ if (kn->id.ino != ino)
goto out;
rcu_read_unlock();
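
Taken together, the two hunks above rely on a publish-then-verify discipline: the id fields are written before the node's refcount goes live, and a lookup that starts from a possibly recycled inode number pins the node first and only then re-checks that it still carries the ino it was asked for. Below is a minimal userspace analogy of that discipline, assuming C11 atomics; every name in it (struct node, slot_table, publish_node, lookup_node) is made up for illustration and none of it is kernfs code.

#include <stdatomic.h>
#include <stdio.h>

#define NR_SLOTS 128

struct node {
	atomic_int count;	/* 0 until the node is fully initialised */
	unsigned int ino;	/* written before count becomes non-zero */
};

static struct node *slot_table[NR_SLOTS];	/* stand-in for the idr */

static void publish_node(struct node *n, unsigned int ino)
{
	n->ino = ino;
	slot_table[ino] = n;
	/* release: the ino store above is visible once count reads as 1 */
	atomic_store_explicit(&n->count, 1, memory_order_release);
}

static struct node *lookup_node(unsigned int ino)
{
	struct node *n;
	int old;

	if (ino >= NR_SLOTS || !(n = slot_table[ino]))
		return NULL;

	/* pin the node, like the kernel's atomic_inc_not_zero() */
	old = atomic_load_explicit(&n->count, memory_order_acquire);
	do {
		if (old == 0)
			return NULL;
	} while (!atomic_compare_exchange_weak(&n->count, &old, old + 1));

	/* the slot may have been recycled meanwhile; confirm the identity */
	if (n->ino != ino) {
		atomic_fetch_sub(&n->count, 1);
		return NULL;
	}
	return n;
}

int main(void)
{
	static struct node node42;

	publish_node(&node42, 42);
	printf("lookup(42): %s\n", lookup_node(42) ? "found" : "miss");
	printf("lookup(41): %s\n", lookup_node(41) ? "found" : "miss");
	return 0;
}

The release store in publish_node plays the role of the barrier mentioned in the comment above atomic_set(&kn->count, 1), and the final ino comparison is the stale-slot filter from kernfs_find_and_get_node_by_ino.
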
@@ -1644,7 +1644,7 @@ static int kernfs_fop_readdir(struct file *file, struct dir_context *ctx)
const char *name = pos->name;
unsigned int type = dt_type(pos);
int len = strlen(name);
- ino_t ino = pos->ino;
+ ino_t ino = pos->id.ino;
ctx->pos = pos->hash;
file->private_data = pos;
@@ -895,7 +895,7 @@ static void kernfs_notify_workfn(struct work_struct *work)
* have the matching @file available. Look up the inodes
* and generate the events manually.
*/
- inode = ilookup(info->sb, kn->ino);
+ inode = ilookup(info->sb, kn->id.ino);
if (!inode)
continue;
@@ -903,7 +903,7 @@ static void kernfs_notify_workfn(struct work_struct *work)
if (parent) {
struct inode *p_inode;
- p_inode = ilookup(info->sb, parent->ino);
+ p_inode = ilookup(info->sb, parent->id.ino);
if (p_inode) {
fsnotify(p_inode, FS_MODIFY | FS_EVENT_ON_CHILD,
inode, FSNOTIFY_EVENT_INODE, kn->name, 0);
@@ -220,7 +220,7 @@ static void kernfs_init_inode(struct kernfs_node *kn, struct inode *inode)
inode->i_private = kn;
inode->i_mapping->a_ops = &kernfs_aops;
inode->i_op = &kernfs_iops;
- inode->i_generation = kn->generation;
+ inode->i_generation = kn->id.generation;
set_default_inode_attr(inode, kn->mode);
kernfs_refresh_inode(kn, inode);
@@ -266,7 +266,7 @@ struct inode *kernfs_get_inode(struct super_block *sb, struct kernfs_node *kn)
{
struct inode *inode;
- inode = iget_locked(sb, kn->ino);
+ inode = iget_locked(sb, kn->id.ino);
if (inode && (inode->i_state & I_NEW))
kernfs_init_inode(kn, inode);
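
The generation copied into i_generation by kernfs_init_inode above is what lets a consumer that saved an (ino, generation) pair notice that the inode number has since been recycled for a different node: a reused number comes back with a newer generation. A purely illustrative sketch of that check follows; the names (struct saved_ref, ref_still_valid) are hypothetical and not kernfs API.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* hypothetical saved reference: the number plus the generation it had then */
struct saved_ref {
	uint64_t ino;
	uint32_t generation;
};

static bool ref_still_valid(const struct saved_ref *ref,
			    uint64_t cur_ino, uint32_t cur_generation)
{
	/* a recycled inode number comes back with a newer generation */
	return ref->ino == cur_ino && ref->generation == cur_generation;
}

int main(void)
{
	struct saved_ref ref = { .ino = 7, .generation = 3 };

	printf("same node: %d\n", ref_still_valid(&ref, 7, 3));	/* 1 */
	printf("recycled:  %d\n", ref_still_valid(&ref, 7, 4));	/* 0 */
	return 0;
}
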
@@ -543,7 +543,7 @@ static inline bool cgroup_is_populated(struct cgroup *cgrp)
/* returns ino associated with a cgroup */
static inline ino_t cgroup_ino(struct cgroup *cgrp)
{
- return cgrp->kn->ino;
+ return cgrp->kn->id.ino;
}
/* cft/css accessors for cftype->write() operation */
@@ -95,6 +95,12 @@ struct kernfs_elem_attr {
struct kernfs_node *notify_next; /* for kernfs_notify() */
};
+/* represents a kernfs node */
+struct kernfs_node_id {
+ u64 ino;
+ u32 generation;
+} __attribute__((packed));
+
/*
* kernfs_node - the building block of kernfs hierarchy. Each and every
* kernfs node is represented by single kernfs_node. Most fields are
@@ -131,11 +137,10 @@ struct kernfs_node {
void *priv;
+ struct kernfs_node_id id;
unsigned short flags;
umode_t mode;
- unsigned int ino;
struct kernfs_iattrs *iattr;
- u32 generation;
};
/*
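
As a closing note on the new type introduced in the kernfs.h hunk: packing an 8-byte ino next to a 4-byte generation yields a fixed 12-byte identifier with no tail padding. The snippet below is only a userspace re-statement of that layout, assuming GCC/Clang's packed attribute, so the size claim can be checked at compile time; it is not kernel code.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* mirrors the struct added above, with <stdint.h> spellings of u64/u32 */
struct kernfs_node_id {
	uint64_t ino;
	uint32_t generation;
} __attribute__((packed));

/* packed: 8 + 4 bytes, no tail padding */
_Static_assert(sizeof(struct kernfs_node_id) == 12,
	       "kernfs_node_id expected to be 12 bytes");

int main(void)
{
	struct kernfs_node_id id = { .ino = 42, .generation = 1 };
	unsigned char buf[sizeof(id)];

	memcpy(buf, &id, sizeof(id));	/* the whole identity fits in 12 bytes */
	printf("sizeof(struct kernfs_node_id) = %zu\n", sizeof(id));
	return 0;
}
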