Message ID | 4A4850D3.3000700@cn.fujitsu.com (mailing list archive) |
---|---|
State | New, archived |
Headers | show |
On Mon, Jun 29, 2009 at 01:27:47PM +0800, Gui Jianfeng wrote: > Hi Vivek, > > There's no need to travel the iocg->group_data for each iog > when exiting a elevator, that costs too much. An alternative > solution is reseting iocg_id as soon as an io group unlinking > from a iocg. Make a decision that whether it's need to carry > out deleting action by checking iocg_id. > Thanks Gui. This makes sense to me. We can check iog->iocg_id to determine wheter group is still on iocg list or not instead of traversing the list. Nauman, do you see any issues with the patch? Thanks Vivek > Signed-off-by: Gui Jianfeng <guijianfeng@cn.fujitsu.com> > --- > block/elevator-fq.c | 29 ++++++++++------------------- > 1 files changed, 10 insertions(+), 19 deletions(-) > > diff --git a/block/elevator-fq.c b/block/elevator-fq.c > index d779282..b26fe0f 100644 > --- a/block/elevator-fq.c > +++ b/block/elevator-fq.c > @@ -2218,8 +2218,6 @@ void io_group_cleanup(struct io_group *iog) > BUG_ON(iog->sched_data.active_entity != NULL); > BUG_ON(entity != NULL && entity->tree != NULL); > > - iog->iocg_id = 0; > - > /* > * Wait for any rcu readers to exit before freeing up the group. 
> * Primarily useful when io_get_io_group() is called without queue > @@ -2376,6 +2374,7 @@ remove_entry: > group_node); > efqd = rcu_dereference(iog->key); > hlist_del_rcu(&iog->group_node); > + iog->iocg_id = 0; > spin_unlock_irqrestore(&iocg->lock, flags); > > spin_lock_irqsave(efqd->queue->queue_lock, flags); > @@ -2403,35 +2402,27 @@ done: > void io_group_check_and_destroy(struct elv_fq_data *efqd, struct io_group *iog) > { > struct io_cgroup *iocg; > - unsigned short id = iog->iocg_id; > - struct hlist_node *n; > - struct io_group *__iog; > unsigned long flags; > struct cgroup_subsys_state *css; > > rcu_read_lock(); > > - BUG_ON(!id); > - css = css_lookup(&io_subsys, id); > + css = css_lookup(&io_subsys, iog->iocg_id); > > - /* css can't go away as associated io group is still around */ > - BUG_ON(!css); > + if (!css) > + goto out; > > iocg = container_of(css, struct io_cgroup, css); > > spin_lock_irqsave(&iocg->lock, flags); > - hlist_for_each_entry_rcu(__iog, n, &iocg->group_data, group_node) { > - /* > - * Remove iog only if it is still in iocg list. Cgroup > - * deletion could have deleted it already. > - */ > - if (__iog == iog) { > - hlist_del_rcu(&iog->group_node); > - __io_destroy_group(efqd, iog); > - break; > - } > + > + if (iog->iocg_id) { > + hlist_del_rcu(&iog->group_node); > + __io_destroy_group(efqd, iog); > } > + > spin_unlock_irqrestore(&iocg->lock, flags); > +out: > rcu_read_unlock(); > } > > -- 1.5.4.rc3 -- dm-devel mailing list dm-devel@redhat.com https://www.redhat.com/mailman/listinfo/dm-devel
On Mon, Jun 29, 2009 at 7:06 AM, Vivek Goyal<vgoyal@redhat.com> wrote: > On Mon, Jun 29, 2009 at 01:27:47PM +0800, Gui Jianfeng wrote: >> Hi Vivek, >> >> There's no need to travel the iocg->group_data for each iog >> when exiting a elevator, that costs too much. An alternative >> solution is reseting iocg_id as soon as an io group unlinking >> from a iocg. Make a decision that whether it's need to carry >> out deleting action by checking iocg_id. >> > > Thanks Gui. This makes sense to me. We can check iog->iocg_id to determine > whether group is still on iocg list or not instead of traversing the list. > > Nauman, do you see any issues with the patch? Looks like this should work. The only iog with zero id is associated with root group, which gets deleted outside of this function anyways. > > Thanks > Vivek > >> Signed-off-by: Gui Jianfeng <guijianfeng@cn.fujitsu.com> >> --- >> block/elevator-fq.c | 29 ++++++++++------------------- >> 1 files changed, 10 insertions(+), 19 deletions(-) >> >> diff --git a/block/elevator-fq.c b/block/elevator-fq.c >> index d779282..b26fe0f 100644 >> --- a/block/elevator-fq.c >> +++ b/block/elevator-fq.c >> @@ -2218,8 +2218,6 @@ void io_group_cleanup(struct io_group *iog) >> BUG_ON(iog->sched_data.active_entity != NULL); >> BUG_ON(entity != NULL && entity->tree != NULL); >> >> - iog->iocg_id = 0; >> - >> /* >> * Wait for any rcu readers to exit before freeing up the group. 
>> * Primarily useful when io_get_io_group() is called without queue >> @@ -2376,6 +2374,7 @@ remove_entry: >> group_node); >> efqd = rcu_dereference(iog->key); >> hlist_del_rcu(&iog->group_node); >> + iog->iocg_id = 0; >> spin_unlock_irqrestore(&iocg->lock, flags); >> >> spin_lock_irqsave(efqd->queue->queue_lock, flags); >> @@ -2403,35 +2402,27 @@ done: >> void io_group_check_and_destroy(struct elv_fq_data *efqd, struct io_group *iog) >> { >> struct io_cgroup *iocg; >> - unsigned short id = iog->iocg_id; >> - struct hlist_node *n; >> - struct io_group *__iog; >> unsigned long flags; >> struct cgroup_subsys_state *css; >> >> rcu_read_lock(); >> >> - BUG_ON(!id); >> - css = css_lookup(&io_subsys, id); >> + css = css_lookup(&io_subsys, iog->iocg_id); >> >> - /* css can't go away as associated io group is still around */ >> - BUG_ON(!css); >> + if (!css) >> + goto out; >> >> iocg = container_of(css, struct io_cgroup, css); >> >> spin_lock_irqsave(&iocg->lock, flags); >> - hlist_for_each_entry_rcu(__iog, n, &iocg->group_data, group_node) { >> - /* >> - * Remove iog only if it is still in iocg list. Cgroup >> - * deletion could have deleted it already. >> - */ >> - if (__iog == iog) { >> - hlist_del_rcu(&iog->group_node); >> - __io_destroy_group(efqd, iog); >> - break; >> - } >> + >> + if (iog->iocg_id) { >> + hlist_del_rcu(&iog->group_node); >> + __io_destroy_group(efqd, iog); >> } >> + >> spin_unlock_irqrestore(&iocg->lock, flags); >> +out: >> rcu_read_unlock(); >> } >> >> -- 1.5.4.rc3 > -- dm-devel mailing list dm-devel@redhat.com https://www.redhat.com/mailman/listinfo/dm-devel
On Tue, Jun 30, 2009 at 10:14:48AM -0700, Nauman Rafique wrote: > On Mon, Jun 29, 2009 at 7:06 AM, Vivek Goyal<vgoyal@redhat.com> wrote: > > On Mon, Jun 29, 2009 at 01:27:47PM +0800, Gui Jianfeng wrote: > >> Hi Vivek, > >> > >> There's no need to travel the iocg->group_data for each iog > >> when exiting a elevator, that costs too much. An alternative > >> solution is reseting iocg_id as soon as an io group unlinking > >> from a iocg. Make a decision that whether it's need to carry > >> out deleting action by checking iocg_id. > >> > > > > Thanks Gui. This makes sense to me. We can check iog->iocg_id to determine > > whether group is still on iocg list or not instead of traversing the list. > > > > Nauman, do you see any issues with the patch? > > Looks like this should work. The only iog with zero id is associated > with root group, which gets deleted outside of this function anyways. > Minor correction. Even root group has id "1" and not zero. 0 is associated with error when cgroup is not present. Thanks Vivek -- dm-devel mailing list dm-devel@redhat.com https://www.redhat.com/mailman/listinfo/dm-devel
diff --git a/block/elevator-fq.c b/block/elevator-fq.c index d779282..b26fe0f 100644 --- a/block/elevator-fq.c +++ b/block/elevator-fq.c @@ -2218,8 +2218,6 @@ void io_group_cleanup(struct io_group *iog) BUG_ON(iog->sched_data.active_entity != NULL); BUG_ON(entity != NULL && entity->tree != NULL); - iog->iocg_id = 0; - /* * Wait for any rcu readers to exit before freeing up the group. * Primarily useful when io_get_io_group() is called without queue @@ -2376,6 +2374,7 @@ remove_entry: group_node); efqd = rcu_dereference(iog->key); hlist_del_rcu(&iog->group_node); + iog->iocg_id = 0; spin_unlock_irqrestore(&iocg->lock, flags); spin_lock_irqsave(efqd->queue->queue_lock, flags); @@ -2403,35 +2402,27 @@ done: void io_group_check_and_destroy(struct elv_fq_data *efqd, struct io_group *iog) { struct io_cgroup *iocg; - unsigned short id = iog->iocg_id; - struct hlist_node *n; - struct io_group *__iog; unsigned long flags; struct cgroup_subsys_state *css; rcu_read_lock(); - BUG_ON(!id); - css = css_lookup(&io_subsys, id); + css = css_lookup(&io_subsys, iog->iocg_id); - /* css can't go away as associated io group is still around */ - BUG_ON(!css); + if (!css) + goto out; iocg = container_of(css, struct io_cgroup, css); spin_lock_irqsave(&iocg->lock, flags); - hlist_for_each_entry_rcu(__iog, n, &iocg->group_data, group_node) { - /* - * Remove iog only if it is still in iocg list. Cgroup - * deletion could have deleted it already. - */ - if (__iog == iog) { - hlist_del_rcu(&iog->group_node); - __io_destroy_group(efqd, iog); - break; - } + + if (iog->iocg_id) { + hlist_del_rcu(&iog->group_node); + __io_destroy_group(efqd, iog); } + spin_unlock_irqrestore(&iocg->lock, flags); +out: rcu_read_unlock(); }
Hi Vivek, There's no need to traverse the iocg->group_data list for each iog when exiting an elevator; that costs too much. An alternative solution is resetting iocg_id as soon as an io group is unlinked from an iocg, and then deciding whether the deletion needs to be carried out by checking iocg_id. Signed-off-by: Gui Jianfeng <guijianfeng@cn.fujitsu.com> --- block/elevator-fq.c | 29 ++++++++++------------------- 1 files changed, 10 insertions(+), 19 deletions(-) -- 1.5.4.rc3 -- dm-devel mailing list dm-devel@redhat.com https://www.redhat.com/mailman/listinfo/dm-devel