Message ID | 20200901185257.645114-4-tj@kernel.org (mailing list archive)
---|---
State | New, archived
Series | [01/27] blk-iocost: ioc_pd_free() shouldn't assume irq disabled
On Tue, Sep 01, 2020 at 02:52:33PM -0400, Tejun Heo wrote:
> blk-iocost has been reading percpu stat counters from remote cpus, which on
> some archs can lead to torn reads on really rare occasions. Use local[64]_t
> for those counters.
>
> Signed-off-by: Tejun Heo <tj@kernel.org>
> ---
>  block/blk-iocost.c | 37 +++++++++++++++++++++++++++----------
>  1 file changed, 27 insertions(+), 10 deletions(-)
>
> diff --git a/block/blk-iocost.c b/block/blk-iocost.c
> index d37b55db2409..e2266e7692b4 100644
> --- a/block/blk-iocost.c
> +++ b/block/blk-iocost.c
> @@ -179,6 +179,8 @@
>  #include <linux/parser.h>
>  #include <linux/sched/signal.h>
>  #include <linux/blk-cgroup.h>
> +#include <asm/local.h>
> +#include <asm/local64.h>

Hi Tejun,

FYI, I am just noticing this, but it breaks my allyesconfig build on
OpenRISC, as 32-bit arch/openrisc doesn't define local64.h.

In general local64 is slow on 32-bit architectures; would that be a
problem with the usage here? Are the calls to local64_* below on
critical paths?

Either way, I will submit a patch to include the generic local64.h on
OpenRISC; I confirmed it fixes the build. I do not know of anyone using
cgroups on OpenRISC systems.

-Stafford
Hello,

On Sat, Nov 21, 2020 at 06:51:47AM +0900, Stafford Horne wrote:
> FYI, I am just noticing this, but it breaks my allyesconfig build
> on OpenRISC, as 32-bit arch/openrisc doesn't define local64.h.
>
> In general local64 is slow on 32-bit architectures; would that
> be a problem with the usage here? Are the calls to local64_*
> below on critical paths?

It gets hot when running on really high-iops devices, but that hopefully
isn't a problem for 32-bit OpenRISC.

> Either way, I will submit a patch to include the generic local64.h
> on OpenRISC; I confirmed it fixes the build. I do not know of anyone
> using cgroups on OpenRISC systems.

Yeah, sounds like the right fix. Thanks.
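For readers wondering where both the build break and the 32-bit overhead come from, here is a rough sketch of the generic local64 fallback that an arch can pull in. It is an approximation written from memory of include/asm-generic/local64.h, not the actual OpenRISC fix (which only needs to make such a header available to the arch), so treat it as illustrative only.

/*
 * Rough sketch of the generic local64_t fallback; an approximation, not a
 * copy of the real header. On 64-bit, local64_t is a thin wrapper over
 * local_t, so local64_add() stays a cheap CPU-local op. On 32-bit it
 * degrades to atomic64_t, which lib/atomic64.c may implement with hashed
 * spinlocks, hence the "local64 is slow on 32-bit" concern above.
 */
#include <asm/bitsperlong.h>

#if BITS_PER_LONG == 64

#include <asm/local.h>

typedef struct {
	local_t a;
} local64_t;

#define local64_read(l)		local_read(&(l)->a)
#define local64_set(l, i)	local_set(&(l)->a, (i))
#define local64_inc(l)		local_inc(&(l)->a)
#define local64_add(i, l)	local_add((i), &(l)->a)

#else /* BITS_PER_LONG != 64 */

#include <linux/atomic.h>

typedef struct {
	atomic64_t a;
} local64_t;

#define local64_read(l)		atomic64_read(&(l)->a)
#define local64_set(l, i)	atomic64_set(&(l)->a, (i))
#define local64_inc(l)		atomic64_inc(&(l)->a)
#define local64_add(i, l)	atomic64_add((i), &(l)->a)

#endif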
diff --git a/block/blk-iocost.c b/block/blk-iocost.c
index d37b55db2409..e2266e7692b4 100644
--- a/block/blk-iocost.c
+++ b/block/blk-iocost.c
@@ -179,6 +179,8 @@
 #include <linux/parser.h>
 #include <linux/sched/signal.h>
 #include <linux/blk-cgroup.h>
+#include <asm/local.h>
+#include <asm/local64.h>
 #include "blk-rq-qos.h"
 #include "blk-stat.h"
 #include "blk-wbt.h"
@@ -373,8 +375,8 @@ struct ioc_params {
 };
 
 struct ioc_missed {
-	u32 nr_met;
-	u32 nr_missed;
+	local_t nr_met;
+	local_t nr_missed;
 	u32 last_met;
 	u32 last_missed;
 };
@@ -382,7 +384,7 @@ struct ioc_missed {
 struct ioc_pcpu_stat {
 	struct ioc_missed missed[2];
 
-	u64 rq_wait_ns;
+	local64_t rq_wait_ns;
 	u64 last_rq_wait_ns;
 };
 
@@ -1278,8 +1280,8 @@ static void ioc_lat_stat(struct ioc *ioc, u32 *missed_ppm_ar, u32 *rq_wait_pct_p
 		u64 this_rq_wait_ns;
 
 		for (rw = READ; rw <= WRITE; rw++) {
-			u32 this_met = READ_ONCE(stat->missed[rw].nr_met);
-			u32 this_missed = READ_ONCE(stat->missed[rw].nr_missed);
+			u32 this_met = local_read(&stat->missed[rw].nr_met);
+			u32 this_missed = local_read(&stat->missed[rw].nr_missed);
 
 			nr_met[rw] += this_met - stat->missed[rw].last_met;
 			nr_missed[rw] += this_missed - stat->missed[rw].last_missed;
@@ -1287,7 +1289,7 @@ static void ioc_lat_stat(struct ioc *ioc, u32 *missed_ppm_ar, u32 *rq_wait_pct_p
 			stat->missed[rw].last_missed = this_missed;
 		}
 
-		this_rq_wait_ns = READ_ONCE(stat->rq_wait_ns);
+		this_rq_wait_ns = local64_read(&stat->rq_wait_ns);
 		rq_wait_ns += this_rq_wait_ns - stat->last_rq_wait_ns;
 		stat->last_rq_wait_ns = this_rq_wait_ns;
 	}
@@ -1908,6 +1910,7 @@ static void ioc_rqos_done_bio(struct rq_qos *rqos, struct bio *bio)
 static void ioc_rqos_done(struct rq_qos *rqos, struct request *rq)
 {
 	struct ioc *ioc = rqos_to_ioc(rqos);
+	struct ioc_pcpu_stat *ccs;
 	u64 on_q_ns, rq_wait_ns, size_nsec;
 	int pidx, rw;
 
@@ -1931,13 +1934,17 @@ static void ioc_rqos_done(struct rq_qos *rqos, struct request *rq)
 	rq_wait_ns = rq->start_time_ns - rq->alloc_time_ns;
 	size_nsec = div64_u64(calc_size_vtime_cost(rq, ioc), VTIME_PER_NSEC);
 
+	ccs = get_cpu_ptr(ioc->pcpu_stat);
+
 	if (on_q_ns <= size_nsec ||
 	    on_q_ns - size_nsec <= ioc->params.qos[pidx] * NSEC_PER_USEC)
-		this_cpu_inc(ioc->pcpu_stat->missed[rw].nr_met);
+		local_inc(&ccs->missed[rw].nr_met);
 	else
-		this_cpu_inc(ioc->pcpu_stat->missed[rw].nr_missed);
+		local_inc(&ccs->missed[rw].nr_missed);
+
+	local64_add(rq_wait_ns, &ccs->rq_wait_ns);
 
-	this_cpu_add(ioc->pcpu_stat->rq_wait_ns, rq_wait_ns);
+	put_cpu_ptr(ccs);
 }
 
 static void ioc_rqos_queue_depth_changed(struct rq_qos *rqos)
@@ -1977,7 +1984,7 @@ static int blk_iocost_init(struct request_queue *q)
 {
 	struct ioc *ioc;
 	struct rq_qos *rqos;
-	int ret;
+	int i, cpu, ret;
 
 	ioc = kzalloc(sizeof(*ioc), GFP_KERNEL);
 	if (!ioc)
@@ -1989,6 +1996,16 @@ static int blk_iocost_init(struct request_queue *q)
 		return -ENOMEM;
 	}
 
+	for_each_possible_cpu(cpu) {
+		struct ioc_pcpu_stat *ccs = per_cpu_ptr(ioc->pcpu_stat, cpu);
+
+		for (i = 0; i < ARRAY_SIZE(ccs->missed); i++) {
+			local_set(&ccs->missed[i].nr_met, 0);
+			local_set(&ccs->missed[i].nr_missed, 0);
+		}
+		local64_set(&ccs->rq_wait_ns, 0);
+	}
+
 	rqos = &ioc->rqos;
 	rqos->id = RQ_QOS_COST;
 	rqos->ops = &ioc_rqos_ops;
blk-iocost has been reading percpu stat counters from remote cpus, which on
some archs can lead to torn reads on really rare occasions. Use local[64]_t
for those counters.

Signed-off-by: Tejun Heo <tj@kernel.org>
---
 block/blk-iocost.c | 37 +++++++++++++++++++++++++++----------
 1 file changed, 27 insertions(+), 10 deletions(-)
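To make the torn-read problem and the new counter pattern concrete, below is a minimal, self-contained sketch. The identifiers demo_pcpu_stat, demo_account_wait and demo_sum_wait are hypothetical and only illustrate the local_t/local64_t usage; they are not blk-iocost code. The hot path pins the CPU with get_cpu_ptr() so the cheap local_* updates really are CPU-local, while the aggregation side may run on any CPU and relies on local64_read() never returning a half-written value, which READ_ONCE() on a plain u64 cannot guarantee on 32-bit archs.

/* Hypothetical demo of the pattern the patch adopts; not blk-iocost code. */
#include <linux/percpu.h>
#include <linux/cpumask.h>
#include <asm/local.h>
#include <asm/local64.h>

struct demo_pcpu_stat {
	local_t		nr_events;	/* only updated by the owning CPU */
	local64_t	wait_ns;	/* 64-bit, safe to read from other CPUs */
};

static DEFINE_PER_CPU(struct demo_pcpu_stat, demo_stat);

/* Hot path: disable preemption so the local_* ops stay CPU-local. */
static void demo_account_wait(u64 wait_ns)
{
	struct demo_pcpu_stat *s = get_cpu_ptr(&demo_stat);

	local_inc(&s->nr_events);
	local64_add(wait_ns, &s->wait_ns);

	put_cpu_ptr(&demo_stat);
}

/*
 * Aggregation path: may run on any CPU. local64_read() avoids the torn
 * 64-bit load that READ_ONCE() on a plain u64 could produce on 32-bit.
 */
static u64 demo_sum_wait(void)
{
	u64 sum = 0;
	int cpu;

	for_each_possible_cpu(cpu)
		sum += local64_read(&per_cpu_ptr(&demo_stat, cpu)->wait_ns);

	return sum;
}

On 64-bit archs local64_t collapses to local_t, so the hot-path cost should stay in the same ballpark as the old this_cpu_add(); the heavier atomic64-based fallback only kicks in on 32-bit architectures.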