| Message ID | 20160919105325.GW5016@twins.programming.kicks-ass.net (mailing list archive) |
|---|---|
| State | Accepted, archived |
| Delegated to: | Mike Snitzer |
On Mon, 19 Sep 2016, Peter Zijlstra wrote:

> On Tue, Sep 13, 2016 at 09:39:59AM -0400, Mike Snitzer wrote:
> > So I'm not sure how this dm-bufio local cond_resched() wrapper still got
> > in... happy to take your patch.
> >
> > Please respond with whatever SOB you'd like applied to the patch header.
>
> Sorry, for the delay, here goes.

Why not change it to might_sleep()? - that would be almost equivalent to
the code that was there before (i.e. reschedule only if
CONFIG_PREEMPT_VOLUNTARY is set).

If we call the cond_resched() function in tight loops such as walking all
buffers in a list, there may be performance penalty due to the call, so
the call should be done only if it is really needed (i.e. in
CONFIG_PREEMPT_VOLUNTARY case).

Mikulas

> ---
> Subject: dm: Remove dm_bufio_cond_resched()
> From: Peter Zijlstra <peterz@infradead.org>
> Date: Tue, 13 Sep 2016 10:45:20 +0200
>
> Remove pointless local wrappery. Use cond_resched() like everybody else.
>
> Cc: Ingo Molnar <mingo@kernel.org>
> Cc: Mikulas Patocka <mpatocka@redhat.com>
> Cc: Mike Snitzer <snitzer@redhat.com>
> Cc: Alasdair Kergon <agk@redhat.com>
> Acked-by: Thomas Gleixner <tglx@linutronix.de>
> Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
> ---
>  drivers/md/dm-bufio.c | 31 +++++++++----------------------
>  1 file changed, 9 insertions(+), 22 deletions(-)
>
> --- a/drivers/md/dm-bufio.c
> +++ b/drivers/md/dm-bufio.c
> @@ -191,19 +191,6 @@ static void dm_bufio_unlock(struct dm_bu
>          mutex_unlock(&c->lock);
>  }
>
> -/*
> - * FIXME Move to sched.h?
> - */
> -#ifdef CONFIG_PREEMPT_VOLUNTARY
> -# define dm_bufio_cond_resched()                \
> -do {                                            \
> -        if (unlikely(need_resched()))           \
> -                _cond_resched();                \
> -} while (0)
> -#else
> -# define dm_bufio_cond_resched()        do { } while (0)
> -#endif
> -
>  /*----------------------------------------------------------------*/
>
>  /*
> @@ -741,7 +728,7 @@ static void __flush_write_list(struct li
>                          list_entry(write_list->next, struct dm_buffer, write_list);
>                  list_del(&b->write_list);
>                  submit_io(b, WRITE, b->block, write_endio);
> -                dm_bufio_cond_resched();
> +                cond_resched();
>          }
>          blk_finish_plug(&plug);
>  }
> @@ -780,7 +767,7 @@ static struct dm_buffer *__get_unclaimed
>                          __unlink_buffer(b);
>                          return b;
>                  }
> -                dm_bufio_cond_resched();
> +                cond_resched();
>          }
>
>          list_for_each_entry_reverse(b, &c->lru[LIST_DIRTY], lru_list) {
> @@ -791,7 +778,7 @@ static struct dm_buffer *__get_unclaimed
>                          __unlink_buffer(b);
>                          return b;
>                  }
> -                dm_bufio_cond_resched();
> +                cond_resched();
>          }
>
>          return NULL;
> @@ -923,7 +910,7 @@ static void __write_dirty_buffers_async(
>                          return;
>
>                  __write_dirty_buffer(b, write_list);
> -                dm_bufio_cond_resched();
> +                cond_resched();
>          }
>  }
>
> @@ -973,7 +960,7 @@ static void __check_watermark(struct dm_
>                          return;
>
>                  __free_buffer_wake(b);
> -                dm_bufio_cond_resched();
> +                cond_resched();
>          }
>
>          if (c->n_buffers[LIST_DIRTY] > threshold_buffers)
> @@ -1170,7 +1157,7 @@ void dm_bufio_prefetch(struct dm_bufio_c
>                          submit_io(b, READ, b->block, read_endio);
>                          dm_bufio_release(b);
>
> -                        dm_bufio_cond_resched();
> +                        cond_resched();
>
>                          if (!n_blocks)
>                                  goto flush_plug;
> @@ -1291,7 +1278,7 @@ int dm_bufio_write_dirty_buffers(struct
>                      !test_bit(B_WRITING, &b->state))
>                          __relink_lru(b, LIST_CLEAN);
>
> -                dm_bufio_cond_resched();
> +                cond_resched();
>
>                  /*
>                   * If we dropped the lock, the list is no longer consistent,
> @@ -1574,7 +1561,7 @@ static unsigned long __scan(struct dm_bu
>                          freed++;
>                          if (!--nr_to_scan || ((count - freed) <= retain_target))
>                                  return freed;
> -                        dm_bufio_cond_resched();
> +                        cond_resched();
>                  }
>          }
>          return freed;
> @@ -1808,7 +1795,7 @@ static void __evict_old_buffers(struct d
>                  if (__try_evict_buffer(b, 0))
>                          count--;
>
> -                dm_bufio_cond_resched();
> +                cond_resched();
>          }
>
>          dm_bufio_unlock(c);
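The disagreement above hinges on what cond_resched() actually costs when no reschedule is pending: the removed wrapper did an inline need_resched() test before making any function call (and compiled away entirely on PREEMPT_NONE), whereas plain cond_resched() on non-preemptible kernels always calls an out-of-line helper that performs the check itself. Below is a rough sketch of that generic machinery, paraphrased from memory of include/linux/sched.h and kernel/sched/core.c of roughly that era; it is an approximation for illustration, not the exact source, and helper names and signatures varied somewhat across versions.

```c
/* Paraphrased approximation of the v4.x cond_resched() machinery. */

/* include/linux/sched.h */
#ifndef CONFIG_PREEMPT
extern int _cond_resched(void);                         /* out-of-line helper, always called */
#else
static inline int _cond_resched(void) { return 0; }     /* full preemption: nothing to do here */
#endif

#define cond_resched() ({                       \
        ___might_sleep(__FILE__, __LINE__, 0);  \
        _cond_resched();                        \
})

/* kernel/sched/core.c */
int __sched _cond_resched(void)
{
        if (should_resched(0)) {                /* is a reschedule actually pending? */
                preempt_schedule_common();
                return 1;
        }
        return 0;
}
```

On a PREEMPT_NONE or PREEMPT_VOLUNTARY kernel, every iteration of the buffer-walking loops therefore pays one function call even when no reschedule is pending; that per-iteration call is the "performance penalty" Mikulas is referring to.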
On Thu, 22 Sep 2016, Mikulas Patocka wrote:
> On Mon, 19 Sep 2016, Peter Zijlstra wrote:
>
> > On Tue, Sep 13, 2016 at 09:39:59AM -0400, Mike Snitzer wrote:
> > > So I'm not sure how this dm-bufio local cond_resched() wrapper still got
> > > in... happy to take your patch.
> > >
> > > Please respond with whatever SOB you'd like applied to the patch header.
> >
> > Sorry, for the delay, here goes.
>
> Why not change it to might_sleep()? - that would be almost equivalent to

You mean might_resched(). might_sleep() is not even remotely equivalent.

> If we call the cond_resched() function in tight loops such as walking all
> buffers in a list, there may be performance penalty due to the call, so
> the call should be done only if it is really needed (i.e. in
> CONFIG_PREEMPT_VOLUNTARY case).

Makes sense.

Thanks,

	tglx
On Thu, Sep 22, 2016 at 10:59:30PM +0200, Thomas Gleixner wrote:
> On Thu, 22 Sep 2016, Mikulas Patocka wrote:
> > On Mon, 19 Sep 2016, Peter Zijlstra wrote:
> >
> > > On Tue, Sep 13, 2016 at 09:39:59AM -0400, Mike Snitzer wrote:
> > > > So I'm not sure how this dm-bufio local cond_resched() wrapper still got
> > > > in... happy to take your patch.
> > > >
> > > > Please respond with whatever SOB you'd like applied to the patch header.
> > >
> > > Sorry, for the delay, here goes.
> >
> > Why not change it to might_sleep()? - that would be almost equivalent to
>
> You mean might_resched(). might_sleep() is not even remotely equivalent.

It is, might_sleep() implies might_resched(). In fact, that's all
PREEMPT_VOLUNTARY is: make the might_sleep() debug test imply a resched
point.

> > If we call the cond_resched() function in tight loops such as walking all
> > buffers in a list, there may be performance penalty due to the call, so
> > the call should be done only if it is really needed (i.e. in
> > CONFIG_PREEMPT_VOLUNTARY case).
>
> Makes sense.

Is anybody still using PREEMPT_NONE? Most workloads also care about
latency to some extent. Lots of code has explicit cond_resched() and
doesn't worry.
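For reference, the implication Peter describes comes from how the two macros were defined at the time. The sketch below is paraphrased from memory of include/linux/kernel.h of that era; it is an approximation, not the exact source.

```c
/* Paraphrased approximation of include/linux/kernel.h, circa v4.x. */

#ifdef CONFIG_PREEMPT_VOLUNTARY
extern int _cond_resched(void);
# define might_resched()        _cond_resched()         /* voluntary preemption point */
#else
# define might_resched()        do { } while (0)        /* PREEMPT_NONE / PREEMPT: no-op */
#endif

#ifdef CONFIG_DEBUG_ATOMIC_SLEEP
/* Debug check ("may this context sleep?") plus, possibly, a resched point. */
# define might_sleep() \
        do { __might_sleep(__FILE__, __LINE__, 0); might_resched(); } while (0)
#else
# define might_sleep()  do { might_resched(); } while (0)
#endif
```

So on a CONFIG_PREEMPT_VOLUNTARY kernel every might_sleep() annotation doubles as a reschedule point, which is the sense in which it "implies" might_resched(); on PREEMPT_NONE it reduces to the debug check (or to nothing), which is also why Mikulas suggested it as a near-equivalent of the removed wrapper.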
On Fri, 23 Sep 2016, Peter Zijlstra wrote:
> On Thu, Sep 22, 2016 at 10:59:30PM +0200, Thomas Gleixner wrote:
> > On Thu, 22 Sep 2016, Mikulas Patocka wrote:
> > > On Mon, 19 Sep 2016, Peter Zijlstra wrote:
> > >
> > > > On Tue, Sep 13, 2016 at 09:39:59AM -0400, Mike Snitzer wrote:
> > > > > So I'm not sure how this dm-bufio local cond_resched() wrapper still got
> > > > > in... happy to take your patch.
> > > > >
> > > > > Please respond with whatever SOB you'd like applied to the patch header.
> > > >
> > > > Sorry, for the delay, here goes.
> > >
> > > Why not change it to might_sleep()? - that would be almost equivalent to
> >
> > You mean might_resched(). might_sleep() is not even remotely equivalent.
>
> It is, might_sleep() implies might_resched(). In fact, that's all
> PREEMPT_VOLUNTARY is: make the might_sleep() debug test imply a resched
> point.

Grr, how intuitive - NOT!

> > > If we call the cond_resched() function in tight loops such as walking all
> > > buffers in a list, there may be performance penalty due to the call, so
> > > the call should be done only if it is really needed (i.e. in
> > > CONFIG_PREEMPT_VOLUNTARY case).
> >
> > Makes sense.
>
> Is anybody still using PREEMPT_NONE? Most workloads also care about
> latency to some extent. Lots of code has explicit cond_resched() and
> doesn't worry.

Dunno. But I bet there are workloads which love it.

Thanks,

	tglx
On Fri, Sep 23, 2016 at 10:00:37AM +0200, Thomas Gleixner wrote:
> On Fri, 23 Sep 2016, Peter Zijlstra wrote:
> > It is, might_sleep() implies might_resched(). In fact, that's all
> > PREEMPT_VOLUNTARY is: make the might_sleep() debug test imply a resched
> > point.
>
> Grr, how intuitive - NOT!

No, it actually makes sense. Because you 'obviously' only call
might_sleep() in contexts that should be able to sleep (if not, it'll
holler). So they're already placed right for preemption.
On Fri, 23 Sep 2016, Peter Zijlstra wrote:
> On Fri, Sep 23, 2016 at 10:00:37AM +0200, Thomas Gleixner wrote:
> > On Fri, 23 Sep 2016, Peter Zijlstra wrote:
> > > It is, might_sleep() implies might_resched(). In fact, that's all
> > > PREEMPT_VOLUNTARY is: make the might_sleep() debug test imply a resched
> > > point.
> >
> > Grr, how intuitive - NOT!
>
> No, it actually makes sense. Because you 'obviously' only call
> might_sleep() in contexts that should be able to sleep (if not, it'll
> holler). So they're already placed right for preemption.

I disagree. might_sleep() is commonly known as a debug mechanism, and it
existed before the preemption stuff went in. So the easy way to sprinkle
preemption points into the kernel was to hijack might_sleep(). I know it's
historical, but that doesn't make it any more intuitive.

Thanks,

	tglx
* Thomas Gleixner <tglx@linutronix.de> wrote:

> On Fri, 23 Sep 2016, Peter Zijlstra wrote:
> > On Fri, Sep 23, 2016 at 10:00:37AM +0200, Thomas Gleixner wrote:
> > > On Fri, 23 Sep 2016, Peter Zijlstra wrote:
> > > > It is, might_sleep() implies might_resched(). In fact, that's all
> > > > PREEMPT_VOLUNTARY is: make the might_sleep() debug test imply a resched
> > > > point.
> > >
> > > Grr, how intuitive - NOT!
> >
> > No, it actually makes sense. Because you 'obviously' only call
> > might_sleep() in contexts that should be able to sleep (if not, it'll
> > holler). So they're already placed right for preemption.
>
> I disagree. might_sleep() is commonly known as a debug mechanism, and it
> existed before the preemption stuff went in. So the easy way to sprinkle
> preemption points into the kernel was to hijack might_sleep(). I know it's
> historical, but that doesn't make it any more intuitive.

If we rename it to might_as_well_sleep() it becomes more intuitive! ;-)

Thanks,

	Ingo
On Fri, 2016-09-23 at 10:00 +0200, Thomas Gleixner wrote:
> On Fri, 23 Sep 2016, Peter Zijlstra wrote:
>
> > Is anybody still using PREEMPT_NONE? Most workloads also care about
> > latency to some extent. Lots of code has explicit cond_resched() and
> > doesn't worry.
>
> Dunno. But I bet there are workloads which love it.

SUSE definitely uses it. I had presumed that was enterprise standard.

	-Mike
On Fri, Sep 23, 2016 at 02:17:10PM +0200, Mike Galbraith wrote:
> On Fri, 2016-09-23 at 10:00 +0200, Thomas Gleixner wrote:
> > On Fri, 23 Sep 2016, Peter Zijlstra wrote:
> >
> > > Is anybody still using PREEMPT_NONE? Most workloads also care about
> > > latency to some extent. Lots of code has explicit cond_resched() and
> > > doesn't worry.
> >
> > Dunno. But I bet there are workloads which love it.
>
> SUSE definitely uses it. I had presumed that was enterprise standard.

Hmm, I thought most distros defaulted to PREEMPT_VOLUNTARY.
On Fri, 2016-09-23 at 14:26 +0200, Peter Zijlstra wrote:
> On Fri, Sep 23, 2016 at 02:17:10PM +0200, Mike Galbraith wrote:
> > On Fri, 2016-09-23 at 10:00 +0200, Thomas Gleixner wrote:
> > > On Fri, 23 Sep 2016, Peter Zijlstra wrote:
> > >
> > > > Is anybody still using PREEMPT_NONE? Most workloads also care about
> > > > latency to some extent. Lots of code has explicit cond_resched() and
> > > > doesn't worry.
> > >
> > > Dunno. But I bet there are workloads which love it.
> >
> > SUSE definitely uses it. I had presumed that was enterprise standard.
>
> Hmm, I thought most distros defaulted to PREEMPT_VOLUNTARY.

I use PREEMPT_VOLUNTARY for my desktop, as it offers much better
performance than the PREEMPT desktop-targeted kernels (ick), but
workhorses run PREEMPT_NONE for maximum throughput.

	-Mike
On Fri, Sep 23 2016 at 8:26am -0400,
Peter Zijlstra <peterz@infradead.org> wrote:

> On Fri, Sep 23, 2016 at 02:17:10PM +0200, Mike Galbraith wrote:
> > On Fri, 2016-09-23 at 10:00 +0200, Thomas Gleixner wrote:
> > > On Fri, 23 Sep 2016, Peter Zijlstra wrote:
> > >
> > > > Is anybody still using PREEMPT_NONE? Most workloads also care about
> > > > latency to some extent. Lots of code has explicit cond_resched() and
> > > > doesn't worry.
> > >
> > > Dunno. But I bet there are workloads which love it.
> >
> > SUSE definitely uses it. I had presumed that was enterprise standard.
>
> Hmm, I thought most distros defaulted to PREEMPT_VOLUNTARY.

So what is the consensus on this? Switch dm-bufio's cond_resched calls
(in Peter's patch) to might_sleep()? Or continue using cond_resched but
fix cond_resched to do the might_sleep() equivalent if PREEMPT_NONE?

Mike
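To make the two options in Mike's question concrete, here is a purely illustrative sketch (not from any posted patch) of how each would look at one of the call sites in Peter's diff, the loop in __flush_write_list(); the surrounding function body is paraphrased from the dm-bufio.c of that era.

```c
/* Illustrative only -- neither variant below was posted as a patch. */
static void __flush_write_list(struct list_head *write_list)
{
        struct blk_plug plug;

        blk_start_plug(&plug);
        while (!list_empty(write_list)) {
                struct dm_buffer *b =
                        list_entry(write_list->next, struct dm_buffer, write_list);
                list_del(&b->write_list);
                submit_io(b, WRITE, b->block, write_endio);

                /*
                 * Option 1: might_sleep() -- a sleep-in-atomic debug check on
                 * every kernel, but a reschedule point only with
                 * CONFIG_PREEMPT_VOLUNTARY, i.e. roughly the behaviour of the
                 * old dm_bufio_cond_resched() wrapper.
                 *
                 * Option 2 (what Peter's patch does): cond_resched() -- an
                 * unconditional call that reschedules when needed on both
                 * PREEMPT_NONE and PREEMPT_VOLUNTARY kernels.
                 */
                cond_resched();
        }
        blk_finish_plug(&plug);
}
```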
On Fri, Sep 23, 2016 at 08:42:51AM -0400, Mike Snitzer wrote:
> On Fri, Sep 23 2016 at 8:26am -0400,
> Peter Zijlstra <peterz@infradead.org> wrote:
>
> > On Fri, Sep 23, 2016 at 02:17:10PM +0200, Mike Galbraith wrote:
> > > On Fri, 2016-09-23 at 10:00 +0200, Thomas Gleixner wrote:
> > > > On Fri, 23 Sep 2016, Peter Zijlstra wrote:
> > > >
> > > > > Is anybody still using PREEMPT_NONE? Most workloads also care about
> > > > > latency to some extent. Lots of code has explicit cond_resched() and
> > > > > doesn't worry.
> > > >
> > > > Dunno. But I bet there are workloads which love it.
> > >
> > > SUSE definitely uses it. I had presumed that was enterprise standard.
> >
> > Hmm, I thought most distros defaulted to PREEMPT_VOLUNTARY.
>
> So what is the consensus on this? Switch dm-bufio's cond_resched calls
> (in Peter's patch) to might_sleep()? Or continue using cond_resched but
> fix cond_resched to do the might_sleep() equivalent if PREEMPT_NONE?

I'd go with the one I posted and look again if a performance issue ever
shows up.
On 09/23/16 00:34, Peter Zijlstra wrote:
> Is anybody still using PREEMPT_NONE? Most workloads also care about
> latency to some extent. Lots of code has explicit cond_resched() and
> doesn't worry.

From a SLES11 system:

$ grep PREEMPT_NONE /boot/config-3.0.101-0.47.67-default
CONFIG_PREEMPT_NONE=y

From a SLES12 system:

$ grep CONFIG_PREEMPT_NONE /boot/config-4.4.16-56-default
CONFIG_PREEMPT_NONE=y

Bart.