
[RFC] dm: Remove dm_bufio_cond_resched()

Message ID 20160913084520.GA5012@twins.programming.kicks-ass.net (mailing list archive)
State Superseded, archived
Delegated to: Mike Snitzer

Commit Message

Peter Zijlstra Sept. 13, 2016, 8:45 a.m. UTC
Hi all,

While grepping for PREEMPT_VOLUNTARY I ran into dm_bufio_cond_resched()
and wondered WTH it was about.

Is there anything wrong with the below patch?

---


Comments

Thomas Gleixner Sept. 13, 2016, 12:49 p.m. UTC | #1
On Tue, 13 Sep 2016, Peter Zijlstra wrote:

> Hi all,
> 
> While grepping for PREEMPT_VOLUNTARY I ran into dm_bufio_cond_resched()
> and wondered WTH it was about.
> 
> Is there anything wrong with the below patch?

Not at all, except that you forgot to add your SOB to it :)

Acked-by: Thomas Gleixner <tglx@linutronix.de>

Mike Snitzer Sept. 13, 2016, 1:39 p.m. UTC | #2
On Tue, Sep 13 2016 at  4:45am -0400,
Peter Zijlstra <peterz@infradead.org> wrote:

> Hi all,
> 
> While grepping for PREEMPT_VOLUNTARY I ran into dm_bufio_cond_resched()
> and wondered WTH it was about.
> 
> Is there anything wrong with the below patch?

No, I'll pick it up for the 4.9 merge.  Mikulas added it for sparc or
something.  I cannot recall _the_ reason (I wasn't maintaining DM back
then), but at the time both Alasdair and Joe Thornber reasoned that it
needed to go -- and that, if it was really needed, it should be done
as a proper patch to sched.h, etc.

So I'm not sure how this dm-bufio local cond_resched() wrapper still got
in... happy to take your patch.

Please respond with whatever SOB you'd like applied to the patch header.

Thanks,
Mike

Mikulas Patocka Sept. 19, 2016, 9:49 a.m. UTC | #3
On Tue, 13 Sep 2016, Peter Zijlstra wrote:

> Hi all,
> 
> While grepping for PREEMPT_VOLUNTARY I ran into dm_bufio_cond_resched()
> and wondered WTH it was about.

cond_resched() calls _cond_resched() even when we have a preemptive
kernel - with a preemptive kernel, calling cond_resched() is pointless
because rescheduling is done preemptively.

So, I added dm_bufio_cond_resched(), which does nothing on preemptive
kernels (and also on PREEMPT_NONE kernels, where the user doesn't care
about latency).

What is the reason why cond_resched() tests for rescheduling on a
preemptive kernel? Why should I use cond_resched() in that case?

Mikulas
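
For reference, the generic helper that the patch substitutes expands,
roughly, to a might-sleep annotation followed by a call into the
scheduler. This is a simplified sketch, not the literal <linux/sched.h>
definition, which varies between kernel versions:

#define cond_resched() ({			\
	___might_sleep(__FILE__, __LINE__, 0);	\
	_cond_resched();			\
})

Unlike the dm-bufio wrapper, this form is compiled in on every
preemption model and carries the debug annotation discussed further
down the thread.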


Peter Zijlstra Sept. 19, 2016, 10:47 a.m. UTC | #4
On Mon, Sep 19, 2016 at 05:49:07AM -0400, Mikulas Patocka wrote:
> 
> 
> On Tue, 13 Sep 2016, Peter Zijlstra wrote:
> 
> > Hi all,
> > 
> > While grepping for PREEMPT_VOLUNTARY I ran into dm_bufio_cond_resched()
> > and wondered WTH it was about.
> 
> cond_resched() calls _cond_resched() even when we have a preemptive
> kernel - with a preemptive kernel, calling cond_resched() is pointless
> because rescheduling is done preemptively.
> 
> So, I added dm_bufio_cond_resched(), which does nothing on preemptive
> kernels (and also on PREEMPT_NONE kernels, where the user doesn't care
> about latency).
> 
> What is the reason why cond_resched() tests for rescheduling on a
> preemptive kernel? Why should I use cond_resched() in that case?

Because everybody else does too. 'Fixing' something like that in the dm
code is entirely the wrong place. Also, you lose out on the
might_sleep() warning implied in it.

As it happens, I have a patch fixing that somewhere, let me try and get
it merged.

But thanks for the reminder, I'll go write a Changelog for this so that
people can commit.
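
To illustrate the debug coverage at stake, here is a hypothetical
snippet (not part of the patch; the lock and function names are made
up): with CONFIG_DEBUG_ATOMIC_SLEEP enabled, the might-sleep check
inside cond_resched() flags a call made from atomic context, whereas
the open-coded wrapper compiles away on most configurations and would
let the same bug go unnoticed.

#include <linux/sched.h>
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(example_lock);	/* illustrative only */

static void example_atomic_misuse(void)
{
	spin_lock(&example_lock);
	/*
	 * With CONFIG_DEBUG_ATOMIC_SLEEP, the might-sleep check in
	 * cond_resched() prints "BUG: sleeping function called from
	 * invalid context" here; dm_bufio_cond_resched() would not.
	 */
	cond_resched();
	spin_unlock(&example_lock);
}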


Patch

diff --git a/drivers/md/dm-bufio.c b/drivers/md/dm-bufio.c
index 8625040bae92..125aedc3875f 100644
--- a/drivers/md/dm-bufio.c
+++ b/drivers/md/dm-bufio.c
@@ -191,19 +191,6 @@  static void dm_bufio_unlock(struct dm_bufio_client *c)
 	mutex_unlock(&c->lock);
 }
 
-/*
- * FIXME Move to sched.h?
- */
-#ifdef CONFIG_PREEMPT_VOLUNTARY
-#  define dm_bufio_cond_resched()		\
-do {						\
-	if (unlikely(need_resched()))		\
-		_cond_resched();		\
-} while (0)
-#else
-#  define dm_bufio_cond_resched()                do { } while (0)
-#endif
-
 /*----------------------------------------------------------------*/
 
 /*
@@ -741,7 +728,7 @@  static void __flush_write_list(struct list_head *write_list)
 			list_entry(write_list->next, struct dm_buffer, write_list);
 		list_del(&b->write_list);
 		submit_io(b, WRITE, b->block, write_endio);
-		dm_bufio_cond_resched();
+		cond_resched();
 	}
 	blk_finish_plug(&plug);
 }
@@ -780,7 +767,7 @@  static struct dm_buffer *__get_unclaimed_buffer(struct dm_bufio_client *c)
 			__unlink_buffer(b);
 			return b;
 		}
-		dm_bufio_cond_resched();
+		cond_resched();
 	}
 
 	list_for_each_entry_reverse(b, &c->lru[LIST_DIRTY], lru_list) {
@@ -791,7 +778,7 @@  static struct dm_buffer *__get_unclaimed_buffer(struct dm_bufio_client *c)
 			__unlink_buffer(b);
 			return b;
 		}
-		dm_bufio_cond_resched();
+		cond_resched();
 	}
 
 	return NULL;
@@ -923,7 +910,7 @@  static void __write_dirty_buffers_async(struct dm_bufio_client *c, int no_wait,
 			return;
 
 		__write_dirty_buffer(b, write_list);
-		dm_bufio_cond_resched();
+		cond_resched();
 	}
 }
 
@@ -973,7 +960,7 @@  static void __check_watermark(struct dm_bufio_client *c,
 			return;
 
 		__free_buffer_wake(b);
-		dm_bufio_cond_resched();
+		cond_resched();
 	}
 
 	if (c->n_buffers[LIST_DIRTY] > threshold_buffers)
@@ -1170,7 +1157,7 @@  void dm_bufio_prefetch(struct dm_bufio_client *c,
 				submit_io(b, READ, b->block, read_endio);
 			dm_bufio_release(b);
 
-			dm_bufio_cond_resched();
+			cond_resched();
 
 			if (!n_blocks)
 				goto flush_plug;
@@ -1291,7 +1278,7 @@  int dm_bufio_write_dirty_buffers(struct dm_bufio_client *c)
 		    !test_bit(B_WRITING, &b->state))
 			__relink_lru(b, LIST_CLEAN);
 
-		dm_bufio_cond_resched();
+		cond_resched();
 
 		/*
 		 * If we dropped the lock, the list is no longer consistent,
@@ -1574,7 +1561,7 @@  static unsigned long __scan(struct dm_bufio_client *c, unsigned long nr_to_scan,
 				freed++;
 			if (!--nr_to_scan || ((count - freed) <= retain_target))
 				return freed;
-			dm_bufio_cond_resched();
+			cond_resched();
 		}
 	}
 	return freed;
@@ -1808,7 +1795,7 @@  static void __evict_old_buffers(struct dm_bufio_client *c, unsigned long age_hz)
 		if (__try_evict_buffer(b, 0))
 			count--;
 
-		dm_bufio_cond_resched();
+		cond_resched();
 	}
 
 	dm_bufio_unlock(c);