diff mbox series

[block,2/2] writeback, cgroup: inode_switch_wbs() shouldn't give up on wb_switch_rwsem trylock fail

Message ID 20190802190813.GC136335@devbig004.ftw2.facebook.com (mailing list archive)
State New, archived
Headers show
Series [block,1/2] writeback, cgroup: Adjust WB_FRN_TIME_CUT_DIV to accelerate foreign inode switching | expand

Commit Message

Tejun Heo Aug. 2, 2019, 7:08 p.m. UTC
As inode wb switching may make sync(2) miss some inodes, they're
synchronized using wb_switch_rwsem so that no wb switching happens
while sync(2) is in progress.  In addition to synchronizing the actual
switching, the rwsem is also used to prevent queueing new switch
attempts while sync(2) is in progress.  This is to avoid queueing too
many instances while the rwsem is held by sync(2).  Unfortunately,
this is too aggressive and can block wb switching for a long time if
sync(2) is frequent.

The goal is avoiding exploding the number of scheduled switches, not
avoiding scheduling anything.  Let's use wb_switch_rwsem only for
synchronizing the actual switching and sync(2) and use
isw_nr_in_flight instead for limiting the maximum number of scheduled
switches.  The limit is set to 1024 which should be more than enough
while still avoiding extreme situations.

Signed-off-by: Tejun Heo <tj@kernel.org>
---
 fs/fs-writeback.c |   17 +++++------------
 1 file changed, 5 insertions(+), 12 deletions(-)

Comments

Jan Kara Aug. 15, 2019, 1:53 p.m. UTC | #1
On Fri 02-08-19 12:08:13, Tejun Heo wrote:
> As inode wb switching may make sync(2) miss some inodes, they're
> synchronized using wb_switch_rwsem so that no wb switching happens
> while sync(2) is in progress.  In addition to synchronizing the actual
> switching, the rwsem is also used to prevent queueing new switch
> attempts while sync(2) is in progress.  This is to avoid queueing too
> many instances while the rwsem is held by sync(2).  Unfortunately,
> this is too aggressive and can block wb switching for a long time if
> sync(2) is frequent.
> 
> The goal is avoiding exploding the number of scheduled switches, not
> avoiding scheduling anything.  Let's use wb_switch_rwsem only for
> synchronizing the actual switching and sync(2) and use
> isw_nr_in_flight instead for limiting the maximum number of scheduled
> switches.  The limit is set to 1024 which should be more than enough
> while still avoiding extreme situations.
> 
> Signed-off-by: Tejun Heo <tj@kernel.org>

Looks good to me. You can add:

Reviewed-by: Jan Kara <jack@suse.cz>

								Honza


> ---
>  fs/fs-writeback.c |   17 +++++------------
>  1 file changed, 5 insertions(+), 12 deletions(-)
> 
> --- a/fs/fs-writeback.c
> +++ b/fs/fs-writeback.c
> @@ -237,6 +237,7 @@ static void wb_wait_for_completion(struc
>  					/* if foreign slots >= 8, switch */
>  #define WB_FRN_HIST_MAX_SLOTS	(WB_FRN_HIST_THR_SLOTS / 2 + 1)
>  					/* one round can affect upto 5 slots */
> +#define WB_FRN_MAX_IN_FLIGHT	1024	/* don't queue too many concurrently */
>  
>  static atomic_t isw_nr_in_flight = ATOMIC_INIT(0);
>  static struct workqueue_struct *isw_wq;
> @@ -489,18 +490,13 @@ static void inode_switch_wbs(struct inod
>  	if (inode->i_state & I_WB_SWITCH)
>  		return;
>  
> -	/*
> -	 * Avoid starting new switches while sync_inodes_sb() is in
> -	 * progress.  Otherwise, if the down_write protected issue path
> -	 * blocks heavily, we might end up starting a large number of
> -	 * switches which will block on the rwsem.
> -	 */
> -	if (!down_read_trylock(&bdi->wb_switch_rwsem))
> +	/* avoid queueing a new switch if too many are already in flight */
> +	if (atomic_read(&isw_nr_in_flight) > WB_FRN_MAX_IN_FLIGHT)
>  		return;
>  
>  	isw = kzalloc(sizeof(*isw), GFP_ATOMIC);
>  	if (!isw)
> -		goto out_unlock;
> +		return;
>  
>  	/* find and pin the new wb */
>  	rcu_read_lock();
> @@ -534,15 +530,12 @@ static void inode_switch_wbs(struct inod
>  	call_rcu(&isw->rcu_head, inode_switch_wbs_rcu_fn);
>  
>  	atomic_inc(&isw_nr_in_flight);
> -
> -	goto out_unlock;
> +	return;
>  
>  out_free:
>  	if (isw->new_wb)
>  		wb_put(isw->new_wb);
>  	kfree(isw);
> -out_unlock:
> -	up_read(&bdi->wb_switch_rwsem);
>  }
>  
>  /**
diff mbox series

Patch

--- a/fs/fs-writeback.c
+++ b/fs/fs-writeback.c
@@ -237,6 +237,7 @@  static void wb_wait_for_completion(struc
 					/* if foreign slots >= 8, switch */
 #define WB_FRN_HIST_MAX_SLOTS	(WB_FRN_HIST_THR_SLOTS / 2 + 1)
 					/* one round can affect upto 5 slots */
+#define WB_FRN_MAX_IN_FLIGHT	1024	/* don't queue too many concurrently */
 
 static atomic_t isw_nr_in_flight = ATOMIC_INIT(0);
 static struct workqueue_struct *isw_wq;
@@ -489,18 +490,13 @@  static void inode_switch_wbs(struct inod
 	if (inode->i_state & I_WB_SWITCH)
 		return;
 
-	/*
-	 * Avoid starting new switches while sync_inodes_sb() is in
-	 * progress.  Otherwise, if the down_write protected issue path
-	 * blocks heavily, we might end up starting a large number of
-	 * switches which will block on the rwsem.
-	 */
-	if (!down_read_trylock(&bdi->wb_switch_rwsem))
+	/* avoid queueing a new switch if too many are already in flight */
+	if (atomic_read(&isw_nr_in_flight) > WB_FRN_MAX_IN_FLIGHT)
 		return;
 
 	isw = kzalloc(sizeof(*isw), GFP_ATOMIC);
 	if (!isw)
-		goto out_unlock;
+		return;
 
 	/* find and pin the new wb */
 	rcu_read_lock();
@@ -534,15 +530,12 @@  static void inode_switch_wbs(struct inod
 	call_rcu(&isw->rcu_head, inode_switch_wbs_rcu_fn);
 
 	atomic_inc(&isw_nr_in_flight);
-
-	goto out_unlock;
+	return;
 
 out_free:
 	if (isw->new_wb)
 		wb_put(isw->new_wb);
 	kfree(isw);
-out_unlock:
-	up_read(&bdi->wb_switch_rwsem);
 }
 
 /**