
lightnvm: update closed list out of interrupt context

Message ID 1453723113-30664-1-git-send-email-javier@javigon.com (mailing list archive)
State New, archived
Headers show

Commit Message

Javier González Jan. 25, 2016, 11:58 a.m. UTC
When an I/O finishes, full blocks are moved from the open to the closed
list - a lock is taken to protect the list. At the moment, this happens
in interrupt context, which is not correct.

This patch moves this logic to the GC workqueue instead, thus avoiding
holding a spinlock in an atomic context.

Signed-off-by: Javier González <javier@cnexlabs.com>
---
 drivers/lightnvm/rrpc.c | 23 ++++++++++-------------
 1 file changed, 10 insertions(+), 13 deletions(-)
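
To illustrate the pattern, here is a minimal, self-contained sketch of
deferring the "block is full" bookkeeping from the I/O completion
(interrupt) path to a work handler running in process context. The
demo_* names are hypothetical and simplified; rrpc itself queues the
work on its own GC workqueue through a gcb allocated from
rrpc->gcb_pool rather than using schedule_work().

#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>

struct demo_lun {
	spinlock_t lock;		/* assumed initialised with spin_lock_init() */
	struct list_head open_list;
	struct list_head closed_list;
	unsigned int nr_open_blocks;
	unsigned int nr_closed_blocks;
};

struct demo_block {
	struct list_head list;		/* linked on open_list until the block fills up */
	struct demo_lun *lun;
	struct work_struct close_work;
};

/* Runs later, in process context, when the workqueue executes the item. */
static void demo_close_block(struct work_struct *work)
{
	struct demo_block *blk = container_of(work, struct demo_block,
					      close_work);
	struct demo_lun *lun = blk->lun;

	/* Taking the lun lock here is fine: we are not in atomic context. */
	spin_lock(&lun->lock);
	lun->nr_open_blocks--;
	lun->nr_closed_blocks++;
	list_move_tail(&blk->list, &lun->closed_list);
	spin_unlock(&lun->lock);
}

/* Called from the I/O completion (interrupt) path: only schedule the work. */
static void demo_block_full(struct demo_block *blk)
{
	INIT_WORK(&blk->close_work, demo_close_block);
	schedule_work(&blk->close_work);
}

With this structure the completion path only queues a work item; the
counter updates and the open-to-closed list movement run later, outside
interrupt context, which is what the rrpc_gc_queue() hunk below does
with the real lun->lock.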

Comments

Matias Bjorling Jan. 26, 2016, 1:12 p.m. UTC | #1
On 01/25/2016 12:58 PM, Javier González wrote:
> When an I/O finishes, full blocks are moved from the open to the closed
> list - a lock is taken to protect the list. At the moment, this happens
> in interrupt context, which is not correct.
> 
> This patch moves this logic to the GC workqueue instead, thus avoiding
> holding a spinlock in an atomic context.
> 
> Signed-off-by: Javier González <javier@cnexlabs.com>
> ---
>  drivers/lightnvm/rrpc.c | 23 ++++++++++-------------
>  1 file changed, 10 insertions(+), 13 deletions(-)
> 

Thanks, applied. I have updated the patch text a bit and added a Fixes:
tag to mark the patch as a fix for commit ff0e498b ("lightnvm: manage
open and closed blocks separately").
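
For reference, the tag presumably takes the usual form below; the
abbreviated hash is the one quoted above, and kernel convention is to
spell out at least the first 12 characters of the SHA together with the
commit subject:

Fixes: ff0e498b ("lightnvm: manage open and closed blocks separately")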

Patch

diff --git a/drivers/lightnvm/rrpc.c b/drivers/lightnvm/rrpc.c
index d8c7595..e2710da 100644
--- a/drivers/lightnvm/rrpc.c
+++ b/drivers/lightnvm/rrpc.c
@@ -497,12 +497,21 @@ static void rrpc_gc_queue(struct work_struct *work)
 	struct rrpc *rrpc = gcb->rrpc;
 	struct rrpc_block *rblk = gcb->rblk;
 	struct nvm_lun *lun = rblk->parent->lun;
+	struct nvm_block *blk = rblk->parent;
 	struct rrpc_lun *rlun = &rrpc->luns[lun->id - rrpc->lun_offset];
 
 	spin_lock(&rlun->lock);
 	list_add_tail(&rblk->prio, &rlun->prio_list);
 	spin_unlock(&rlun->lock);
 
+	spin_lock(&lun->lock);
+	lun->nr_open_blocks--;
+	lun->nr_closed_blocks++;
+	blk->state &= ~NVM_BLK_ST_OPEN;
+	blk->state |= NVM_BLK_ST_CLOSED;
+	list_move_tail(&rblk->list, &rlun->closed_list);
+	spin_unlock(&lun->lock);
+
 	mempool_free(gcb, rrpc->gcb_pool);
 	pr_debug("nvm: block '%lu' is full, allow GC (sched)\n",
 							rblk->parent->id);
@@ -666,20 +675,8 @@ static void rrpc_end_io_write(struct rrpc *rrpc, struct rrpc_rq *rrqd,
 		lun = rblk->parent->lun;
 
 		cmnt_size = atomic_inc_return(&rblk->data_cmnt_size);
-		if (unlikely(cmnt_size == rrpc->dev->pgs_per_blk)) {
-			struct nvm_block *blk = rblk->parent;
-			struct rrpc_lun *rlun = rblk->rlun;
-
-			spin_lock(&lun->lock);
-			lun->nr_open_blocks--;
-			lun->nr_closed_blocks++;
-			blk->state &= ~NVM_BLK_ST_OPEN;
-			blk->state |= NVM_BLK_ST_CLOSED;
-			list_move_tail(&rblk->list, &rlun->closed_list);
-			spin_unlock(&lun->lock);
-
+		if (unlikely(cmnt_size == rrpc->dev->pgs_per_blk))
 			rrpc_run_gc(rrpc, rblk);
-		}
 	}
 }