[1/1] dmaengine: hsu: speed up residue calculation

Message ID 1447776030-18654-1-git-send-email-andriy.shevchenko@linux.intel.com (mailing list archive)
State Accepted

Commit Message

Andy Shevchenko Nov. 17, 2015, 4 p.m. UTC
There is no need to calculate the overall length of the descriptor each
time the DMA transfer status is queried. Instead, calculate it once, at
the descriptor allocation stage, and store it for later use.

Signed-off-by: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
---
 drivers/dma/hsu/hsu.c | 17 ++++-------------
 drivers/dma/hsu/hsu.h |  1 +
 2 files changed, 5 insertions(+), 13 deletions(-)
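
[Editor's note] To make the shape of the change concrete outside the
driver context, here is a minimal standalone C sketch (the demo_* types
and names are illustrative, not the driver's own): summing the
scatterlist lengths once at prep time turns every later residue query
from an O(nents) walk into a single read.

	#include <stddef.h>

	/* Illustrative stand-ins for the driver's sg/descriptor types. */
	struct demo_sg {
		size_t len;
	};

	struct demo_desc {
		struct demo_sg *sg;
		unsigned int nents;
		size_t length;	/* cached total, filled once at prep time */
	};

	/* Before the patch: every status query re-walked the list. */
	static size_t demo_desc_size_walk(const struct demo_desc *desc)
	{
		size_t bytes = 0;
		unsigned int i;

		for (i = 0; i < desc->nents; i++)
			bytes += desc->sg[i].len;
		return bytes;
	}

	/* After the patch: sum once while the descriptor is built... */
	static void demo_desc_prep(struct demo_desc *desc)
	{
		unsigned int i;

		desc->length = 0;
		for (i = 0; i < desc->nents; i++)
			desc->length += desc->sg[i].len;
	}

	/* ...so each later query is a single read. */
	static size_t demo_desc_size_cached(const struct demo_desc *desc)
	{
		return desc->length;
	}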

Comments

Andy Shevchenko Nov. 24, 2015, 9:46 a.m. UTC | #1
On Tue, 2015-11-17 at 18:00 +0200, Andy Shevchenko wrote:
> There is no need to calculate the overall length of the descriptor
> each time the DMA transfer status is queried. Instead, calculate it
> once, at the descriptor allocation stage, and store it for later use.

Ping?

Vinod Koul Dec. 5, 2015, 8:31 a.m. UTC | #2
On Tue, Nov 17, 2015 at 06:00:30PM +0200, Andy Shevchenko wrote:
> There is no need to calculate the overall length of the descriptor each
> time the DMA transfer status is queried. Instead, calculate it once, at
> the descriptor allocation stage, and store it for later use.

Applied, thanks

Patch

diff --git a/drivers/dma/hsu/hsu.c b/drivers/dma/hsu/hsu.c
index 823ad72..eef145e 100644
--- a/drivers/dma/hsu/hsu.c
+++ b/drivers/dma/hsu/hsu.c
@@ -228,6 +228,8 @@ static struct dma_async_tx_descriptor *hsu_dma_prep_slave_sg(
 	for_each_sg(sgl, sg, sg_len, i) {
 		desc->sg[i].addr = sg_dma_address(sg);
 		desc->sg[i].len = sg_dma_len(sg);
+
+		desc->length += sg_dma_len(sg);
 	}
 
 	desc->nents = sg_len;
@@ -249,21 +251,10 @@ static void hsu_dma_issue_pending(struct dma_chan *chan)
 	spin_unlock_irqrestore(&hsuc->vchan.lock, flags);
 }
 
-static size_t hsu_dma_desc_size(struct hsu_dma_desc *desc)
-{
-	size_t bytes = 0;
-	unsigned int i;
-
-	for (i = desc->active; i < desc->nents; i++)
-		bytes += desc->sg[i].len;
-
-	return bytes;
-}
-
 static size_t hsu_dma_active_desc_size(struct hsu_dma_chan *hsuc)
 {
 	struct hsu_dma_desc *desc = hsuc->desc;
-	size_t bytes = hsu_dma_desc_size(desc);
+	size_t bytes = desc->length;
 	int i;
 
 	i = desc->active % HSU_DMA_CHAN_NR_DESC;
@@ -294,7 +285,7 @@ static enum dma_status hsu_dma_tx_status(struct dma_chan *chan,
 		dma_set_residue(state, bytes);
 		status = hsuc->desc->status;
 	} else if (vdesc) {
-		bytes = hsu_dma_desc_size(to_hsu_dma_desc(vdesc));
+		bytes = to_hsu_dma_desc(vdesc)->length;
 		dma_set_residue(state, bytes);
 	}
 	spin_unlock_irqrestore(&hsuc->vchan.lock, flags);
diff --git a/drivers/dma/hsu/hsu.h b/drivers/dma/hsu/hsu.h
index f06579c..578a8ee 100644
--- a/drivers/dma/hsu/hsu.h
+++ b/drivers/dma/hsu/hsu.h
@@ -65,6 +65,7 @@ struct hsu_dma_desc {
 	enum dma_transfer_direction direction;
 	struct hsu_dma_sg *sg;
 	unsigned int nents;
+	size_t length;
 	unsigned int active;
 	enum dma_status status;
 };
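
[Editor's note] For context, the residue cached above is what a
dmaengine client ultimately sees through the generic status API. A
minimal sketch of that consumer side, assuming a channel and cookie
obtained through the usual slave-DMA setup (dma_request_chan() plus
dmaengine_submit()); the helper itself is hypothetical, not driver code:

	#include <linux/dmaengine.h>

	/*
	 * Hypothetical helper: report how many bytes of a submitted
	 * transfer are still outstanding.
	 */
	static u32 transfer_bytes_left(struct dma_chan *chan,
				       dma_cookie_t cookie)
	{
		struct dma_tx_state state;
		enum dma_status status;

		status = dmaengine_tx_status(chan, cookie, &state);
		if (status == DMA_COMPLETE)
			return 0;

		/*
		 * state.residue was filled by hsu_dma_tx_status() via
		 * dma_set_residue(); with this patch that value comes
		 * from the cached desc->length instead of a rescan of
		 * the sg list.
		 */
		return state.residue;
	}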