diff mbox

dmaengine: fsl raid parity continuation support

Message ID 1405322401-20156-1-git-send-email-xuelin.shi@freescale.com (mailing list archive)
State Not Applicable
Delegated to: Dan Williams
Headers show

Commit Message

Xuelin Shi July 14, 2014, 7:20 a.m. UTC
From: Xuelin Shi <xuelin.shi@freescale.com>

Support parity computation for arrays with more than 16 disks.

Signed-off-by: Xuelin Shi <b29237@freescale.com>
---
 Comment: passed the RAID6 recovery test.

 drivers/dma/fsl_raid.c | 32 +++++++++++++++++++++++++++++---
 1 file changed, 29 insertions(+), 3 deletions(-)
diff mbox

Patch

diff --git a/drivers/dma/fsl_raid.c b/drivers/dma/fsl_raid.c
index 1dc5981..935d05a 100644
--- a/drivers/dma/fsl_raid.c
+++ b/drivers/dma/fsl_raid.c
@@ -328,6 +328,8 @@  static struct dma_async_tx_descriptor *re_jr_prep_genq(
 	struct cmpnd_frame *cf;
 	u32 cdb;
 	unsigned int i, j;
+	unsigned int save_src_cnt = src_cnt;
+	int cont_q = 0;
 
 	if (len > MAX_DATA_LENGTH) {
 		pr_err("Length greater than %d not supported\n",
@@ -340,6 +342,11 @@  static struct dma_async_tx_descriptor *re_jr_prep_genq(
 	if (desc <= 0)
 		return NULL;
 
+	if (scf && (flags & DMA_PREP_CONTINUE)) {
+		cont_q = 1;
+		src_cnt += 1;
+	}
+
 	/* Filling xor CDB */
 	cdb = RE_XOR_OPCODE << RE_CDB_OPCODE_SHIFT;
 	cdb |= (src_cnt - 1) << RE_CDB_NRCS_SHIFT;
@@ -351,8 +358,10 @@  static struct dma_async_tx_descriptor *re_jr_prep_genq(
 
 	if (scf != NULL) {
 		/* compute q = src0*coef0^src1*coef1^..., * is GF(8) mult */
-		for (i = 0; i < src_cnt; i++)
+		for (i = 0; i < save_src_cnt; i++)
 			xor->gfm[i] = scf[i];
+		if (cont_q)
+			xor->gfm[i++] = 1;
 	} else {
 		/* compute P, that is XOR all srcs */
 		for (i = 0; i < src_cnt; i++)
@@ -367,9 +376,12 @@  static struct dma_async_tx_descriptor *re_jr_prep_genq(
 	fill_cfd_frame(cf, 1, len, dest, 0);
 
 	/* Fill CFD's rest of the frames with source buffers */
-	for (i = 2, j = 0; j < src_cnt; i++, j++)
+	for (i = 2, j = 0; j < save_src_cnt; i++, j++)
 		fill_cfd_frame(cf, i, len, src[j], 0);
 
+	if (cont_q)
+		fill_cfd_frame(cf, i++, len, dest, 0);
+
 	/* Setting the final bit in the last source buffer frame in CFD */
 	cf[i - 1].efrl32 |= 1 << CF_FINAL_SHIFT;
 
@@ -404,6 +416,7 @@  static struct dma_async_tx_descriptor *re_jr_prep_pq(
 	u32 cdb;
 	u8 *p;
 	int gfmq_len, i, j;
+	unsigned long save_src_cnt = src_cnt;
 
 	if (len > MAX_DATA_LENGTH) {
 		pr_err("Length greater than %d not supported\n",
@@ -445,6 +458,9 @@  static struct dma_async_tx_descriptor *re_jr_prep_pq(
 		return re_jr_prep_genq(chan, dest[1], src, src_cnt,
 				scf, len, flags);
 
+	if (flags & DMA_PREP_CONTINUE)
+		src_cnt += 3;
+
 	jr = container_of(chan, struct re_jr, chan);
 	desc = re_jr_alloc_desc(jr, flags);
 	if (desc <= 0)
@@ -482,9 +498,19 @@  static struct dma_async_tx_descriptor *re_jr_prep_pq(
 		fill_cfd_frame(cf, i, len, dest[j], 0);
 
 	/* Fill CFD's rest of the frames with source buffers */
-	for (i = 3, j = 0; j < src_cnt; i++, j++)
+	for (i = 3, j = 0; j < save_src_cnt; i++, j++)
 		fill_cfd_frame(cf, i, len, src[j], 0);
 
+	/* PQ computation continuation */
+	if (src_cnt - save_src_cnt == 3) {
+		p[save_src_cnt] = 0;
+		p[save_src_cnt + 1] = 0;
+		p[save_src_cnt + 2] = 1;
+		fill_cfd_frame(cf, i++, len, dest[0], 0);
+		fill_cfd_frame(cf, i++, len, dest[1], 0);
+		fill_cfd_frame(cf, i++, len, dest[1], 0);
+	}
+
 	/* Setting the final bit in the last source buffer frame in CFD */
 	cf[i - 1].efrl32 |= 1 << CF_FINAL_SHIFT;