linux-kernel.vger.kernel.org archive mirror
 help / color / mirror / Atom feed
* [PATCH v2 1/4] mmc: dw_mmc: split out preparation of desc for IDMAC32 and IDMAC64
@ 2016-09-02  4:14 ` Shawn Lin
  2016-09-02  4:14   ` [PATCH v2 2/4] mmc: dw_mmc: avoid race condition of cpu and IDMAC Shawn Lin
                     ` (3 more replies)
  0 siblings, 4 replies; 11+ messages in thread
From: Shawn Lin @ 2016-09-02  4:14 UTC (permalink / raw)
  To: Jaehoon Chung
  Cc: Ulf Hansson, linux-mmc, linux-kernel, Doug Anderson,
	Heiko Stuebner, linux-rockchip, Shawn Lin

We intend to add more checks for descriptors when
preparing desc. Let's split out the separate body
to make the dw_mci_translate_sglist not so lengthy.
After splitting out these two functions, we could
remove dw_mci_translate_sglist and call both of them
when starting idmac.

Signed-off-by: Shawn Lin <shawn.lin@rock-chips.com>

---

Changes in v2:
- remove dw_mci_translate_sglist

 drivers/mmc/host/dw_mmc.c | 149 ++++++++++++++++++++++++----------------------
 1 file changed, 79 insertions(+), 70 deletions(-)

diff --git a/drivers/mmc/host/dw_mmc.c b/drivers/mmc/host/dw_mmc.c
index 22dacae..782b303 100644
--- a/drivers/mmc/host/dw_mmc.c
+++ b/drivers/mmc/host/dw_mmc.c
@@ -467,112 +467,121 @@ static void dw_mci_dmac_complete_dma(void *arg)
 	}
 }
 
-static void dw_mci_translate_sglist(struct dw_mci *host, struct mmc_data *data,
-				    unsigned int sg_len)
+static inline void dw_mci_prepare_desc64(struct dw_mci *host,
+					 struct mmc_data *data,
+					 unsigned int sg_len)
 {
 	unsigned int desc_len;
+	struct idmac_desc_64addr *desc_first, *desc_last, *desc;
 	int i;
 
-	if (host->dma_64bit_address == 1) {
-		struct idmac_desc_64addr *desc_first, *desc_last, *desc;
-
-		desc_first = desc_last = desc = host->sg_cpu;
+	desc_first = desc_last = desc = host->sg_cpu;
 
-		for (i = 0; i < sg_len; i++) {
-			unsigned int length = sg_dma_len(&data->sg[i]);
+	for (i = 0; i < sg_len; i++) {
+		unsigned int length = sg_dma_len(&data->sg[i]);
 
-			u64 mem_addr = sg_dma_address(&data->sg[i]);
+		u64 mem_addr = sg_dma_address(&data->sg[i]);
 
-			for ( ; length ; desc++) {
-				desc_len = (length <= DW_MCI_DESC_DATA_LENGTH) ?
-					   length : DW_MCI_DESC_DATA_LENGTH;
+		for ( ; length ; desc++) {
+			desc_len = (length <= DW_MCI_DESC_DATA_LENGTH) ?
+				   length : DW_MCI_DESC_DATA_LENGTH;
 
-				length -= desc_len;
+			length -= desc_len;
 
-				/*
-				 * Set the OWN bit and disable interrupts
-				 * for this descriptor
-				 */
-				desc->des0 = IDMAC_DES0_OWN | IDMAC_DES0_DIC |
-							IDMAC_DES0_CH;
+			/*
+			 * Set the OWN bit and disable interrupts
+			 * for this descriptor
+			 */
+			desc->des0 = IDMAC_DES0_OWN | IDMAC_DES0_DIC |
+						IDMAC_DES0_CH;
 
-				/* Buffer length */
-				IDMAC_64ADDR_SET_BUFFER1_SIZE(desc, desc_len);
+			/* Buffer length */
+			IDMAC_64ADDR_SET_BUFFER1_SIZE(desc, desc_len);
 
-				/* Physical address to DMA to/from */
-				desc->des4 = mem_addr & 0xffffffff;
-				desc->des5 = mem_addr >> 32;
+			/* Physical address to DMA to/from */
+			desc->des4 = mem_addr & 0xffffffff;
+			desc->des5 = mem_addr >> 32;
 
-				/* Update physical address for the next desc */
-				mem_addr += desc_len;
+			/* Update physical address for the next desc */
+			mem_addr += desc_len;
 
-				/* Save pointer to the last descriptor */
-				desc_last = desc;
-			}
+			/* Save pointer to the last descriptor */
+			desc_last = desc;
 		}
+	}
 
-		/* Set first descriptor */
-		desc_first->des0 |= IDMAC_DES0_FD;
+	/* Set first descriptor */
+	desc_first->des0 |= IDMAC_DES0_FD;
 
-		/* Set last descriptor */
-		desc_last->des0 &= ~(IDMAC_DES0_CH | IDMAC_DES0_DIC);
-		desc_last->des0 |= IDMAC_DES0_LD;
+	/* Set last descriptor */
+	desc_last->des0 &= ~(IDMAC_DES0_CH | IDMAC_DES0_DIC);
+	desc_last->des0 |= IDMAC_DES0_LD;
+}
 
-	} else {
-		struct idmac_desc *desc_first, *desc_last, *desc;
 
-		desc_first = desc_last = desc = host->sg_cpu;
+static inline void dw_mci_prepare_desc32(struct dw_mci *host,
+					 struct mmc_data *data,
+					 unsigned int sg_len)
+{
+	unsigned int desc_len;
+	struct idmac_desc *desc_first, *desc_last, *desc;
+	int i;
 
-		for (i = 0; i < sg_len; i++) {
-			unsigned int length = sg_dma_len(&data->sg[i]);
+	desc_first = desc_last = desc = host->sg_cpu;
 
-			u32 mem_addr = sg_dma_address(&data->sg[i]);
+	for (i = 0; i < sg_len; i++) {
+		unsigned int length = sg_dma_len(&data->sg[i]);
 
-			for ( ; length ; desc++) {
-				desc_len = (length <= DW_MCI_DESC_DATA_LENGTH) ?
-					   length : DW_MCI_DESC_DATA_LENGTH;
+		u32 mem_addr = sg_dma_address(&data->sg[i]);
 
-				length -= desc_len;
+		for ( ; length ; desc++) {
+			desc_len = (length <= DW_MCI_DESC_DATA_LENGTH) ?
+				   length : DW_MCI_DESC_DATA_LENGTH;
 
-				/*
-				 * Set the OWN bit and disable interrupts
-				 * for this descriptor
-				 */
-				desc->des0 = cpu_to_le32(IDMAC_DES0_OWN |
-							 IDMAC_DES0_DIC |
-							 IDMAC_DES0_CH);
+			length -= desc_len;
 
-				/* Buffer length */
-				IDMAC_SET_BUFFER1_SIZE(desc, desc_len);
+			/*
+			 * Set the OWN bit and disable interrupts
+			 * for this descriptor
+			 */
+			desc->des0 = cpu_to_le32(IDMAC_DES0_OWN |
+						 IDMAC_DES0_DIC |
+						 IDMAC_DES0_CH);
 
-				/* Physical address to DMA to/from */
-				desc->des2 = cpu_to_le32(mem_addr);
+			/* Buffer length */
+			IDMAC_SET_BUFFER1_SIZE(desc, desc_len);
 
-				/* Update physical address for the next desc */
-				mem_addr += desc_len;
+			/* Physical address to DMA to/from */
+			desc->des2 = cpu_to_le32(mem_addr);
 
-				/* Save pointer to the last descriptor */
-				desc_last = desc;
-			}
-		}
-
-		/* Set first descriptor */
-		desc_first->des0 |= cpu_to_le32(IDMAC_DES0_FD);
+			/* Update physical address for the next desc */
+			mem_addr += desc_len;
 
-		/* Set last descriptor */
-		desc_last->des0 &= cpu_to_le32(~(IDMAC_DES0_CH |
-					       IDMAC_DES0_DIC));
-		desc_last->des0 |= cpu_to_le32(IDMAC_DES0_LD);
+			/* Save pointer to the last descriptor */
+			desc_last = desc;
+		}
 	}
 
-	wmb(); /* drain writebuffer */
+	/* Set first descriptor */
+	desc_first->des0 |= cpu_to_le32(IDMAC_DES0_FD);
+
+	/* Set last descriptor */
+	desc_last->des0 &= cpu_to_le32(~(IDMAC_DES0_CH |
+				       IDMAC_DES0_DIC));
+	desc_last->des0 |= cpu_to_le32(IDMAC_DES0_LD);
 }
 
 static int dw_mci_idmac_start_dma(struct dw_mci *host, unsigned int sg_len)
 {
 	u32 temp;
 
-	dw_mci_translate_sglist(host, host->data, sg_len);
+	if (host->dma_64bit_address == 1)
+		dw_mci_prepare_desc64(host, host->data, sg_len);
+	else
+		dw_mci_prepare_desc32(host, host->data, sg_len);
+
+	/* drain writebuffer */
+	wmb();
 
 	/* Make sure to reset DMA in case we did PIO before this */
 	dw_mci_ctrl_reset(host, SDMMC_CTRL_DMA_RESET);
-- 
2.3.7

^ permalink raw reply related	[flat|nested] 11+ messages in thread

* [PATCH v2 2/4] mmc: dw_mmc: avoid race condition of cpu and IDMAC
  2016-09-02  4:14 ` [PATCH v2 1/4] mmc: dw_mmc: split out preparation of desc for IDMAC32 and IDMAC64 Shawn Lin
@ 2016-09-02  4:14   ` Shawn Lin
  2016-09-20  9:47     ` Shawn Lin
  2016-09-22  4:26     ` Jaehoon Chung
  2016-09-02  4:14   ` [PATCH v2 3/4] mmc: dw_mmc: fix misleading error print if failing to do DMA transfer Shawn Lin
                     ` (2 subsequent siblings)
  3 siblings, 2 replies; 11+ messages in thread
From: Shawn Lin @ 2016-09-02  4:14 UTC (permalink / raw)
  To: Jaehoon Chung
  Cc: Ulf Hansson, linux-mmc, linux-kernel, Doug Anderson,
	Heiko Stuebner, linux-rockchip, Shawn Lin

We could see an obvious race condition by testing that
the former write operation by IDMAC aiming to clear
the OWN bit reaches right after the later configuration of
the same desc, which makes the IDMAC be in SUSPEND
state as the OWN bit was cleared by the asynchronous
write operation of IDMAC. The bug can be very easily
reproduced on RK3288 or similar when we reduce the
running rate of system buses and keep the CPU running
faster. So as two separate masters, IDMAC and cpu
write the same descriptor stored on the same address,
and this should be protected by adding a check of the OWN
bit before preparing new descriptors.

Signed-off-by: Shawn Lin <shawn.lin@rock-chips.com>

---

Changes in v2:
- fallback to PIO mode if failing to wait for OWN bit
- cleanup polluted desc chain as the own bit error could
  occur on any place of the chain, so we need to clr the desc
  configured before that one. Use the existing function to reinit
  the desc chain. As this issue is really rare, so after applied,
  the fio/iozone stress test didn't show up performance regression.

 drivers/mmc/host/dw_mmc.c | 208 ++++++++++++++++++++++++++++------------------
 1 file changed, 129 insertions(+), 79 deletions(-)

diff --git a/drivers/mmc/host/dw_mmc.c b/drivers/mmc/host/dw_mmc.c
index 782b303..daa1c52 100644
--- a/drivers/mmc/host/dw_mmc.c
+++ b/drivers/mmc/host/dw_mmc.c
@@ -467,12 +467,87 @@ static void dw_mci_dmac_complete_dma(void *arg)
 	}
 }
 
-static inline void dw_mci_prepare_desc64(struct dw_mci *host,
+static int dw_mci_idmac_init(struct dw_mci *host)
+{
+	int i;
+
+	if (host->dma_64bit_address == 1) {
+		struct idmac_desc_64addr *p;
+		/* Number of descriptors in the ring buffer */
+		host->ring_size = PAGE_SIZE / sizeof(struct idmac_desc_64addr);
+
+		/* Forward link the descriptor list */
+		for (i = 0, p = host->sg_cpu; i < host->ring_size - 1;
+								i++, p++) {
+			p->des6 = (host->sg_dma +
+					(sizeof(struct idmac_desc_64addr) *
+							(i + 1))) & 0xffffffff;
+
+			p->des7 = (u64)(host->sg_dma +
+					(sizeof(struct idmac_desc_64addr) *
+							(i + 1))) >> 32;
+			/* Initialize reserved and buffer size fields to "0" */
+			p->des1 = 0;
+			p->des2 = 0;
+			p->des3 = 0;
+		}
+
+		/* Set the last descriptor as the end-of-ring descriptor */
+		p->des6 = host->sg_dma & 0xffffffff;
+		p->des7 = (u64)host->sg_dma >> 32;
+		p->des0 = IDMAC_DES0_ER;
+
+	} else {
+		struct idmac_desc *p;
+		/* Number of descriptors in the ring buffer */
+		host->ring_size = PAGE_SIZE / sizeof(struct idmac_desc);
+
+		/* Forward link the descriptor list */
+		for (i = 0, p = host->sg_cpu;
+		     i < host->ring_size - 1;
+		     i++, p++) {
+			p->des3 = cpu_to_le32(host->sg_dma +
+					(sizeof(struct idmac_desc) * (i + 1)));
+			p->des1 = 0;
+		}
+
+		/* Set the last descriptor as the end-of-ring descriptor */
+		p->des3 = cpu_to_le32(host->sg_dma);
+		p->des0 = cpu_to_le32(IDMAC_DES0_ER);
+	}
+
+	dw_mci_idmac_reset(host);
+
+	if (host->dma_64bit_address == 1) {
+		/* Mask out interrupts - get Tx & Rx complete only */
+		mci_writel(host, IDSTS64, IDMAC_INT_CLR);
+		mci_writel(host, IDINTEN64, SDMMC_IDMAC_INT_NI |
+				SDMMC_IDMAC_INT_RI | SDMMC_IDMAC_INT_TI);
+
+		/* Set the descriptor base address */
+		mci_writel(host, DBADDRL, host->sg_dma & 0xffffffff);
+		mci_writel(host, DBADDRU, (u64)host->sg_dma >> 32);
+
+	} else {
+		/* Mask out interrupts - get Tx & Rx complete only */
+		mci_writel(host, IDSTS, IDMAC_INT_CLR);
+		mci_writel(host, IDINTEN, SDMMC_IDMAC_INT_NI |
+				SDMMC_IDMAC_INT_RI | SDMMC_IDMAC_INT_TI);
+
+		/* Set the descriptor base address */
+		mci_writel(host, DBADDR, host->sg_dma);
+	}
+
+	return 0;
+}
+
+static inline int dw_mci_prepare_desc64(struct dw_mci *host,
 					 struct mmc_data *data,
 					 unsigned int sg_len)
 {
 	unsigned int desc_len;
 	struct idmac_desc_64addr *desc_first, *desc_last, *desc;
+	unsigned long timeout;
 	int i;
 
 	desc_first = desc_last = desc = host->sg_cpu;
@@ -489,6 +564,19 @@ static inline void dw_mci_prepare_desc64(struct dw_mci *host,
 			length -= desc_len;
 
 			/*
+			 * Wait for the former clear OWN bit operation
+			 * of IDMAC to make sure that this descriptor
+			 * isn't still owned by IDMAC as IDMAC's write
+			 * ops and CPU's read ops are asynchronous.
+			 */
+			timeout = jiffies + msecs_to_jiffies(100);
+			while (readl(&desc->des0) & IDMAC_DES0_OWN) {
+				if (time_after(jiffies, timeout))
+					goto err_own_bit;
+				udelay(10);
+			}
+
+			/*
 			 * Set the OWN bit and disable interrupts
 			 * for this descriptor
 			 */
@@ -516,15 +604,24 @@ static inline void dw_mci_prepare_desc64(struct dw_mci *host,
 	/* Set last descriptor */
 	desc_last->des0 &= ~(IDMAC_DES0_CH | IDMAC_DES0_DIC);
 	desc_last->des0 |= IDMAC_DES0_LD;
+
+	return 0;
+err_own_bit:
+	/* restore the descriptor chain as it's polluted */
+	dev_dbg(host->dev, "desciptor is still owned by IDMAC.\n");
+	memset(host->sg_cpu, 0, PAGE_SIZE);
+	dw_mci_idmac_init(host);
+	return -EINVAL;
 }
 
 
-static inline void dw_mci_prepare_desc32(struct dw_mci *host,
+static inline int dw_mci_prepare_desc32(struct dw_mci *host,
 					 struct mmc_data *data,
 					 unsigned int sg_len)
 {
 	unsigned int desc_len;
 	struct idmac_desc *desc_first, *desc_last, *desc;
+	unsigned long timeout;
 	int i;
 
 	desc_first = desc_last = desc = host->sg_cpu;
@@ -541,6 +638,20 @@ static inline void dw_mci_prepare_desc32(struct dw_mci *host,
 			length -= desc_len;
 
 			/*
+			 * Wait for the former clear OWN bit operation
+			 * of IDMAC to make sure that this descriptor
+			 * isn't still owned by IDMAC as IDMAC's write
+			 * ops and CPU's read ops are asynchronous.
+			 */
+			timeout = jiffies + msecs_to_jiffies(100);
+			while (readl(&desc->des0) &
+			       cpu_to_le32(IDMAC_DES0_OWN)) {
+				if (time_after(jiffies, timeout))
+					goto err_own_bit;
+				udelay(10);
+			}
+
+			/*
 			 * Set the OWN bit and disable interrupts
 			 * for this descriptor
 			 */
@@ -569,16 +680,28 @@ static inline void dw_mci_prepare_desc32(struct dw_mci *host,
 	desc_last->des0 &= cpu_to_le32(~(IDMAC_DES0_CH |
 				       IDMAC_DES0_DIC));
 	desc_last->des0 |= cpu_to_le32(IDMAC_DES0_LD);
+
+	return 0;
+err_own_bit:
+	/* restore the descriptor chain as it's polluted */
+	dev_dbg(host->dev, "desciptor is still owned by IDMAC.\n");
+	memset(host->sg_cpu, 0, PAGE_SIZE);
+	dw_mci_idmac_init(host);
+	return -EINVAL;
 }
 
 static int dw_mci_idmac_start_dma(struct dw_mci *host, unsigned int sg_len)
 {
 	u32 temp;
+	int ret;
 
 	if (host->dma_64bit_address == 1)
-		dw_mci_prepare_desc64(host, host->data, sg_len);
+		ret = dw_mci_prepare_desc64(host, host->data, sg_len);
 	else
-		dw_mci_prepare_desc32(host, host->data, sg_len);
+		ret = dw_mci_prepare_desc32(host, host->data, sg_len);
+
+	if (ret)
+		goto out;
 
 	/* drain writebuffer */
 	wmb();
@@ -603,81 +726,8 @@ static int dw_mci_idmac_start_dma(struct dw_mci *host, unsigned int sg_len)
 	/* Start it running */
 	mci_writel(host, PLDMND, 1);
 
-	return 0;
-}
-
-static int dw_mci_idmac_init(struct dw_mci *host)
-{
-	int i;
-
-	if (host->dma_64bit_address == 1) {
-		struct idmac_desc_64addr *p;
-		/* Number of descriptors in the ring buffer */
-		host->ring_size = PAGE_SIZE / sizeof(struct idmac_desc_64addr);
-
-		/* Forward link the descriptor list */
-		for (i = 0, p = host->sg_cpu; i < host->ring_size - 1;
-								i++, p++) {
-			p->des6 = (host->sg_dma +
-					(sizeof(struct idmac_desc_64addr) *
-							(i + 1))) & 0xffffffff;
-
-			p->des7 = (u64)(host->sg_dma +
-					(sizeof(struct idmac_desc_64addr) *
-							(i + 1))) >> 32;
-			/* Initialize reserved and buffer size fields to "0" */
-			p->des1 = 0;
-			p->des2 = 0;
-			p->des3 = 0;
-		}
-
-		/* Set the last descriptor as the end-of-ring descriptor */
-		p->des6 = host->sg_dma & 0xffffffff;
-		p->des7 = (u64)host->sg_dma >> 32;
-		p->des0 = IDMAC_DES0_ER;
-
-	} else {
-		struct idmac_desc *p;
-		/* Number of descriptors in the ring buffer */
-		host->ring_size = PAGE_SIZE / sizeof(struct idmac_desc);
-
-		/* Forward link the descriptor list */
-		for (i = 0, p = host->sg_cpu;
-		     i < host->ring_size - 1;
-		     i++, p++) {
-			p->des3 = cpu_to_le32(host->sg_dma +
-					(sizeof(struct idmac_desc) * (i + 1)));
-			p->des1 = 0;
-		}
-
-		/* Set the last descriptor as the end-of-ring descriptor */
-		p->des3 = cpu_to_le32(host->sg_dma);
-		p->des0 = cpu_to_le32(IDMAC_DES0_ER);
-	}
-
-	dw_mci_idmac_reset(host);
-
-	if (host->dma_64bit_address == 1) {
-		/* Mask out interrupts - get Tx & Rx complete only */
-		mci_writel(host, IDSTS64, IDMAC_INT_CLR);
-		mci_writel(host, IDINTEN64, SDMMC_IDMAC_INT_NI |
-				SDMMC_IDMAC_INT_RI | SDMMC_IDMAC_INT_TI);
-
-		/* Set the descriptor base address */
-		mci_writel(host, DBADDRL, host->sg_dma & 0xffffffff);
-		mci_writel(host, DBADDRU, (u64)host->sg_dma >> 32);
-
-	} else {
-		/* Mask out interrupts - get Tx & Rx complete only */
-		mci_writel(host, IDSTS, IDMAC_INT_CLR);
-		mci_writel(host, IDINTEN, SDMMC_IDMAC_INT_NI |
-				SDMMC_IDMAC_INT_RI | SDMMC_IDMAC_INT_TI);
-
-		/* Set the descriptor base address */
-		mci_writel(host, DBADDR, host->sg_dma);
-	}
-
-	return 0;
+out:
+	return ret;
 }
 
 static const struct dw_mci_dma_ops dw_mci_idmac_ops = {
-- 
2.3.7

^ permalink raw reply related	[flat|nested] 11+ messages in thread

* [PATCH v2 3/4] mmc: dw_mmc: fix misleading error print if failing to do DMA transfer
  2016-09-02  4:14 ` [PATCH v2 1/4] mmc: dw_mmc: split out preparation of desc for IDMAC32 and IDMAC64 Shawn Lin
  2016-09-02  4:14   ` [PATCH v2 2/4] mmc: dw_mmc: avoid race condition of cpu and IDMAC Shawn Lin
@ 2016-09-02  4:14   ` Shawn Lin
  2016-09-22  4:26     ` Jaehoon Chung
  2016-09-02  4:14   ` [PATCH v2 4/4] mmc: dw_mmc: use macro to define ring buffer size Shawn Lin
  2016-09-22  4:25   ` [PATCH v2 1/4] mmc: dw_mmc: split out preparation of desc for IDMAC32 and IDMAC64 Jaehoon Chung
  3 siblings, 1 reply; 11+ messages in thread
From: Shawn Lin @ 2016-09-02  4:14 UTC (permalink / raw)
  To: Jaehoon Chung
  Cc: Ulf Hansson, linux-mmc, linux-kernel, Doug Anderson,
	Heiko Stuebner, linux-rockchip, Shawn Lin

The original log didn't point out that we could still
finish this transfer by PIO mode even if we fail to use
DMA. And it should be kept at debug level instead of
error level.

Signed-off-by: Shawn Lin <shawn.lin@rock-chips.com>
---

Changes in v2: None

 drivers/mmc/host/dw_mmc.c | 6 ++++--
 1 file changed, 4 insertions(+), 2 deletions(-)

diff --git a/drivers/mmc/host/dw_mmc.c b/drivers/mmc/host/dw_mmc.c
index daa1c52..833b6ad 100644
--- a/drivers/mmc/host/dw_mmc.c
+++ b/drivers/mmc/host/dw_mmc.c
@@ -1057,8 +1057,10 @@ static int dw_mci_submit_data_dma(struct dw_mci *host, struct mmc_data *data)
 	spin_unlock_irqrestore(&host->irq_lock, irqflags);
 
 	if (host->dma_ops->start(host, sg_len)) {
-		/* We can't do DMA */
-		dev_err(host->dev, "%s: failed to start DMA.\n", __func__);
+		/* We can't do DMA, try PIO for this one */
+		dev_dbg(host->dev,
+			"%s: fall back to PIO mode for current transfer\n",
+			__func__);
 		return -ENODEV;
 	}
 
-- 
2.3.7

^ permalink raw reply related	[flat|nested] 11+ messages in thread

* [PATCH v2 4/4] mmc: dw_mmc: use macro to define ring buffer size
  2016-09-02  4:14 ` [PATCH v2 1/4] mmc: dw_mmc: split out preparation of desc for IDMAC32 and IDMAC64 Shawn Lin
  2016-09-02  4:14   ` [PATCH v2 2/4] mmc: dw_mmc: avoid race condition of cpu and IDMAC Shawn Lin
  2016-09-02  4:14   ` [PATCH v2 3/4] mmc: dw_mmc: fix misleading error print if failing to do DMA transfer Shawn Lin
@ 2016-09-02  4:14   ` Shawn Lin
  2016-09-22  4:27     ` Jaehoon Chung
  2016-09-22  4:25   ` [PATCH v2 1/4] mmc: dw_mmc: split out preparation of desc for IDMAC32 and IDMAC64 Jaehoon Chung
  3 siblings, 1 reply; 11+ messages in thread
From: Shawn Lin @ 2016-09-02  4:14 UTC (permalink / raw)
  To: Jaehoon Chung
  Cc: Ulf Hansson, linux-mmc, linux-kernel, Doug Anderson,
	Heiko Stuebner, linux-rockchip, Shawn Lin

It's very error-prone, as we might forget
to replace all PAGE_SIZEs with new values if we try
to modify the ring buffer size for whatever reason.
Let's use a macro to define it.

Signed-off-by: Shawn Lin <shawn.lin@rock-chips.com>
---

Changes in v2: None

 drivers/mmc/host/dw_mmc.c | 15 ++++++++++-----
 1 file changed, 10 insertions(+), 5 deletions(-)

diff --git a/drivers/mmc/host/dw_mmc.c b/drivers/mmc/host/dw_mmc.c
index 833b6ad..6e5926b 100644
--- a/drivers/mmc/host/dw_mmc.c
+++ b/drivers/mmc/host/dw_mmc.c
@@ -61,6 +61,8 @@
 				 SDMMC_IDMAC_INT_FBE | SDMMC_IDMAC_INT_RI | \
 				 SDMMC_IDMAC_INT_TI)
 
+#define DESC_RING_BUF_SZ	PAGE_SIZE
+
 struct idmac_desc_64addr {
 	u32		des0;	/* Control Descriptor */
 
@@ -474,7 +476,8 @@ static int dw_mci_idmac_init(struct dw_mci *host)
 	if (host->dma_64bit_address == 1) {
 		struct idmac_desc_64addr *p;
 		/* Number of descriptors in the ring buffer */
-		host->ring_size = PAGE_SIZE / sizeof(struct idmac_desc_64addr);
+		host->ring_size =
+			DESC_RING_BUF_SZ / sizeof(struct idmac_desc_64addr);
 
 		/* Forward link the descriptor list */
 		for (i = 0, p = host->sg_cpu; i < host->ring_size - 1;
@@ -500,7 +503,8 @@ static int dw_mci_idmac_init(struct dw_mci *host)
 	} else {
 		struct idmac_desc *p;
 		/* Number of descriptors in the ring buffer */
-		host->ring_size = PAGE_SIZE / sizeof(struct idmac_desc);
+		host->ring_size =
+			DESC_RING_BUF_SZ / sizeof(struct idmac_desc);
 
 		/* Forward link the descriptor list */
 		for (i = 0, p = host->sg_cpu;
@@ -609,7 +613,7 @@ static inline int dw_mci_prepare_desc64(struct dw_mci *host,
 err_own_bit:
 	/* restore the descriptor chain as it's polluted */
 	dev_dbg(host->dev, "desciptor is still owned by IDMAC.\n");
-	memset(host->sg_cpu, 0, PAGE_SIZE);
+	memset(host->sg_cpu, 0, DESC_RING_BUF_SZ);
 	dw_mci_idmac_init(host);
 	return -EINVAL;
 }
@@ -685,7 +689,7 @@ static inline int dw_mci_prepare_desc32(struct dw_mci *host,
 err_own_bit:
 	/* restore the descriptor chain as it's polluted */
 	dev_dbg(host->dev, "desciptor is still owned by IDMAC.\n");
-	memset(host->sg_cpu, 0, PAGE_SIZE);
+	memset(host->sg_cpu, 0, DESC_RING_BUF_SZ);
 	dw_mci_idmac_init(host);
 	return -EINVAL;
 }
@@ -2750,7 +2754,8 @@ static void dw_mci_init_dma(struct dw_mci *host)
 		}
 
 		/* Alloc memory for sg translation */
-		host->sg_cpu = dmam_alloc_coherent(host->dev, PAGE_SIZE,
+		host->sg_cpu = dmam_alloc_coherent(host->dev,
+						   DESC_RING_BUF_SZ,
 						   &host->sg_dma, GFP_KERNEL);
 		if (!host->sg_cpu) {
 			dev_err(host->dev,
-- 
2.3.7

^ permalink raw reply related	[flat|nested] 11+ messages in thread

* Re: [PATCH v2 2/4] mmc: dw_mmc: avoid race condition of cpu and IDMAC
  2016-09-02  4:14   ` [PATCH v2 2/4] mmc: dw_mmc: avoid race condition of cpu and IDMAC Shawn Lin
@ 2016-09-20  9:47     ` Shawn Lin
  2016-09-20  9:49       ` Jaehoon Chung
  2016-09-22  4:26     ` Jaehoon Chung
  1 sibling, 1 reply; 11+ messages in thread
From: Shawn Lin @ 2016-09-20  9:47 UTC (permalink / raw)
  To: Shawn Lin, Jaehoon Chung
  Cc: shawn.lin, Ulf Hansson, linux-mmc, linux-kernel, Doug Anderson,
	Heiko Stuebner, linux-rockchip

Hi Jaehoon,

Friendly ping... :)

On 2016/9/2 12:14, Shawn Lin wrote:
> We could see an obvious race condition by test that
> the former write operation by IDMAC aiming to clear
> OWN bit reach right after the later configuration of
> the same desc, which makes the IDMAC be in SUSPEND
> state as the OWN bit was cleared by the asynchronous
> write operation of IDMAC. The bug can be very easy
> reproduced on RK3288 or similar when we reduce the
> running rate of system buses and keep the CPU running
> faster. So as two separate masters, IDMAC and cpu
> write the same descriptor stored on the same address,
> and this should be protected by adding check of OWN
> bit before preparing new descriptors.
>
> Signed-off-by: Shawn Lin <shawn.lin@rock-chips.com>
>
> ---
>
> Changes in v2:
> - fallback to PIO mode if failing to wait for OWN bit
> - cleanup polluted desc chain as the own bit error could
>   occur on any place of the chain, so we need to clr the desc
>   configured before that one. Use the exist function to reinit
>   the desc chain. As this issue is really rare, so after applied,
>   the fio/iozone stress test didn't show up performance regression.
>
>  drivers/mmc/host/dw_mmc.c | 208 ++++++++++++++++++++++++++++------------------
>  1 file changed, 129 insertions(+), 79 deletions(-)
>
> diff --git a/drivers/mmc/host/dw_mmc.c b/drivers/mmc/host/dw_mmc.c
> index 782b303..daa1c52 100644
> --- a/drivers/mmc/host/dw_mmc.c
> +++ b/drivers/mmc/host/dw_mmc.c
> @@ -467,12 +467,87 @@ static void dw_mci_dmac_complete_dma(void *arg)
>  	}
>  }
>
> -static inline void dw_mci_prepare_desc64(struct dw_mci *host,
> +static int dw_mci_idmac_init(struct dw_mci *host)
> +{
> +	int i;
> +
> +	if (host->dma_64bit_address == 1) {
> +		struct idmac_desc_64addr *p;
> +		/* Number of descriptors in the ring buffer */
> +		host->ring_size = PAGE_SIZE / sizeof(struct idmac_desc_64addr);
> +
> +		/* Forward link the descriptor list */
> +		for (i = 0, p = host->sg_cpu; i < host->ring_size - 1;
> +								i++, p++) {
> +			p->des6 = (host->sg_dma +
> +					(sizeof(struct idmac_desc_64addr) *
> +							(i + 1))) & 0xffffffff;
> +
> +			p->des7 = (u64)(host->sg_dma +
> +					(sizeof(struct idmac_desc_64addr) *
> +							(i + 1))) >> 32;
> +			/* Initialize reserved and buffer size fields to "0" */
> +			p->des1 = 0;
> +			p->des2 = 0;
> +			p->des3 = 0;
> +		}
> +
> +		/* Set the last descriptor as the end-of-ring descriptor */
> +		p->des6 = host->sg_dma & 0xffffffff;
> +		p->des7 = (u64)host->sg_dma >> 32;
> +		p->des0 = IDMAC_DES0_ER;
> +
> +	} else {
> +		struct idmac_desc *p;
> +		/* Number of descriptors in the ring buffer */
> +		host->ring_size = PAGE_SIZE / sizeof(struct idmac_desc);
> +
> +		/* Forward link the descriptor list */
> +		for (i = 0, p = host->sg_cpu;
> +		     i < host->ring_size - 1;
> +		     i++, p++) {
> +			p->des3 = cpu_to_le32(host->sg_dma +
> +					(sizeof(struct idmac_desc) * (i + 1)));
> +			p->des1 = 0;
> +		}
> +
> +		/* Set the last descriptor as the end-of-ring descriptor */
> +		p->des3 = cpu_to_le32(host->sg_dma);
> +		p->des0 = cpu_to_le32(IDMAC_DES0_ER);
> +	}
> +
> +	dw_mci_idmac_reset(host);
> +
> +	if (host->dma_64bit_address == 1) {
> +		/* Mask out interrupts - get Tx & Rx complete only */
> +		mci_writel(host, IDSTS64, IDMAC_INT_CLR);
> +		mci_writel(host, IDINTEN64, SDMMC_IDMAC_INT_NI |
> +				SDMMC_IDMAC_INT_RI | SDMMC_IDMAC_INT_TI);
> +
> +		/* Set the descriptor base address */
> +		mci_writel(host, DBADDRL, host->sg_dma & 0xffffffff);
> +		mci_writel(host, DBADDRU, (u64)host->sg_dma >> 32);
> +
> +	} else {
> +		/* Mask out interrupts - get Tx & Rx complete only */
> +		mci_writel(host, IDSTS, IDMAC_INT_CLR);
> +		mci_writel(host, IDINTEN, SDMMC_IDMAC_INT_NI |
> +				SDMMC_IDMAC_INT_RI | SDMMC_IDMAC_INT_TI);
> +
> +		/* Set the descriptor base address */
> +		mci_writel(host, DBADDR, host->sg_dma);
> +	}
> +
> +	return 0;
> +}
> +
> +static inline int dw_mci_prepare_desc64(struct dw_mci *host,
>  					 struct mmc_data *data,
>  					 unsigned int sg_len)
>  {
>  	unsigned int desc_len;
>  	struct idmac_desc_64addr *desc_first, *desc_last, *desc;
> +	unsigned long timeout;
>  	int i;
>
>  	desc_first = desc_last = desc = host->sg_cpu;
> @@ -489,6 +564,19 @@ static inline void dw_mci_prepare_desc64(struct dw_mci *host,
>  			length -= desc_len;
>
>  			/*
> +			 * Wait for the former clear OWN bit operation
> +			 * of IDMAC to make sure that this descriptor
> +			 * isn't still owned by IDMAC as IDMAC's write
> +			 * ops and CPU's read ops are asynchronous.
> +			 */
> +			timeout = jiffies + msecs_to_jiffies(100);
> +			while (readl(&desc->des0) & IDMAC_DES0_OWN) {
> +				if (time_after(jiffies, timeout))
> +					goto err_own_bit;
> +				udelay(10);
> +			}
> +
> +			/*
>  			 * Set the OWN bit and disable interrupts
>  			 * for this descriptor
>  			 */
> @@ -516,15 +604,24 @@ static inline void dw_mci_prepare_desc64(struct dw_mci *host,
>  	/* Set last descriptor */
>  	desc_last->des0 &= ~(IDMAC_DES0_CH | IDMAC_DES0_DIC);
>  	desc_last->des0 |= IDMAC_DES0_LD;
> +
> +	return 0;
> +err_own_bit:
> +	/* restore the descriptor chain as it's polluted */
> +	dev_dbg(host->dev, "desciptor is still owned by IDMAC.\n");
> +	memset(host->sg_cpu, 0, PAGE_SIZE);
> +	dw_mci_idmac_init(host);
> +	return -EINVAL;
>  }
>
>
> -static inline void dw_mci_prepare_desc32(struct dw_mci *host,
> +static inline int dw_mci_prepare_desc32(struct dw_mci *host,
>  					 struct mmc_data *data,
>  					 unsigned int sg_len)
>  {
>  	unsigned int desc_len;
>  	struct idmac_desc *desc_first, *desc_last, *desc;
> +	unsigned long timeout;
>  	int i;
>
>  	desc_first = desc_last = desc = host->sg_cpu;
> @@ -541,6 +638,20 @@ static inline void dw_mci_prepare_desc32(struct dw_mci *host,
>  			length -= desc_len;
>
>  			/*
> +			 * Wait for the former clear OWN bit operation
> +			 * of IDMAC to make sure that this descriptor
> +			 * isn't still owned by IDMAC as IDMAC's write
> +			 * ops and CPU's read ops are asynchronous.
> +			 */
> +			timeout = jiffies + msecs_to_jiffies(100);
> +			while (readl(&desc->des0) &
> +			       cpu_to_le32(IDMAC_DES0_OWN)) {
> +				if (time_after(jiffies, timeout))
> +					goto err_own_bit;
> +				udelay(10);
> +			}
> +
> +			/*
>  			 * Set the OWN bit and disable interrupts
>  			 * for this descriptor
>  			 */
> @@ -569,16 +680,28 @@ static inline void dw_mci_prepare_desc32(struct dw_mci *host,
>  	desc_last->des0 &= cpu_to_le32(~(IDMAC_DES0_CH |
>  				       IDMAC_DES0_DIC));
>  	desc_last->des0 |= cpu_to_le32(IDMAC_DES0_LD);
> +
> +	return 0;
> +err_own_bit:
> +	/* restore the descriptor chain as it's polluted */
> +	dev_dbg(host->dev, "desciptor is still owned by IDMAC.\n");
> +	memset(host->sg_cpu, 0, PAGE_SIZE);
> +	dw_mci_idmac_init(host);
> +	return -EINVAL;
>  }
>
>  static int dw_mci_idmac_start_dma(struct dw_mci *host, unsigned int sg_len)
>  {
>  	u32 temp;
> +	int ret;
>
>  	if (host->dma_64bit_address == 1)
> -		dw_mci_prepare_desc64(host, host->data, sg_len);
> +		ret = dw_mci_prepare_desc64(host, host->data, sg_len);
>  	else
> -		dw_mci_prepare_desc32(host, host->data, sg_len);
> +		ret = dw_mci_prepare_desc32(host, host->data, sg_len);
> +
> +	if (ret)
> +		goto out;
>
>  	/* drain writebuffer */
>  	wmb();
> @@ -603,81 +726,8 @@ static int dw_mci_idmac_start_dma(struct dw_mci *host, unsigned int sg_len)
>  	/* Start it running */
>  	mci_writel(host, PLDMND, 1);
>
> -	return 0;
> -}
> -
> -static int dw_mci_idmac_init(struct dw_mci *host)
> -{
> -	int i;
> -
> -	if (host->dma_64bit_address == 1) {
> -		struct idmac_desc_64addr *p;
> -		/* Number of descriptors in the ring buffer */
> -		host->ring_size = PAGE_SIZE / sizeof(struct idmac_desc_64addr);
> -
> -		/* Forward link the descriptor list */
> -		for (i = 0, p = host->sg_cpu; i < host->ring_size - 1;
> -								i++, p++) {
> -			p->des6 = (host->sg_dma +
> -					(sizeof(struct idmac_desc_64addr) *
> -							(i + 1))) & 0xffffffff;
> -
> -			p->des7 = (u64)(host->sg_dma +
> -					(sizeof(struct idmac_desc_64addr) *
> -							(i + 1))) >> 32;
> -			/* Initialize reserved and buffer size fields to "0" */
> -			p->des1 = 0;
> -			p->des2 = 0;
> -			p->des3 = 0;
> -		}
> -
> -		/* Set the last descriptor as the end-of-ring descriptor */
> -		p->des6 = host->sg_dma & 0xffffffff;
> -		p->des7 = (u64)host->sg_dma >> 32;
> -		p->des0 = IDMAC_DES0_ER;
> -
> -	} else {
> -		struct idmac_desc *p;
> -		/* Number of descriptors in the ring buffer */
> -		host->ring_size = PAGE_SIZE / sizeof(struct idmac_desc);
> -
> -		/* Forward link the descriptor list */
> -		for (i = 0, p = host->sg_cpu;
> -		     i < host->ring_size - 1;
> -		     i++, p++) {
> -			p->des3 = cpu_to_le32(host->sg_dma +
> -					(sizeof(struct idmac_desc) * (i + 1)));
> -			p->des1 = 0;
> -		}
> -
> -		/* Set the last descriptor as the end-of-ring descriptor */
> -		p->des3 = cpu_to_le32(host->sg_dma);
> -		p->des0 = cpu_to_le32(IDMAC_DES0_ER);
> -	}
> -
> -	dw_mci_idmac_reset(host);
> -
> -	if (host->dma_64bit_address == 1) {
> -		/* Mask out interrupts - get Tx & Rx complete only */
> -		mci_writel(host, IDSTS64, IDMAC_INT_CLR);
> -		mci_writel(host, IDINTEN64, SDMMC_IDMAC_INT_NI |
> -				SDMMC_IDMAC_INT_RI | SDMMC_IDMAC_INT_TI);
> -
> -		/* Set the descriptor base address */
> -		mci_writel(host, DBADDRL, host->sg_dma & 0xffffffff);
> -		mci_writel(host, DBADDRU, (u64)host->sg_dma >> 32);
> -
> -	} else {
> -		/* Mask out interrupts - get Tx & Rx complete only */
> -		mci_writel(host, IDSTS, IDMAC_INT_CLR);
> -		mci_writel(host, IDINTEN, SDMMC_IDMAC_INT_NI |
> -				SDMMC_IDMAC_INT_RI | SDMMC_IDMAC_INT_TI);
> -
> -		/* Set the descriptor base address */
> -		mci_writel(host, DBADDR, host->sg_dma);
> -	}
> -
> -	return 0;
> +out:
> +	return ret;
>  }
>
>  static const struct dw_mci_dma_ops dw_mci_idmac_ops = {
>


-- 
Best Regards
Shawn Lin

^ permalink raw reply	[flat|nested] 11+ messages in thread

* Re: [PATCH v2 2/4] mmc: dw_mmc: avoid race condition of cpu and IDMAC
  2016-09-20  9:47     ` Shawn Lin
@ 2016-09-20  9:49       ` Jaehoon Chung
  2016-09-21  1:03         ` Shawn Lin
  0 siblings, 1 reply; 11+ messages in thread
From: Jaehoon Chung @ 2016-09-20  9:49 UTC (permalink / raw)
  To: Shawn Lin
  Cc: Ulf Hansson, linux-mmc, linux-kernel, Doug Anderson,
	Heiko Stuebner, linux-rockchip

Hi Shawn,

On 09/20/2016 06:47 PM, Shawn Lin wrote:
> Hi Jaehoon,
> 
> Friendly ping... :)

Thanks for reminding! :) I forgot your patch-set..sorry!

Best Regards,
Jaehoon Chung
> 
> On 2016/9/2 12:14, Shawn Lin wrote:
>> We could see an obvious race condition by test that
>> the former write operation by IDMAC aiming to clear
>> OWN bit reach right after the later configuration of
>> the same desc, which makes the IDMAC be in SUSPEND
>> state as the OWN bit was cleared by the asynchronous
>> write operation of IDMAC. The bug can be very easily
>> reproduced on RK3288 or similar when we reduce the
>> running rate of system buses and keep the CPU running
>> faster. So as two separate masters, IDMAC and cpu
>> write the same descriptor stored on the same address,
>> and this should be protected by adding check of OWN
>> bit before preparing new descriptors.
>>
>> Signed-off-by: Shawn Lin <shawn.lin@rock-chips.com>
>>
>> ---
>>
>> Changes in v2:
>> - fallback to PIO mode if failing to wait for OWN bit
>> - cleanup polluted desc chain as the own bit error could
>>   occur at any place of the chain, so we need to clr the desc
>>   configured before that one. Use the existing function to reinit
>>   the desc chain. As this issue is really rare, so after applied,
>>   the fio/iozone stress test didn't show up performance regression.
>>
>>  drivers/mmc/host/dw_mmc.c | 208 ++++++++++++++++++++++++++++------------------
>>  1 file changed, 129 insertions(+), 79 deletions(-)
>>
>> diff --git a/drivers/mmc/host/dw_mmc.c b/drivers/mmc/host/dw_mmc.c
>> index 782b303..daa1c52 100644
>> --- a/drivers/mmc/host/dw_mmc.c
>> +++ b/drivers/mmc/host/dw_mmc.c
>> @@ -467,12 +467,87 @@ static void dw_mci_dmac_complete_dma(void *arg)
>>      }
>>  }
>>
>> -static inline void dw_mci_prepare_desc64(struct dw_mci *host,
>> +static int dw_mci_idmac_init(struct dw_mci *host)
>> +{
>> +    int i;
>> +
>> +    if (host->dma_64bit_address == 1) {
>> +        struct idmac_desc_64addr *p;
>> +        /* Number of descriptors in the ring buffer */
>> +        host->ring_size = PAGE_SIZE / sizeof(struct idmac_desc_64addr);
>> +
>> +        /* Forward link the descriptor list */
>> +        for (i = 0, p = host->sg_cpu; i < host->ring_size - 1;
>> +                                i++, p++) {
>> +            p->des6 = (host->sg_dma +
>> +                    (sizeof(struct idmac_desc_64addr) *
>> +                            (i + 1))) & 0xffffffff;
>> +
>> +            p->des7 = (u64)(host->sg_dma +
>> +                    (sizeof(struct idmac_desc_64addr) *
>> +                            (i + 1))) >> 32;
>> +            /* Initialize reserved and buffer size fields to "0" */
>> +            p->des1 = 0;
>> +            p->des2 = 0;
>> +            p->des3 = 0;
>> +        }
>> +
>> +        /* Set the last descriptor as the end-of-ring descriptor */
>> +        p->des6 = host->sg_dma & 0xffffffff;
>> +        p->des7 = (u64)host->sg_dma >> 32;
>> +        p->des0 = IDMAC_DES0_ER;
>> +
>> +    } else {
>> +        struct idmac_desc *p;
>> +        /* Number of descriptors in the ring buffer */
>> +        host->ring_size = PAGE_SIZE / sizeof(struct idmac_desc);
>> +
>> +        /* Forward link the descriptor list */
>> +        for (i = 0, p = host->sg_cpu;
>> +             i < host->ring_size - 1;
>> +             i++, p++) {
>> +            p->des3 = cpu_to_le32(host->sg_dma +
>> +                    (sizeof(struct idmac_desc) * (i + 1)));
>> +            p->des1 = 0;
>> +        }
>> +
>> +        /* Set the last descriptor as the end-of-ring descriptor */
>> +        p->des3 = cpu_to_le32(host->sg_dma);
>> +        p->des0 = cpu_to_le32(IDMAC_DES0_ER);
>> +    }
>> +
>> +    dw_mci_idmac_reset(host);
>> +
>> +    if (host->dma_64bit_address == 1) {
>> +        /* Mask out interrupts - get Tx & Rx complete only */
>> +        mci_writel(host, IDSTS64, IDMAC_INT_CLR);
>> +        mci_writel(host, IDINTEN64, SDMMC_IDMAC_INT_NI |
>> +                SDMMC_IDMAC_INT_RI | SDMMC_IDMAC_INT_TI);
>> +
>> +        /* Set the descriptor base address */
>> +        mci_writel(host, DBADDRL, host->sg_dma & 0xffffffff);
>> +        mci_writel(host, DBADDRU, (u64)host->sg_dma >> 32);
>> +
>> +    } else {
>> +        /* Mask out interrupts - get Tx & Rx complete only */
>> +        mci_writel(host, IDSTS, IDMAC_INT_CLR);
>> +        mci_writel(host, IDINTEN, SDMMC_IDMAC_INT_NI |
>> +                SDMMC_IDMAC_INT_RI | SDMMC_IDMAC_INT_TI);
>> +
>> +        /* Set the descriptor base address */
>> +        mci_writel(host, DBADDR, host->sg_dma);
>> +    }
>> +
>> +    return 0;
>> +}
>> +
>> +static inline int dw_mci_prepare_desc64(struct dw_mci *host,
>>                       struct mmc_data *data,
>>                       unsigned int sg_len)
>>  {
>>      unsigned int desc_len;
>>      struct idmac_desc_64addr *desc_first, *desc_last, *desc;
>> +    unsigned long timeout;
>>      int i;
>>
>>      desc_first = desc_last = desc = host->sg_cpu;
>> @@ -489,6 +564,19 @@ static inline void dw_mci_prepare_desc64(struct dw_mci *host,
>>              length -= desc_len;
>>
>>              /*
>> +             * Wait for the former clear OWN bit operation
>> +             * of IDMAC to make sure that this descriptor
>> +             * isn't still owned by IDMAC as IDMAC's write
>> +             * ops and CPU's read ops are asynchronous.
>> +             */
>> +            timeout = jiffies + msecs_to_jiffies(100);
>> +            while (readl(&desc->des0) & IDMAC_DES0_OWN) {
>> +                if (time_after(jiffies, timeout))
>> +                    goto err_own_bit;
>> +                udelay(10);
>> +            }
>> +
>> +            /*
>>               * Set the OWN bit and disable interrupts
>>               * for this descriptor
>>               */
>> @@ -516,15 +604,24 @@ static inline void dw_mci_prepare_desc64(struct dw_mci *host,
>>      /* Set last descriptor */
>>      desc_last->des0 &= ~(IDMAC_DES0_CH | IDMAC_DES0_DIC);
>>      desc_last->des0 |= IDMAC_DES0_LD;
>> +
>> +    return 0;
>> +err_own_bit:
>> +    /* restore the descriptor chain as it's polluted */
>> +    dev_dbg(host->dev, "desciptor is still owned by IDMAC.\n");
>> +    memset(host->sg_cpu, 0, PAGE_SIZE);
>> +    dw_mci_idmac_init(host);
>> +    return -EINVAL;
>>  }
>>
>>
>> -static inline void dw_mci_prepare_desc32(struct dw_mci *host,
>> +static inline int dw_mci_prepare_desc32(struct dw_mci *host,
>>                       struct mmc_data *data,
>>                       unsigned int sg_len)
>>  {
>>      unsigned int desc_len;
>>      struct idmac_desc *desc_first, *desc_last, *desc;
>> +    unsigned long timeout;
>>      int i;
>>
>>      desc_first = desc_last = desc = host->sg_cpu;
>> @@ -541,6 +638,20 @@ static inline void dw_mci_prepare_desc32(struct dw_mci *host,
>>              length -= desc_len;
>>
>>              /*
>> +             * Wait for the former clear OWN bit operation
>> +             * of IDMAC to make sure that this descriptor
>> +             * isn't still owned by IDMAC as IDMAC's write
>> +             * ops and CPU's read ops are asynchronous.
>> +             */
>> +            timeout = jiffies + msecs_to_jiffies(100);
>> +            while (readl(&desc->des0) &
>> +                   cpu_to_le32(IDMAC_DES0_OWN)) {
>> +                if (time_after(jiffies, timeout))
>> +                    goto err_own_bit;
>> +                udelay(10);
>> +            }
>> +
>> +            /*
>>               * Set the OWN bit and disable interrupts
>>               * for this descriptor
>>               */
>> @@ -569,16 +680,28 @@ static inline void dw_mci_prepare_desc32(struct dw_mci *host,
>>      desc_last->des0 &= cpu_to_le32(~(IDMAC_DES0_CH |
>>                         IDMAC_DES0_DIC));
>>      desc_last->des0 |= cpu_to_le32(IDMAC_DES0_LD);
>> +
>> +    return 0;
>> +err_own_bit:
>> +    /* restore the descriptor chain as it's polluted */
>> +    dev_dbg(host->dev, "desciptor is still owned by IDMAC.\n");
>> +    memset(host->sg_cpu, 0, PAGE_SIZE);
>> +    dw_mci_idmac_init(host);
>> +    return -EINVAL;
>>  }
>>
>>  static int dw_mci_idmac_start_dma(struct dw_mci *host, unsigned int sg_len)
>>  {
>>      u32 temp;
>> +    int ret;
>>
>>      if (host->dma_64bit_address == 1)
>> -        dw_mci_prepare_desc64(host, host->data, sg_len);
>> +        ret = dw_mci_prepare_desc64(host, host->data, sg_len);
>>      else
>> -        dw_mci_prepare_desc32(host, host->data, sg_len);
>> +        ret = dw_mci_prepare_desc32(host, host->data, sg_len);
>> +
>> +    if (ret)
>> +        goto out;
>>
>>      /* drain writebuffer */
>>      wmb();
>> @@ -603,81 +726,8 @@ static int dw_mci_idmac_start_dma(struct dw_mci *host, unsigned int sg_len)
>>      /* Start it running */
>>      mci_writel(host, PLDMND, 1);
>>
>> -    return 0;
>> -}
>> -
>> -static int dw_mci_idmac_init(struct dw_mci *host)
>> -{
>> -    int i;
>> -
>> -    if (host->dma_64bit_address == 1) {
>> -        struct idmac_desc_64addr *p;
>> -        /* Number of descriptors in the ring buffer */
>> -        host->ring_size = PAGE_SIZE / sizeof(struct idmac_desc_64addr);
>> -
>> -        /* Forward link the descriptor list */
>> -        for (i = 0, p = host->sg_cpu; i < host->ring_size - 1;
>> -                                i++, p++) {
>> -            p->des6 = (host->sg_dma +
>> -                    (sizeof(struct idmac_desc_64addr) *
>> -                            (i + 1))) & 0xffffffff;
>> -
>> -            p->des7 = (u64)(host->sg_dma +
>> -                    (sizeof(struct idmac_desc_64addr) *
>> -                            (i + 1))) >> 32;
>> -            /* Initialize reserved and buffer size fields to "0" */
>> -            p->des1 = 0;
>> -            p->des2 = 0;
>> -            p->des3 = 0;
>> -        }
>> -
>> -        /* Set the last descriptor as the end-of-ring descriptor */
>> -        p->des6 = host->sg_dma & 0xffffffff;
>> -        p->des7 = (u64)host->sg_dma >> 32;
>> -        p->des0 = IDMAC_DES0_ER;
>> -
>> -    } else {
>> -        struct idmac_desc *p;
>> -        /* Number of descriptors in the ring buffer */
>> -        host->ring_size = PAGE_SIZE / sizeof(struct idmac_desc);
>> -
>> -        /* Forward link the descriptor list */
>> -        for (i = 0, p = host->sg_cpu;
>> -             i < host->ring_size - 1;
>> -             i++, p++) {
>> -            p->des3 = cpu_to_le32(host->sg_dma +
>> -                    (sizeof(struct idmac_desc) * (i + 1)));
>> -            p->des1 = 0;
>> -        }
>> -
>> -        /* Set the last descriptor as the end-of-ring descriptor */
>> -        p->des3 = cpu_to_le32(host->sg_dma);
>> -        p->des0 = cpu_to_le32(IDMAC_DES0_ER);
>> -    }
>> -
>> -    dw_mci_idmac_reset(host);
>> -
>> -    if (host->dma_64bit_address == 1) {
>> -        /* Mask out interrupts - get Tx & Rx complete only */
>> -        mci_writel(host, IDSTS64, IDMAC_INT_CLR);
>> -        mci_writel(host, IDINTEN64, SDMMC_IDMAC_INT_NI |
>> -                SDMMC_IDMAC_INT_RI | SDMMC_IDMAC_INT_TI);
>> -
>> -        /* Set the descriptor base address */
>> -        mci_writel(host, DBADDRL, host->sg_dma & 0xffffffff);
>> -        mci_writel(host, DBADDRU, (u64)host->sg_dma >> 32);
>> -
>> -    } else {
>> -        /* Mask out interrupts - get Tx & Rx complete only */
>> -        mci_writel(host, IDSTS, IDMAC_INT_CLR);
>> -        mci_writel(host, IDINTEN, SDMMC_IDMAC_INT_NI |
>> -                SDMMC_IDMAC_INT_RI | SDMMC_IDMAC_INT_TI);
>> -
>> -        /* Set the descriptor base address */
>> -        mci_writel(host, DBADDR, host->sg_dma);
>> -    }
>> -
>> -    return 0;
>> +out:
>> +    return ret;
>>  }
>>
>>  static const struct dw_mci_dma_ops dw_mci_idmac_ops = {
>>
> 
> 

^ permalink raw reply	[flat|nested] 11+ messages in thread

* Re: [PATCH v2 2/4] mmc: dw_mmc: avoid race condition of cpu and IDMAC
  2016-09-20  9:49       ` Jaehoon Chung
@ 2016-09-21  1:03         ` Shawn Lin
  0 siblings, 0 replies; 11+ messages in thread
From: Shawn Lin @ 2016-09-21  1:03 UTC (permalink / raw)
  To: Jaehoon Chung
  Cc: shawn.lin, Ulf Hansson, linux-mmc, linux-kernel, Doug Anderson,
	Heiko Stuebner, linux-rockchip

在 2016/9/20 17:49, Jaehoon Chung 写道:
> Hi Shawn,
>
> On 09/20/2016 06:47 PM, Shawn Lin wrote:
>> Hi Jaehoon,
>>
>> Friendly ping... :)
>
> Thanks for reminding! :) I forgot your patch-set..sorry!

Ah, never mind, I just want to make sure whether I still need
to update it. :)


>
> Best Regards,
> Jaehoon Chung
>>
>> On 2016/9/2 12:14, Shawn Lin wrote:
>>> We could see an obvious race condition by test that
>>> the former write operation by IDMAC aiming to clear
>>> OWN bit reach right after the later configuration of
>>> the same desc, which makes the IDMAC be in SUSPEND
>>> state as the OWN bit was cleared by the asynchronous
>>> write operation of IDMAC. The bug can be very easily
>>> reproduced on RK3288 or similar when we reduce the
>>> running rate of system buses and keep the CPU running
>>> faster. So as two separate masters, IDMAC and cpu
>>> write the same descriptor stored on the same address,
>>> and this should be protected by adding check of OWN
>>> bit before preparing new descriptors.
>>>
>>> Signed-off-by: Shawn Lin <shawn.lin@rock-chips.com>
>>>
>>> ---
>>>
>>> Changes in v2:
>>> - fallback to PIO mode if failing to wait for OWN bit
>>> - cleanup polluted desc chain as the own bit error could
>>>   occur at any place of the chain, so we need to clr the desc
>>>   configured before that one. Use the existing function to reinit
>>>   the desc chain. As this issue is really rare, so after applied,
>>>   the fio/iozone stress test didn't show up performance regression.
>>>
>>>  drivers/mmc/host/dw_mmc.c | 208 ++++++++++++++++++++++++++++------------------
>>>  1 file changed, 129 insertions(+), 79 deletions(-)
>>>
>>> diff --git a/drivers/mmc/host/dw_mmc.c b/drivers/mmc/host/dw_mmc.c
>>> index 782b303..daa1c52 100644
>>> --- a/drivers/mmc/host/dw_mmc.c
>>> +++ b/drivers/mmc/host/dw_mmc.c
>>> @@ -467,12 +467,87 @@ static void dw_mci_dmac_complete_dma(void *arg)
>>>      }
>>>  }
>>>
>>> -static inline void dw_mci_prepare_desc64(struct dw_mci *host,
>>> +static int dw_mci_idmac_init(struct dw_mci *host)
>>> +{
>>> +    int i;
>>> +
>>> +    if (host->dma_64bit_address == 1) {
>>> +        struct idmac_desc_64addr *p;
>>> +        /* Number of descriptors in the ring buffer */
>>> +        host->ring_size = PAGE_SIZE / sizeof(struct idmac_desc_64addr);
>>> +
>>> +        /* Forward link the descriptor list */
>>> +        for (i = 0, p = host->sg_cpu; i < host->ring_size - 1;
>>> +                                i++, p++) {
>>> +            p->des6 = (host->sg_dma +
>>> +                    (sizeof(struct idmac_desc_64addr) *
>>> +                            (i + 1))) & 0xffffffff;
>>> +
>>> +            p->des7 = (u64)(host->sg_dma +
>>> +                    (sizeof(struct idmac_desc_64addr) *
>>> +                            (i + 1))) >> 32;
>>> +            /* Initialize reserved and buffer size fields to "0" */
>>> +            p->des1 = 0;
>>> +            p->des2 = 0;
>>> +            p->des3 = 0;
>>> +        }
>>> +
>>> +        /* Set the last descriptor as the end-of-ring descriptor */
>>> +        p->des6 = host->sg_dma & 0xffffffff;
>>> +        p->des7 = (u64)host->sg_dma >> 32;
>>> +        p->des0 = IDMAC_DES0_ER;
>>> +
>>> +    } else {
>>> +        struct idmac_desc *p;
>>> +        /* Number of descriptors in the ring buffer */
>>> +        host->ring_size = PAGE_SIZE / sizeof(struct idmac_desc);
>>> +
>>> +        /* Forward link the descriptor list */
>>> +        for (i = 0, p = host->sg_cpu;
>>> +             i < host->ring_size - 1;
>>> +             i++, p++) {
>>> +            p->des3 = cpu_to_le32(host->sg_dma +
>>> +                    (sizeof(struct idmac_desc) * (i + 1)));
>>> +            p->des1 = 0;
>>> +        }
>>> +
>>> +        /* Set the last descriptor as the end-of-ring descriptor */
>>> +        p->des3 = cpu_to_le32(host->sg_dma);
>>> +        p->des0 = cpu_to_le32(IDMAC_DES0_ER);
>>> +    }
>>> +
>>> +    dw_mci_idmac_reset(host);
>>> +
>>> +    if (host->dma_64bit_address == 1) {
>>> +        /* Mask out interrupts - get Tx & Rx complete only */
>>> +        mci_writel(host, IDSTS64, IDMAC_INT_CLR);
>>> +        mci_writel(host, IDINTEN64, SDMMC_IDMAC_INT_NI |
>>> +                SDMMC_IDMAC_INT_RI | SDMMC_IDMAC_INT_TI);
>>> +
>>> +        /* Set the descriptor base address */
>>> +        mci_writel(host, DBADDRL, host->sg_dma & 0xffffffff);
>>> +        mci_writel(host, DBADDRU, (u64)host->sg_dma >> 32);
>>> +
>>> +    } else {
>>> +        /* Mask out interrupts - get Tx & Rx complete only */
>>> +        mci_writel(host, IDSTS, IDMAC_INT_CLR);
>>> +        mci_writel(host, IDINTEN, SDMMC_IDMAC_INT_NI |
>>> +                SDMMC_IDMAC_INT_RI | SDMMC_IDMAC_INT_TI);
>>> +
>>> +        /* Set the descriptor base address */
>>> +        mci_writel(host, DBADDR, host->sg_dma);
>>> +    }
>>> +
>>> +    return 0;
>>> +}
>>> +
>>> +static inline int dw_mci_prepare_desc64(struct dw_mci *host,
>>>                       struct mmc_data *data,
>>>                       unsigned int sg_len)
>>>  {
>>>      unsigned int desc_len;
>>>      struct idmac_desc_64addr *desc_first, *desc_last, *desc;
>>> +    unsigned long timeout;
>>>      int i;
>>>
>>>      desc_first = desc_last = desc = host->sg_cpu;
>>> @@ -489,6 +564,19 @@ static inline void dw_mci_prepare_desc64(struct dw_mci *host,
>>>              length -= desc_len;
>>>
>>>              /*
>>> +             * Wait for the former clear OWN bit operation
>>> +             * of IDMAC to make sure that this descriptor
>>> +             * isn't still owned by IDMAC as IDMAC's write
>>> +             * ops and CPU's read ops are asynchronous.
>>> +             */
>>> +            timeout = jiffies + msecs_to_jiffies(100);
>>> +            while (readl(&desc->des0) & IDMAC_DES0_OWN) {
>>> +                if (time_after(jiffies, timeout))
>>> +                    goto err_own_bit;
>>> +                udelay(10);
>>> +            }
>>> +
>>> +            /*
>>>               * Set the OWN bit and disable interrupts
>>>               * for this descriptor
>>>               */
>>> @@ -516,15 +604,24 @@ static inline void dw_mci_prepare_desc64(struct dw_mci *host,
>>>      /* Set last descriptor */
>>>      desc_last->des0 &= ~(IDMAC_DES0_CH | IDMAC_DES0_DIC);
>>>      desc_last->des0 |= IDMAC_DES0_LD;
>>> +
>>> +    return 0;
>>> +err_own_bit:
>>> +    /* restore the descriptor chain as it's polluted */
>>> +    dev_dbg(host->dev, "desciptor is still owned by IDMAC.\n");
>>> +    memset(host->sg_cpu, 0, PAGE_SIZE);
>>> +    dw_mci_idmac_init(host);
>>> +    return -EINVAL;
>>>  }
>>>
>>>
>>> -static inline void dw_mci_prepare_desc32(struct dw_mci *host,
>>> +static inline int dw_mci_prepare_desc32(struct dw_mci *host,
>>>                       struct mmc_data *data,
>>>                       unsigned int sg_len)
>>>  {
>>>      unsigned int desc_len;
>>>      struct idmac_desc *desc_first, *desc_last, *desc;
>>> +    unsigned long timeout;
>>>      int i;
>>>
>>>      desc_first = desc_last = desc = host->sg_cpu;
>>> @@ -541,6 +638,20 @@ static inline void dw_mci_prepare_desc32(struct dw_mci *host,
>>>              length -= desc_len;
>>>
>>>              /*
>>> +             * Wait for the former clear OWN bit operation
>>> +             * of IDMAC to make sure that this descriptor
>>> +             * isn't still owned by IDMAC as IDMAC's write
>>> +             * ops and CPU's read ops are asynchronous.
>>> +             */
>>> +            timeout = jiffies + msecs_to_jiffies(100);
>>> +            while (readl(&desc->des0) &
>>> +                   cpu_to_le32(IDMAC_DES0_OWN)) {
>>> +                if (time_after(jiffies, timeout))
>>> +                    goto err_own_bit;
>>> +                udelay(10);
>>> +            }
>>> +
>>> +            /*
>>>               * Set the OWN bit and disable interrupts
>>>               * for this descriptor
>>>               */
>>> @@ -569,16 +680,28 @@ static inline void dw_mci_prepare_desc32(struct dw_mci *host,
>>>      desc_last->des0 &= cpu_to_le32(~(IDMAC_DES0_CH |
>>>                         IDMAC_DES0_DIC));
>>>      desc_last->des0 |= cpu_to_le32(IDMAC_DES0_LD);
>>> +
>>> +    return 0;
>>> +err_own_bit:
>>> +    /* restore the descriptor chain as it's polluted */
>>> +    dev_dbg(host->dev, "desciptor is still owned by IDMAC.\n");
>>> +    memset(host->sg_cpu, 0, PAGE_SIZE);
>>> +    dw_mci_idmac_init(host);
>>> +    return -EINVAL;
>>>  }
>>>
>>>  static int dw_mci_idmac_start_dma(struct dw_mci *host, unsigned int sg_len)
>>>  {
>>>      u32 temp;
>>> +    int ret;
>>>
>>>      if (host->dma_64bit_address == 1)
>>> -        dw_mci_prepare_desc64(host, host->data, sg_len);
>>> +        ret = dw_mci_prepare_desc64(host, host->data, sg_len);
>>>      else
>>> -        dw_mci_prepare_desc32(host, host->data, sg_len);
>>> +        ret = dw_mci_prepare_desc32(host, host->data, sg_len);
>>> +
>>> +    if (ret)
>>> +        goto out;
>>>
>>>      /* drain writebuffer */
>>>      wmb();
>>> @@ -603,81 +726,8 @@ static int dw_mci_idmac_start_dma(struct dw_mci *host, unsigned int sg_len)
>>>      /* Start it running */
>>>      mci_writel(host, PLDMND, 1);
>>>
>>> -    return 0;
>>> -}
>>> -
>>> -static int dw_mci_idmac_init(struct dw_mci *host)
>>> -{
>>> -    int i;
>>> -
>>> -    if (host->dma_64bit_address == 1) {
>>> -        struct idmac_desc_64addr *p;
>>> -        /* Number of descriptors in the ring buffer */
>>> -        host->ring_size = PAGE_SIZE / sizeof(struct idmac_desc_64addr);
>>> -
>>> -        /* Forward link the descriptor list */
>>> -        for (i = 0, p = host->sg_cpu; i < host->ring_size - 1;
>>> -                                i++, p++) {
>>> -            p->des6 = (host->sg_dma +
>>> -                    (sizeof(struct idmac_desc_64addr) *
>>> -                            (i + 1))) & 0xffffffff;
>>> -
>>> -            p->des7 = (u64)(host->sg_dma +
>>> -                    (sizeof(struct idmac_desc_64addr) *
>>> -                            (i + 1))) >> 32;
>>> -            /* Initialize reserved and buffer size fields to "0" */
>>> -            p->des1 = 0;
>>> -            p->des2 = 0;
>>> -            p->des3 = 0;
>>> -        }
>>> -
>>> -        /* Set the last descriptor as the end-of-ring descriptor */
>>> -        p->des6 = host->sg_dma & 0xffffffff;
>>> -        p->des7 = (u64)host->sg_dma >> 32;
>>> -        p->des0 = IDMAC_DES0_ER;
>>> -
>>> -    } else {
>>> -        struct idmac_desc *p;
>>> -        /* Number of descriptors in the ring buffer */
>>> -        host->ring_size = PAGE_SIZE / sizeof(struct idmac_desc);
>>> -
>>> -        /* Forward link the descriptor list */
>>> -        for (i = 0, p = host->sg_cpu;
>>> -             i < host->ring_size - 1;
>>> -             i++, p++) {
>>> -            p->des3 = cpu_to_le32(host->sg_dma +
>>> -                    (sizeof(struct idmac_desc) * (i + 1)));
>>> -            p->des1 = 0;
>>> -        }
>>> -
>>> -        /* Set the last descriptor as the end-of-ring descriptor */
>>> -        p->des3 = cpu_to_le32(host->sg_dma);
>>> -        p->des0 = cpu_to_le32(IDMAC_DES0_ER);
>>> -    }
>>> -
>>> -    dw_mci_idmac_reset(host);
>>> -
>>> -    if (host->dma_64bit_address == 1) {
>>> -        /* Mask out interrupts - get Tx & Rx complete only */
>>> -        mci_writel(host, IDSTS64, IDMAC_INT_CLR);
>>> -        mci_writel(host, IDINTEN64, SDMMC_IDMAC_INT_NI |
>>> -                SDMMC_IDMAC_INT_RI | SDMMC_IDMAC_INT_TI);
>>> -
>>> -        /* Set the descriptor base address */
>>> -        mci_writel(host, DBADDRL, host->sg_dma & 0xffffffff);
>>> -        mci_writel(host, DBADDRU, (u64)host->sg_dma >> 32);
>>> -
>>> -    } else {
>>> -        /* Mask out interrupts - get Tx & Rx complete only */
>>> -        mci_writel(host, IDSTS, IDMAC_INT_CLR);
>>> -        mci_writel(host, IDINTEN, SDMMC_IDMAC_INT_NI |
>>> -                SDMMC_IDMAC_INT_RI | SDMMC_IDMAC_INT_TI);
>>> -
>>> -        /* Set the descriptor base address */
>>> -        mci_writel(host, DBADDR, host->sg_dma);
>>> -    }
>>> -
>>> -    return 0;
>>> +out:
>>> +    return ret;
>>>  }
>>>
>>>  static const struct dw_mci_dma_ops dw_mci_idmac_ops = {
>>>
>>
>>
>
>
>
>


-- 
Best Regards
Shawn Lin

^ permalink raw reply	[flat|nested] 11+ messages in thread

* Re: [PATCH v2 1/4] mmc: dw_mmc: split out preparation of desc for IDMAC32 and IDMAC64
  2016-09-02  4:14 ` [PATCH v2 1/4] mmc: dw_mmc: split out preparation of desc for IDMAC32 and IDMAC64 Shawn Lin
                     ` (2 preceding siblings ...)
  2016-09-02  4:14   ` [PATCH v2 4/4] mmc: dw_mmc: use macro to define ring buffer size Shawn Lin
@ 2016-09-22  4:25   ` Jaehoon Chung
  3 siblings, 0 replies; 11+ messages in thread
From: Jaehoon Chung @ 2016-09-22  4:25 UTC (permalink / raw)
  To: Shawn Lin
  Cc: Ulf Hansson, linux-mmc, linux-kernel, Doug Anderson,
	Heiko Stuebner, linux-rockchip

On 09/02/2016 01:14 PM, Shawn Lin wrote:
> We intend to add more checks for descriptors when
> preparing desc. Let's split out the separate body
> to make the dw_mci_translate_sglist not so lengthy.
> After splitting out these two functions, we could
> remove dw_mci_translate_sglist and call both of them
> when starting idmac.

Applied on my tree. Thanks!

Best Regards,
Jaehoon Chung

> 
> Signed-off-by: Shawn Lin <shawn.lin@rock-chips.com>
> 
> ---
> 
> Changes in v2:
> - remove dw_mci_translate_sglist
> 
>  drivers/mmc/host/dw_mmc.c | 149 ++++++++++++++++++++++++----------------------
>  1 file changed, 79 insertions(+), 70 deletions(-)
> 
> diff --git a/drivers/mmc/host/dw_mmc.c b/drivers/mmc/host/dw_mmc.c
> index 22dacae..782b303 100644
> --- a/drivers/mmc/host/dw_mmc.c
> +++ b/drivers/mmc/host/dw_mmc.c
> @@ -467,112 +467,121 @@ static void dw_mci_dmac_complete_dma(void *arg)
>  	}
>  }
>  
> -static void dw_mci_translate_sglist(struct dw_mci *host, struct mmc_data *data,
> -				    unsigned int sg_len)
> +static inline void dw_mci_prepare_desc64(struct dw_mci *host,
> +					 struct mmc_data *data,
> +					 unsigned int sg_len)
>  {
>  	unsigned int desc_len;
> +	struct idmac_desc_64addr *desc_first, *desc_last, *desc;
>  	int i;
>  
> -	if (host->dma_64bit_address == 1) {
> -		struct idmac_desc_64addr *desc_first, *desc_last, *desc;
> -
> -		desc_first = desc_last = desc = host->sg_cpu;
> +	desc_first = desc_last = desc = host->sg_cpu;
>  
> -		for (i = 0; i < sg_len; i++) {
> -			unsigned int length = sg_dma_len(&data->sg[i]);
> +	for (i = 0; i < sg_len; i++) {
> +		unsigned int length = sg_dma_len(&data->sg[i]);
>  
> -			u64 mem_addr = sg_dma_address(&data->sg[i]);
> +		u64 mem_addr = sg_dma_address(&data->sg[i]);
>  
> -			for ( ; length ; desc++) {
> -				desc_len = (length <= DW_MCI_DESC_DATA_LENGTH) ?
> -					   length : DW_MCI_DESC_DATA_LENGTH;
> +		for ( ; length ; desc++) {
> +			desc_len = (length <= DW_MCI_DESC_DATA_LENGTH) ?
> +				   length : DW_MCI_DESC_DATA_LENGTH;
>  
> -				length -= desc_len;
> +			length -= desc_len;
>  
> -				/*
> -				 * Set the OWN bit and disable interrupts
> -				 * for this descriptor
> -				 */
> -				desc->des0 = IDMAC_DES0_OWN | IDMAC_DES0_DIC |
> -							IDMAC_DES0_CH;
> +			/*
> +			 * Set the OWN bit and disable interrupts
> +			 * for this descriptor
> +			 */
> +			desc->des0 = IDMAC_DES0_OWN | IDMAC_DES0_DIC |
> +						IDMAC_DES0_CH;
>  
> -				/* Buffer length */
> -				IDMAC_64ADDR_SET_BUFFER1_SIZE(desc, desc_len);
> +			/* Buffer length */
> +			IDMAC_64ADDR_SET_BUFFER1_SIZE(desc, desc_len);
>  
> -				/* Physical address to DMA to/from */
> -				desc->des4 = mem_addr & 0xffffffff;
> -				desc->des5 = mem_addr >> 32;
> +			/* Physical address to DMA to/from */
> +			desc->des4 = mem_addr & 0xffffffff;
> +			desc->des5 = mem_addr >> 32;
>  
> -				/* Update physical address for the next desc */
> -				mem_addr += desc_len;
> +			/* Update physical address for the next desc */
> +			mem_addr += desc_len;
>  
> -				/* Save pointer to the last descriptor */
> -				desc_last = desc;
> -			}
> +			/* Save pointer to the last descriptor */
> +			desc_last = desc;
>  		}
> +	}
>  
> -		/* Set first descriptor */
> -		desc_first->des0 |= IDMAC_DES0_FD;
> +	/* Set first descriptor */
> +	desc_first->des0 |= IDMAC_DES0_FD;
>  
> -		/* Set last descriptor */
> -		desc_last->des0 &= ~(IDMAC_DES0_CH | IDMAC_DES0_DIC);
> -		desc_last->des0 |= IDMAC_DES0_LD;
> +	/* Set last descriptor */
> +	desc_last->des0 &= ~(IDMAC_DES0_CH | IDMAC_DES0_DIC);
> +	desc_last->des0 |= IDMAC_DES0_LD;
> +}
>  
> -	} else {
> -		struct idmac_desc *desc_first, *desc_last, *desc;
>  
> -		desc_first = desc_last = desc = host->sg_cpu;
> +static inline void dw_mci_prepare_desc32(struct dw_mci *host,
> +					 struct mmc_data *data,
> +					 unsigned int sg_len)
> +{
> +	unsigned int desc_len;
> +	struct idmac_desc *desc_first, *desc_last, *desc;
> +	int i;
>  
> -		for (i = 0; i < sg_len; i++) {
> -			unsigned int length = sg_dma_len(&data->sg[i]);
> +	desc_first = desc_last = desc = host->sg_cpu;
>  
> -			u32 mem_addr = sg_dma_address(&data->sg[i]);
> +	for (i = 0; i < sg_len; i++) {
> +		unsigned int length = sg_dma_len(&data->sg[i]);
>  
> -			for ( ; length ; desc++) {
> -				desc_len = (length <= DW_MCI_DESC_DATA_LENGTH) ?
> -					   length : DW_MCI_DESC_DATA_LENGTH;
> +		u32 mem_addr = sg_dma_address(&data->sg[i]);
>  
> -				length -= desc_len;
> +		for ( ; length ; desc++) {
> +			desc_len = (length <= DW_MCI_DESC_DATA_LENGTH) ?
> +				   length : DW_MCI_DESC_DATA_LENGTH;
>  
> -				/*
> -				 * Set the OWN bit and disable interrupts
> -				 * for this descriptor
> -				 */
> -				desc->des0 = cpu_to_le32(IDMAC_DES0_OWN |
> -							 IDMAC_DES0_DIC |
> -							 IDMAC_DES0_CH);
> +			length -= desc_len;
>  
> -				/* Buffer length */
> -				IDMAC_SET_BUFFER1_SIZE(desc, desc_len);
> +			/*
> +			 * Set the OWN bit and disable interrupts
> +			 * for this descriptor
> +			 */
> +			desc->des0 = cpu_to_le32(IDMAC_DES0_OWN |
> +						 IDMAC_DES0_DIC |
> +						 IDMAC_DES0_CH);
>  
> -				/* Physical address to DMA to/from */
> -				desc->des2 = cpu_to_le32(mem_addr);
> +			/* Buffer length */
> +			IDMAC_SET_BUFFER1_SIZE(desc, desc_len);
>  
> -				/* Update physical address for the next desc */
> -				mem_addr += desc_len;
> +			/* Physical address to DMA to/from */
> +			desc->des2 = cpu_to_le32(mem_addr);
>  
> -				/* Save pointer to the last descriptor */
> -				desc_last = desc;
> -			}
> -		}
> -
> -		/* Set first descriptor */
> -		desc_first->des0 |= cpu_to_le32(IDMAC_DES0_FD);
> +			/* Update physical address for the next desc */
> +			mem_addr += desc_len;
>  
> -		/* Set last descriptor */
> -		desc_last->des0 &= cpu_to_le32(~(IDMAC_DES0_CH |
> -					       IDMAC_DES0_DIC));
> -		desc_last->des0 |= cpu_to_le32(IDMAC_DES0_LD);
> +			/* Save pointer to the last descriptor */
> +			desc_last = desc;
> +		}
>  	}
>  
> -	wmb(); /* drain writebuffer */
> +	/* Set first descriptor */
> +	desc_first->des0 |= cpu_to_le32(IDMAC_DES0_FD);
> +
> +	/* Set last descriptor */
> +	desc_last->des0 &= cpu_to_le32(~(IDMAC_DES0_CH |
> +				       IDMAC_DES0_DIC));
> +	desc_last->des0 |= cpu_to_le32(IDMAC_DES0_LD);
>  }
>  
>  static int dw_mci_idmac_start_dma(struct dw_mci *host, unsigned int sg_len)
>  {
>  	u32 temp;
>  
> -	dw_mci_translate_sglist(host, host->data, sg_len);
> +	if (host->dma_64bit_address == 1)
> +		dw_mci_prepare_desc64(host, host->data, sg_len);
> +	else
> +		dw_mci_prepare_desc32(host, host->data, sg_len);
> +
> +	/* drain writebuffer */
> +	wmb();
>  
>  	/* Make sure to reset DMA in case we did PIO before this */
>  	dw_mci_ctrl_reset(host, SDMMC_CTRL_DMA_RESET);
> 

^ permalink raw reply	[flat|nested] 11+ messages in thread

* Re: [PATCH v2 2/4] mmc: dw_mmc: avoid race condition of cpu and IDMAC
  2016-09-02  4:14   ` [PATCH v2 2/4] mmc: dw_mmc: avoid race condition of cpu and IDMAC Shawn Lin
  2016-09-20  9:47     ` Shawn Lin
@ 2016-09-22  4:26     ` Jaehoon Chung
  1 sibling, 0 replies; 11+ messages in thread
From: Jaehoon Chung @ 2016-09-22  4:26 UTC (permalink / raw)
  To: Shawn Lin
  Cc: Ulf Hansson, linux-mmc, linux-kernel, Doug Anderson,
	Heiko Stuebner, linux-rockchip

On 09/02/2016 01:14 PM, Shawn Lin wrote:
> Testing revealed an obvious race condition: the earlier
> write operation by the IDMAC, which aims to clear the
> OWN bit, can land right after the later configuration of
> the same desc, leaving the IDMAC in the SUSPEND
> state because the OWN bit was cleared by the IDMAC's
> asynchronous write operation. The bug is very easily
> reproduced on RK3288 or similar when we reduce the
> running rate of the system buses while keeping the CPU
> running faster. As two separate masters, the IDMAC and
> the CPU write the same descriptor stored at the same
> address, so this should be protected by adding a check
> of the OWN bit before preparing new descriptors.

Applied on my tree. Thanks!

Best Regards,
Jaehoon Chung

> 
> Signed-off-by: Shawn Lin <shawn.lin@rock-chips.com>
> 
> ---
> 
> Changes in v2:
> - fall back to PIO mode if we fail to wait for the OWN bit
> - clean up the polluted desc chain, as the OWN bit error could
>   occur anywhere in the chain, so we need to clear the descs
>   configured before that one. Use the existing function to reinit
>   the desc chain. As this issue is really rare, the fio/iozone
>   stress tests didn't show any performance regression after
>   this was applied.
> 
>  drivers/mmc/host/dw_mmc.c | 208 ++++++++++++++++++++++++++++------------------
>  1 file changed, 129 insertions(+), 79 deletions(-)
> 
> diff --git a/drivers/mmc/host/dw_mmc.c b/drivers/mmc/host/dw_mmc.c
> index 782b303..daa1c52 100644
> --- a/drivers/mmc/host/dw_mmc.c
> +++ b/drivers/mmc/host/dw_mmc.c
> @@ -467,12 +467,87 @@ static void dw_mci_dmac_complete_dma(void *arg)
>  	}
>  }
>  
> -static inline void dw_mci_prepare_desc64(struct dw_mci *host,
> +static int dw_mci_idmac_init(struct dw_mci *host)
> +{
> +	int i;
> +
> +	if (host->dma_64bit_address == 1) {
> +		struct idmac_desc_64addr *p;
> +		/* Number of descriptors in the ring buffer */
> +		host->ring_size = PAGE_SIZE / sizeof(struct idmac_desc_64addr);
> +
> +		/* Forward link the descriptor list */
> +		for (i = 0, p = host->sg_cpu; i < host->ring_size - 1;
> +								i++, p++) {
> +			p->des6 = (host->sg_dma +
> +					(sizeof(struct idmac_desc_64addr) *
> +							(i + 1))) & 0xffffffff;
> +
> +			p->des7 = (u64)(host->sg_dma +
> +					(sizeof(struct idmac_desc_64addr) *
> +							(i + 1))) >> 32;
> +			/* Initialize reserved and buffer size fields to "0" */
> +			p->des1 = 0;
> +			p->des2 = 0;
> +			p->des3 = 0;
> +		}
> +
> +		/* Set the last descriptor as the end-of-ring descriptor */
> +		p->des6 = host->sg_dma & 0xffffffff;
> +		p->des7 = (u64)host->sg_dma >> 32;
> +		p->des0 = IDMAC_DES0_ER;
> +
> +	} else {
> +		struct idmac_desc *p;
> +		/* Number of descriptors in the ring buffer */
> +		host->ring_size = PAGE_SIZE / sizeof(struct idmac_desc);
> +
> +		/* Forward link the descriptor list */
> +		for (i = 0, p = host->sg_cpu;
> +		     i < host->ring_size - 1;
> +		     i++, p++) {
> +			p->des3 = cpu_to_le32(host->sg_dma +
> +					(sizeof(struct idmac_desc) * (i + 1)));
> +			p->des1 = 0;
> +		}
> +
> +		/* Set the last descriptor as the end-of-ring descriptor */
> +		p->des3 = cpu_to_le32(host->sg_dma);
> +		p->des0 = cpu_to_le32(IDMAC_DES0_ER);
> +	}
> +
> +	dw_mci_idmac_reset(host);
> +
> +	if (host->dma_64bit_address == 1) {
> +		/* Mask out interrupts - get Tx & Rx complete only */
> +		mci_writel(host, IDSTS64, IDMAC_INT_CLR);
> +		mci_writel(host, IDINTEN64, SDMMC_IDMAC_INT_NI |
> +				SDMMC_IDMAC_INT_RI | SDMMC_IDMAC_INT_TI);
> +
> +		/* Set the descriptor base address */
> +		mci_writel(host, DBADDRL, host->sg_dma & 0xffffffff);
> +		mci_writel(host, DBADDRU, (u64)host->sg_dma >> 32);
> +
> +	} else {
> +		/* Mask out interrupts - get Tx & Rx complete only */
> +		mci_writel(host, IDSTS, IDMAC_INT_CLR);
> +		mci_writel(host, IDINTEN, SDMMC_IDMAC_INT_NI |
> +				SDMMC_IDMAC_INT_RI | SDMMC_IDMAC_INT_TI);
> +
> +		/* Set the descriptor base address */
> +		mci_writel(host, DBADDR, host->sg_dma);
> +	}
> +
> +	return 0;
> +}
> +
> +static inline int dw_mci_prepare_desc64(struct dw_mci *host,
>  					 struct mmc_data *data,
>  					 unsigned int sg_len)
>  {
>  	unsigned int desc_len;
>  	struct idmac_desc_64addr *desc_first, *desc_last, *desc;
> +	unsigned long timeout;
>  	int i;
>  
>  	desc_first = desc_last = desc = host->sg_cpu;
> @@ -489,6 +564,19 @@ static inline void dw_mci_prepare_desc64(struct dw_mci *host,
>  			length -= desc_len;
>  
>  			/*
> +			 * Wait for the former clear OWN bit operation
> +			 * of IDMAC to make sure that this descriptor
> +			 * isn't still owned by IDMAC as IDMAC's write
> +			 * ops and CPU's read ops are asynchronous.
> +			 */
> +			timeout = jiffies + msecs_to_jiffies(100);
> +			while (readl(&desc->des0) & IDMAC_DES0_OWN) {
> +				if (time_after(jiffies, timeout))
> +					goto err_own_bit;
> +				udelay(10);
> +			}
> +
> +			/*
>  			 * Set the OWN bit and disable interrupts
>  			 * for this descriptor
>  			 */
> @@ -516,15 +604,24 @@ static inline void dw_mci_prepare_desc64(struct dw_mci *host,
>  	/* Set last descriptor */
>  	desc_last->des0 &= ~(IDMAC_DES0_CH | IDMAC_DES0_DIC);
>  	desc_last->des0 |= IDMAC_DES0_LD;
> +
> +	return 0;
> +err_own_bit:
> +	/* restore the descriptor chain as it's polluted */
> +	dev_dbg(host->dev, "desciptor is still owned by IDMAC.\n");
> +	memset(host->sg_cpu, 0, PAGE_SIZE);
> +	dw_mci_idmac_init(host);
> +	return -EINVAL;
>  }
>  
>  
> -static inline void dw_mci_prepare_desc32(struct dw_mci *host,
> +static inline int dw_mci_prepare_desc32(struct dw_mci *host,
>  					 struct mmc_data *data,
>  					 unsigned int sg_len)
>  {
>  	unsigned int desc_len;
>  	struct idmac_desc *desc_first, *desc_last, *desc;
> +	unsigned long timeout;
>  	int i;
>  
>  	desc_first = desc_last = desc = host->sg_cpu;
> @@ -541,6 +638,20 @@ static inline void dw_mci_prepare_desc32(struct dw_mci *host,
>  			length -= desc_len;
>  
>  			/*
> +			 * Wait for the former clear OWN bit operation
> +			 * of IDMAC to make sure that this descriptor
> +			 * isn't still owned by IDMAC as IDMAC's write
> +			 * ops and CPU's read ops are asynchronous.
> +			 */
> +			timeout = jiffies + msecs_to_jiffies(100);
> +			while (readl(&desc->des0) &
> +			       cpu_to_le32(IDMAC_DES0_OWN)) {
> +				if (time_after(jiffies, timeout))
> +					goto err_own_bit;
> +				udelay(10);
> +			}
> +
> +			/*
>  			 * Set the OWN bit and disable interrupts
>  			 * for this descriptor
>  			 */
> @@ -569,16 +680,28 @@ static inline void dw_mci_prepare_desc32(struct dw_mci *host,
>  	desc_last->des0 &= cpu_to_le32(~(IDMAC_DES0_CH |
>  				       IDMAC_DES0_DIC));
>  	desc_last->des0 |= cpu_to_le32(IDMAC_DES0_LD);
> +
> +	return 0;
> +err_own_bit:
> +	/* restore the descriptor chain as it's polluted */
> +	dev_dbg(host->dev, "desciptor is still owned by IDMAC.\n");
> +	memset(host->sg_cpu, 0, PAGE_SIZE);
> +	dw_mci_idmac_init(host);
> +	return -EINVAL;
>  }
>  
>  static int dw_mci_idmac_start_dma(struct dw_mci *host, unsigned int sg_len)
>  {
>  	u32 temp;
> +	int ret;
>  
>  	if (host->dma_64bit_address == 1)
> -		dw_mci_prepare_desc64(host, host->data, sg_len);
> +		ret = dw_mci_prepare_desc64(host, host->data, sg_len);
>  	else
> -		dw_mci_prepare_desc32(host, host->data, sg_len);
> +		ret = dw_mci_prepare_desc32(host, host->data, sg_len);
> +
> +	if (ret)
> +		goto out;
>  
>  	/* drain writebuffer */
>  	wmb();
> @@ -603,81 +726,8 @@ static int dw_mci_idmac_start_dma(struct dw_mci *host, unsigned int sg_len)
>  	/* Start it running */
>  	mci_writel(host, PLDMND, 1);
>  
> -	return 0;
> -}
> -
> -static int dw_mci_idmac_init(struct dw_mci *host)
> -{
> -	int i;
> -
> -	if (host->dma_64bit_address == 1) {
> -		struct idmac_desc_64addr *p;
> -		/* Number of descriptors in the ring buffer */
> -		host->ring_size = PAGE_SIZE / sizeof(struct idmac_desc_64addr);
> -
> -		/* Forward link the descriptor list */
> -		for (i = 0, p = host->sg_cpu; i < host->ring_size - 1;
> -								i++, p++) {
> -			p->des6 = (host->sg_dma +
> -					(sizeof(struct idmac_desc_64addr) *
> -							(i + 1))) & 0xffffffff;
> -
> -			p->des7 = (u64)(host->sg_dma +
> -					(sizeof(struct idmac_desc_64addr) *
> -							(i + 1))) >> 32;
> -			/* Initialize reserved and buffer size fields to "0" */
> -			p->des1 = 0;
> -			p->des2 = 0;
> -			p->des3 = 0;
> -		}
> -
> -		/* Set the last descriptor as the end-of-ring descriptor */
> -		p->des6 = host->sg_dma & 0xffffffff;
> -		p->des7 = (u64)host->sg_dma >> 32;
> -		p->des0 = IDMAC_DES0_ER;
> -
> -	} else {
> -		struct idmac_desc *p;
> -		/* Number of descriptors in the ring buffer */
> -		host->ring_size = PAGE_SIZE / sizeof(struct idmac_desc);
> -
> -		/* Forward link the descriptor list */
> -		for (i = 0, p = host->sg_cpu;
> -		     i < host->ring_size - 1;
> -		     i++, p++) {
> -			p->des3 = cpu_to_le32(host->sg_dma +
> -					(sizeof(struct idmac_desc) * (i + 1)));
> -			p->des1 = 0;
> -		}
> -
> -		/* Set the last descriptor as the end-of-ring descriptor */
> -		p->des3 = cpu_to_le32(host->sg_dma);
> -		p->des0 = cpu_to_le32(IDMAC_DES0_ER);
> -	}
> -
> -	dw_mci_idmac_reset(host);
> -
> -	if (host->dma_64bit_address == 1) {
> -		/* Mask out interrupts - get Tx & Rx complete only */
> -		mci_writel(host, IDSTS64, IDMAC_INT_CLR);
> -		mci_writel(host, IDINTEN64, SDMMC_IDMAC_INT_NI |
> -				SDMMC_IDMAC_INT_RI | SDMMC_IDMAC_INT_TI);
> -
> -		/* Set the descriptor base address */
> -		mci_writel(host, DBADDRL, host->sg_dma & 0xffffffff);
> -		mci_writel(host, DBADDRU, (u64)host->sg_dma >> 32);
> -
> -	} else {
> -		/* Mask out interrupts - get Tx & Rx complete only */
> -		mci_writel(host, IDSTS, IDMAC_INT_CLR);
> -		mci_writel(host, IDINTEN, SDMMC_IDMAC_INT_NI |
> -				SDMMC_IDMAC_INT_RI | SDMMC_IDMAC_INT_TI);
> -
> -		/* Set the descriptor base address */
> -		mci_writel(host, DBADDR, host->sg_dma);
> -	}
> -
> -	return 0;
> +out:
> +	return ret;
>  }
>  
>  static const struct dw_mci_dma_ops dw_mci_idmac_ops = {
> 

^ permalink raw reply	[flat|nested] 11+ messages in thread

* Re: [PATCH v2 3/4] mmc: dw_mmc: fix misleading error print if failing to do DMA transfer
  2016-09-02  4:14   ` [PATCH v2 3/4] mmc: dw_mmc: fix misleading error print if failing to do DMA transfer Shawn Lin
@ 2016-09-22  4:26     ` Jaehoon Chung
  0 siblings, 0 replies; 11+ messages in thread
From: Jaehoon Chung @ 2016-09-22  4:26 UTC (permalink / raw)
  To: Shawn Lin
  Cc: Ulf Hansson, linux-mmc, linux-kernel, Doug Anderson,
	Heiko Stuebner, linux-rockchip

On 09/02/2016 01:14 PM, Shawn Lin wrote:
> The original log didn't point out that we could still
> finish this transfer in PIO mode even if we fail to use
> DMA. It should also be kept at debug level instead of
> error level.

Applied on my tree. Thanks!

Best Regards,
Jaehoon Chung

> 
> Signed-off-by: Shawn Lin <shawn.lin@rock-chips.com>
> ---
> 
> Changes in v2: None
> 
>  drivers/mmc/host/dw_mmc.c | 6 ++++--
>  1 file changed, 4 insertions(+), 2 deletions(-)
> 
> diff --git a/drivers/mmc/host/dw_mmc.c b/drivers/mmc/host/dw_mmc.c
> index daa1c52..833b6ad 100644
> --- a/drivers/mmc/host/dw_mmc.c
> +++ b/drivers/mmc/host/dw_mmc.c
> @@ -1057,8 +1057,10 @@ static int dw_mci_submit_data_dma(struct dw_mci *host, struct mmc_data *data)
>  	spin_unlock_irqrestore(&host->irq_lock, irqflags);
>  
>  	if (host->dma_ops->start(host, sg_len)) {
> -		/* We can't do DMA */
> -		dev_err(host->dev, "%s: failed to start DMA.\n", __func__);
> +		/* We can't do DMA, try PIO for this one */
> +		dev_dbg(host->dev,
> +			"%s: fall back to PIO mode for current transfer\n",
> +			__func__);
>  		return -ENODEV;
>  	}
>  
> 

^ permalink raw reply	[flat|nested] 11+ messages in thread

* Re: [PATCH v2 4/4] mmc: dw_mmc: use macro to define ring buffer size
  2016-09-02  4:14   ` [PATCH v2 4/4] mmc: dw_mmc: use macro to define ring buffer size Shawn Lin
@ 2016-09-22  4:27     ` Jaehoon Chung
  0 siblings, 0 replies; 11+ messages in thread
From: Jaehoon Chung @ 2016-09-22  4:27 UTC (permalink / raw)
  To: Shawn Lin
  Cc: Ulf Hansson, linux-mmc, linux-kernel, Doug Anderson,
	Heiko Stuebner, linux-rockchip

On 09/02/2016 01:14 PM, Shawn Lin wrote:
> It's very easy to make a mistake, as we might forget
> to replace all the PAGE_SIZEs with new values if we
> try to modify the ring buffer size for whatever reason.
> Let's use a macro to define it.

Applied on my tree. Thanks!

Best Regards,
Jaehoon Chung

> 
> Signed-off-by: Shawn Lin <shawn.lin@rock-chips.com>
> ---
> 
> Changes in v2: None
> 
>  drivers/mmc/host/dw_mmc.c | 15 ++++++++++-----
>  1 file changed, 10 insertions(+), 5 deletions(-)
> 
> diff --git a/drivers/mmc/host/dw_mmc.c b/drivers/mmc/host/dw_mmc.c
> index 833b6ad..6e5926b 100644
> --- a/drivers/mmc/host/dw_mmc.c
> +++ b/drivers/mmc/host/dw_mmc.c
> @@ -61,6 +61,8 @@
>  				 SDMMC_IDMAC_INT_FBE | SDMMC_IDMAC_INT_RI | \
>  				 SDMMC_IDMAC_INT_TI)
>  
> +#define DESC_RING_BUF_SZ	PAGE_SIZE
> +
>  struct idmac_desc_64addr {
>  	u32		des0;	/* Control Descriptor */
>  
> @@ -474,7 +476,8 @@ static int dw_mci_idmac_init(struct dw_mci *host)
>  	if (host->dma_64bit_address == 1) {
>  		struct idmac_desc_64addr *p;
>  		/* Number of descriptors in the ring buffer */
> -		host->ring_size = PAGE_SIZE / sizeof(struct idmac_desc_64addr);
> +		host->ring_size =
> +			DESC_RING_BUF_SZ / sizeof(struct idmac_desc_64addr);
>  
>  		/* Forward link the descriptor list */
>  		for (i = 0, p = host->sg_cpu; i < host->ring_size - 1;
> @@ -500,7 +503,8 @@ static int dw_mci_idmac_init(struct dw_mci *host)
>  	} else {
>  		struct idmac_desc *p;
>  		/* Number of descriptors in the ring buffer */
> -		host->ring_size = PAGE_SIZE / sizeof(struct idmac_desc);
> +		host->ring_size =
> +			DESC_RING_BUF_SZ / sizeof(struct idmac_desc);
>  
>  		/* Forward link the descriptor list */
>  		for (i = 0, p = host->sg_cpu;
> @@ -609,7 +613,7 @@ static inline int dw_mci_prepare_desc64(struct dw_mci *host,
>  err_own_bit:
>  	/* restore the descriptor chain as it's polluted */
>  	dev_dbg(host->dev, "desciptor is still owned by IDMAC.\n");
> -	memset(host->sg_cpu, 0, PAGE_SIZE);
> +	memset(host->sg_cpu, 0, DESC_RING_BUF_SZ);
>  	dw_mci_idmac_init(host);
>  	return -EINVAL;
>  }
> @@ -685,7 +689,7 @@ static inline int dw_mci_prepare_desc32(struct dw_mci *host,
>  err_own_bit:
>  	/* restore the descriptor chain as it's polluted */
>  	dev_dbg(host->dev, "desciptor is still owned by IDMAC.\n");
> -	memset(host->sg_cpu, 0, PAGE_SIZE);
> +	memset(host->sg_cpu, 0, DESC_RING_BUF_SZ);
>  	dw_mci_idmac_init(host);
>  	return -EINVAL;
>  }
> @@ -2750,7 +2754,8 @@ static void dw_mci_init_dma(struct dw_mci *host)
>  		}
>  
>  		/* Alloc memory for sg translation */
> -		host->sg_cpu = dmam_alloc_coherent(host->dev, PAGE_SIZE,
> +		host->sg_cpu = dmam_alloc_coherent(host->dev,
> +						   DESC_RING_BUF_SZ,
>  						   &host->sg_dma, GFP_KERNEL);
>  		if (!host->sg_cpu) {
>  			dev_err(host->dev,
> 

^ permalink raw reply	[flat|nested] 11+ messages in thread

end of thread, other threads:[~2016-09-22  4:27 UTC | newest]

Thread overview: 11+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
     [not found] <CGME20160902041949epcas1p443d773752cd7fb202beee72e88dbe8b9@epcas1p4.samsung.com>
2016-09-02  4:14 ` [PATCH v2 1/4] mmc: dw_mmc: split out preparation of desc for IDMAC32 and IDMAC64 Shawn Lin
2016-09-02  4:14   ` [PATCH v2 2/4] mmc: dw_mmc: avoid race condition of cpu and IDMAC Shawn Lin
2016-09-20  9:47     ` Shawn Lin
2016-09-20  9:49       ` Jaehoon Chung
2016-09-21  1:03         ` Shawn Lin
2016-09-22  4:26     ` Jaehoon Chung
2016-09-02  4:14   ` [PATCH v2 3/4] mmc: dw_mmc: fix misleading error print if failing to do DMA transfer Shawn Lin
2016-09-22  4:26     ` Jaehoon Chung
2016-09-02  4:14   ` [PATCH v2 4/4] mmc: dw_mmc: use macro to define ring buffer size Shawn Lin
2016-09-22  4:27     ` Jaehoon Chung
2016-09-22  4:25   ` [PATCH v2 1/4] mmc: dw_mmc: split out preparation of desc for IDMAC32 and IDMAC64 Jaehoon Chung

This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).