* [PATCH 03/03] omap hsmmc: adaptation of sdma descriptor autoloading feature
@ 2010-03-10 14:12 ` Venkatraman S
  0 siblings, 0 replies; 24+ messages in thread
From: Venkatraman S @ 2010-03-10 14:12 UTC (permalink / raw)
  To: linux-omap, linux-arm-kernel, linux-mmc
  Cc: Adrian Hunter, Madhusudhan Chikkature, Tony Lindgren

See the previous post: http://patchwork.kernel.org/patch/82909/.
Rebased to 2.6.34-rc1.

CC: Adrian Hunter <adrian.hunter@nokia.com>
CC: Madhusudhan C <madhu.cr@ti.com>
CC: Tony Lindgren <tony@atomide.com>
Signed-off-by: Venkatraman S <svenkatr@ti.com>
---
 drivers/mmc/host/omap_hsmmc.c |  143 +++++++++++++++++++++++++++++++++++------
 1 files changed, 122 insertions(+), 21 deletions(-)

diff --git a/drivers/mmc/host/omap_hsmmc.c b/drivers/mmc/host/omap_hsmmc.c
index ea2a082..131d889 100644
--- a/drivers/mmc/host/omap_hsmmc.c
+++ b/drivers/mmc/host/omap_hsmmc.c
@@ -102,6 +102,7 @@
 #define SRD			(1 << 26)
 #define SOFTRESET		(1 << 1)
 #define RESETDONE		(1 << 0)
+#define DMA_ICR_QUIET		0xD00

 /*
  * FIXME: Most likely all the data using these _DEVID defines should come
@@ -118,6 +119,12 @@
 #define OMAP_MMC_MASTER_CLOCK	96000000
 #define DRIVER_NAME		"mmci-omap-hs"

+#define DMA_TYPE_NODMA	0
+#define DMA_TYPE_SDMA	1
+#define DMA_TYPE_SDMA_DLOAD 2
+
+#define DMA_CTRL_BUF_SIZE	(PAGE_SIZE * 3)
+
 /* Timeouts for entering power saving states on inactivity, msec */
 #define OMAP_MMC_DISABLED_TIMEOUT	100
 #define OMAP_MMC_SLEEP_TIMEOUT		1000
@@ -172,7 +179,11 @@ struct omap_hsmmc_host {
 	u32			bytesleft;
 	int			suspended;
 	int			irq;
-	int			use_dma, dma_ch;
+	int			dma_caps;
+	int			dma_in_use;
+	int			dma_ch;
+	void			*dma_ctrl_buf;
+	dma_addr_t		dma_ctrl_buf_phy;
 	int			dma_line_tx, dma_line_rx;
 	int			slot_id;
 	int			got_dbclk;
@@ -768,7 +779,7 @@ omap_hsmmc_start_command(struct omap_hsmmc_host *host, struct mmc_command *cmd,
 	OMAP_HSMMC_WRITE(host->base, STAT, STAT_CLEAR);
 	OMAP_HSMMC_WRITE(host->base, ISE, INT_EN_MASK);

-	if (host->use_dma)
+	if (host->dma_in_use)
 		OMAP_HSMMC_WRITE(host->base, IE,
 				 INT_EN_MASK & ~(BRR_ENABLE | BWR_ENABLE));
 	else
@@ -803,7 +814,7 @@ omap_hsmmc_start_command(struct omap_hsmmc_host *host, struct mmc_command *cmd,
 			cmdreg &= ~(DDIR);
 	}

-	if (host->use_dma)
+	if (host->dma_in_use)
 		cmdreg |= DMA_EN;

 	/*
@@ -850,7 +861,7 @@ omap_hsmmc_xfer_done(struct omap_hsmmc_host *host, struct mmc_data *data)

 	host->data = NULL;

-	if (host->use_dma && host->dma_ch != -1)
+	if (host->dma_in_use && host->dma_ch != -1)
 		dma_unmap_sg(mmc_dev(host->mmc), data->sg, host->dma_len,
 			omap_hsmmc_get_dma_dir(host, data));

@@ -900,7 +911,7 @@ static void omap_hsmmc_dma_cleanup(struct omap_hsmmc_host *host, int errno)
 {
 	host->data->error = errno;

-	if (host->use_dma && host->dma_ch != -1) {
+	if (host->dma_in_use && host->dma_ch != -1) {
 		dma_unmap_sg(mmc_dev(host->mmc), host->data->sg, host->dma_len,
 			omap_hsmmc_get_dma_dir(host, host->data));
 		omap_free_dma(host->dma_ch);
@@ -1253,7 +1264,6 @@ static void omap_hsmmc_config_dma_params(struct omap_hsmmc_host *host,
 			omap_hsmmc_get_dma_sync_dev(host, data),
 			!(data->flags & MMC_DATA_WRITE));

-	omap_start_dma(dma_ch);
 }

 /*
@@ -1268,21 +1278,32 @@ static void omap_hsmmc_dma_cb(int lch, u16 ch_status, void *data)

 	if (host->dma_ch < 0)
 		return;
-
-	host->dma_sg_idx++;
-	if (host->dma_sg_idx < host->dma_len) {
-		/* Fire up the next transfer. */
-		omap_hsmmc_config_dma_params(host, host->data,
+	if (host->dma_in_use == DMA_TYPE_SDMA) {
+		host->dma_sg_idx++;
+		if (host->dma_sg_idx < host->dma_len) {
+			/* Fire up the next transfer. */
+			omap_hsmmc_config_dma_params(host, host->data,
 					   host->data->sg + host->dma_sg_idx);
-		return;
+			omap_start_dma(host->dma_ch);
+			return;
+		}
 	}

 }

+static int omap_hsmmc_start_dma_transfer(struct omap_hsmmc_host *host)
+{
+	if (host->dma_in_use == DMA_TYPE_SDMA)
+		omap_start_dma(host->dma_ch);
+	else if (host->dma_in_use == DMA_TYPE_SDMA_DLOAD)
+		return omap_start_dma_sglist_transfers(host->dma_ch, -1);
+
+	return 0;
+}
 /*
- * Routine to configure and start DMA for the MMC card
+ * Routine to configure DMA for the MMC card
  */
-static int omap_hsmmc_start_dma_transfer(struct omap_hsmmc_host *host,
+static int omap_hsmmc_configure_sdma(struct omap_hsmmc_host *host,
 					struct mmc_request *req)
 {
 	int dma_ch = 0, ret = 0, err = 1, i;
@@ -1339,6 +1360,56 @@ static int omap_hsmmc_start_dma_transfer(struct omap_hsmmc_host *host,
 	return 0;
 }

+static int omap_hsmmc_configure_sdma_sglist(struct omap_hsmmc_host *host,
+		struct mmc_request *req)
+{
+	int i;
+	struct omap_dma_sglist_node *sglist, *snode;
+	struct mmc_data *data = req->data;
+	int blksz;
+	int dmadir = omap_hsmmc_get_dma_dir(host, data);
+	struct omap_dma_sglist_type2a_params *t2p;
+
+	sglist = (struct omap_dma_sglist_node *) host->dma_ctrl_buf;
+	snode = sglist;
+	blksz = host->data->blksz;
+
+	if ((host->dma_len * sizeof(*snode)) > DMA_CTRL_BUF_SIZE) {
+		dev_err(mmc_dev(host->mmc), "not enough sglist memory %d\n",
+			host->dma_len);
+		return -ENOMEM;
+	}
+	for (i = 0; i < host->dma_len; snode++, i++) {
+		snode->desc_type = OMAP_DMA_SGLIST_DESCRIPTOR_TYPE2a;
+		snode->num_of_elem = blksz / 4;
+		t2p = &snode->sg_node.t2a;
+
+		if (dmadir == DMA_FROM_DEVICE) {
+			t2p->src_addr = host->mapbase + OMAP_HSMMC_DATA;
+			t2p->dst_addr = sg_dma_address(data->sg + i);
+		} else {
+			t2p->dst_addr = host->mapbase + OMAP_HSMMC_DATA;
+			t2p->src_addr = sg_dma_address(data->sg + i);
+		}
+		snode->flags =
+			OMAP_DMA_LIST_DST_VALID | OMAP_DMA_LIST_SRC_VALID;
+
+		t2p->cfn_fn = sg_dma_len(data->sg + i) / host->data->blksz;
+		t2p->cicr = DMA_ICR_QUIET;
+
+		t2p->dst_frame_idx_or_pkt_size = 0;
+		t2p->src_frame_idx_or_pkt_size = 0;
+		t2p->dst_elem_idx = 0;
+		t2p->src_elem_idx = 0;
+	}
+	dev_dbg(mmc_dev(host->mmc), "new sglist %x len =%d\n",
+			host->dma_ctrl_buf_phy, i);
+	omap_set_dma_sglist_mode(host->dma_ch, sglist,
+			host->dma_ctrl_buf_phy, i, NULL);
+	omap_dma_set_sglist_fastmode(host->dma_ch, 1);
+	return 0;
+}
+
 static void set_data_timeout(struct omap_hsmmc_host *host,
 			     unsigned int timeout_ns,
 			     unsigned int timeout_clks)
@@ -1400,14 +1471,23 @@ omap_hsmmc_prepare_data(struct omap_hsmmc_host *host, struct mmc_request *req)
 					| (req->data->blocks << 16));
 	set_data_timeout(host, req->data->timeout_ns, req->data->timeout_clks);

-	if (host->use_dma) {
-		ret = omap_hsmmc_start_dma_transfer(host, req);
-		if (ret != 0) {
-			dev_dbg(mmc_dev(host->mmc), "MMC start dma failure\n");
+	if (host->dma_caps & DMA_TYPE_SDMA) {
+		ret = omap_hsmmc_configure_sdma(host, req);
+		if (ret)
 			return ret;
-		}
+		host->dma_in_use = DMA_TYPE_SDMA;
 	}
-	return 0;
+	if ((host->dma_caps & DMA_TYPE_SDMA_DLOAD) &&
+		host->data->sg_len > 4) {
+		ret = omap_hsmmc_configure_sdma_sglist(host, req);
+		if (ret)
+			return ret;
+		host->dma_in_use = DMA_TYPE_SDMA_DLOAD;
+
+	}
+	ret = omap_hsmmc_start_dma_transfer(host);
+	return ret;
+
 }

 /*
@@ -1999,7 +2079,9 @@ static int __init omap_hsmmc_probe(struct platform_device *pdev)
 	host->mmc	= mmc;
 	host->pdata	= pdata;
 	host->dev	= &pdev->dev;
-	host->use_dma	= 1;
+	host->dma_caps	= DMA_TYPE_SDMA;
+	host->dma_in_use	= DMA_TYPE_NODMA;
+	host->dma_ctrl_buf = NULL;
 	host->dev->dma_mask = &pdata->dma_mask;
 	host->dma_ch	= -1;
 	host->irq	= irq;
@@ -2081,6 +2163,15 @@ static int __init omap_hsmmc_probe(struct platform_device *pdev)
 							" clk failed\n");
 	}

+	if (cpu_is_omap44xx() || cpu_is_omap3630()) {
+		host->dma_ctrl_buf = dma_alloc_coherent(NULL,
+					DMA_CTRL_BUF_SIZE,
+					&host->dma_ctrl_buf_phy,
+					0);
+		if (host->dma_ctrl_buf != NULL)
+			host->dma_caps |= DMA_TYPE_SDMA_DLOAD;
+	}
+
 	/* Since we do only SG emulation, we can have as many segs
 	 * as we want. */
 	mmc->max_phys_segs = 1024;
@@ -2207,6 +2298,10 @@ err_reg:
 err_irq_cd_init:
 	free_irq(host->irq, host);
 err_irq:
+	if (host->dma_ctrl_buf)
+		dma_free_coherent(NULL, DMA_CTRL_BUF_SIZE,
+				host->dma_ctrl_buf,
+				host->dma_ctrl_buf_phy);
 	mmc_host_disable(host->mmc);
 	clk_disable(host->iclk);
 	clk_put(host->fclk);
@@ -2234,6 +2329,12 @@ static int omap_hsmmc_remove(struct platform_device *pdev)
 	if (host) {
 		mmc_host_enable(host->mmc);
 		mmc_remove_host(host->mmc);
+
+		if (host->dma_ctrl_buf != NULL) {
+			dma_free_coherent(NULL, DMA_CTRL_BUF_SIZE,
+				host->dma_ctrl_buf,
+				host->dma_ctrl_buf_phy);
+		}
 		if (host->use_reg)
 			omap_hsmmc_reg_put(host);
 		if (host->pdata->cleanup)
-- 
1.6.3.3

^ permalink raw reply related	[flat|nested] 24+ messages in thread

* Re: [PATCH 03/03] omap hsmmc: adaptation of sdma descriptor autoloading feature
  2010-03-10 14:12 ` Venkatraman S
@ 2010-03-10 22:41   ` Tony Lindgren
  -1 siblings, 0 replies; 24+ messages in thread
From: Tony Lindgren @ 2010-03-10 22:41 UTC (permalink / raw)
  To: Venkatraman S
  Cc: linux-omap, linux-arm-kernel, linux-mmc, Adrian Hunter,
	Madhusudhan Chikkature

* Venkatraman S <svenkatr@ti.com> [100310 06:08]:
> @@ -1400,14 +1471,23 @@ omap_hsmmc_prepare_data(struct omap_hsmmc_host
> *host, struct mmc_request *req)
>  					| (req->data->blocks << 16));
>  	set_data_timeout(host, req->data->timeout_ns, req->data->timeout_clks);
> 
> -	if (host->use_dma) {
> -		ret = omap_hsmmc_start_dma_transfer(host, req);
> -		if (ret != 0) {
> -			dev_dbg(mmc_dev(host->mmc), "MMC start dma failure\n");
> +	if (host->dma_caps & DMA_TYPE_SDMA) {
> +		ret = omap_hsmmc_configure_sdma(host, req);
> +		if (ret)
>  			return ret;
> -		}
> +		host->dma_in_use = DMA_TYPE_SDMA;
>  	}
> -	return 0;
> +	if ((host->dma_caps & DMA_TYPE_SDMA_DLOAD) &&
> +		host->data->sg_len > 4) {
> +		ret = omap_hsmmc_configure_sdma_sglist(host, req);
> +		if (ret)
> +			return ret;
> +		host->dma_in_use = DMA_TYPE_SDMA_DLOAD;
> +
> +	}
> +	ret = omap_hsmmc_start_dma_transfer(host);
> +	return ret;
> +
>  }

Does the driver still work in PIO mode?

We need the drivers to be capable of failing over to PIO mode,
as the DMA channels can run out.

Regards,

Tony

^ permalink raw reply	[flat|nested] 24+ messages in thread

* Re: [PATCH 03/03] omap hsmmc: adaptation of sdma descriptor autoloading feature
  2010-03-10 22:41   ` Tony Lindgren
@ 2010-03-11 15:08     ` Venkatraman S
  -1 siblings, 0 replies; 24+ messages in thread
From: Venkatraman S @ 2010-03-11 15:08 UTC (permalink / raw)
  To: Tony Lindgren
  Cc: linux-omap, linux-arm-kernel, linux-mmc, Adrian Hunter,
	Madhusudhan Chikkature

Tony Lindgren wrote:
> * Venkatraman S <svenkatr@ti.com> [100310 06:08]:
>> @@ -1400,14 +1471,23 @@ omap_hsmmc_prepare_data(struct omap_hsmmc_host
>> *host, struct mmc_request *req)
>>                                       | (req->data->blocks << 16));
>>       set_data_timeout(host, req->data->timeout_ns, req->data->timeout_clks);
>>
>> -     if (host->use_dma) {
>> -             ret = omap_hsmmc_start_dma_transfer(host, req);
>> -             if (ret != 0) {
>> -                     dev_dbg(mmc_dev(host->mmc), "MMC start dma failure\n");
>> +     if (host->dma_caps & DMA_TYPE_SDMA) {
>> +             ret = omap_hsmmc_configure_sdma(host, req);
>> +             if (ret)
>>                       return ret;
>> -             }
>> +             host->dma_in_use = DMA_TYPE_SDMA;
>>       }
>> -     return 0;
>> +     if ((host->dma_caps & DMA_TYPE_SDMA_DLOAD) &&
>> +             host->data->sg_len > 4) {
>> +             ret = omap_hsmmc_configure_sdma_sglist(host, req);
>> +             if (ret)
>> +                     return ret;
>> +             host->dma_in_use = DMA_TYPE_SDMA_DLOAD;
>> +
>> +     }
>> +     ret = omap_hsmmc_start_dma_transfer(host);
>> +     return ret;
>> +
>>  }
>
> Does the driver still work in PIO mode?
>
> We need to have the drivers capable to fail over to PIO mode
> as the DMA channels can run out.
>

   The driver doesn't have an automatic fallback to PIO,
even without my patch. An error return from omap_request_dma is
propagated all the way back to the transfer request.
  The decision to use DMA (the use_dma variable) is unaltered.

   In fact, it would be easier to implement a runtime fallback after
this patch is merged, as I have separated out the capability and the
runtime selection (dma_caps and dma_in_use).
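
   For illustration only, here is an untested sketch of what such a
fallback could look like once a PIO data path exists; the helper name
is made up and this is not part of the patch:

/*
 * Hypothetical sketch: pick DMA when a channel can be set up,
 * otherwise fall back to PIO. Relies on the dma_caps / dma_in_use
 * split introduced by this patch.
 */
static int omap_hsmmc_setup_data_path(struct omap_hsmmc_host *host,
				      struct mmc_request *req)
{
	host->dma_in_use = DMA_TYPE_NODMA;

	if ((host->dma_caps & DMA_TYPE_SDMA) &&
	    omap_hsmmc_configure_sdma(host, req) == 0)
		host->dma_in_use = DMA_TYPE_SDMA;

	if (host->dma_in_use == DMA_TYPE_NODMA) {
		/* e.g. omap_request_dma() failed: here one would
		 * re-enable the BRR/BWR interrupts and use a PIO
		 * push/pull routine, which the driver lacks today */
		return -EBUSY;
	}

	return omap_hsmmc_start_dma_transfer(host);
}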

Regards,

Venkat.

^ permalink raw reply	[flat|nested] 24+ messages in thread

* Re: [PATCH 03/03] omap hsmmc: adaptation of sdma descriptor autoloading feature
  2010-03-11 15:08     ` Venkatraman S
@ 2010-03-11 18:39       ` Tony Lindgren
  -1 siblings, 0 replies; 24+ messages in thread
From: Tony Lindgren @ 2010-03-11 18:39 UTC (permalink / raw)
  To: Venkatraman S
  Cc: linux-omap, linux-arm-kernel, linux-mmc, Adrian Hunter,
	Madhusudhan Chikkature

* Venkatraman S <svenkatr@ti.com> [100311 07:04]:
> Tony Lindgren wrote:
> >
> > Does the driver still work in PIO mode?
> >
> > We need to have the drivers capable to fail over to PIO mode
> > as the DMA channels can run out.
> >
> 
>    The driver doesn't have an automatic fallback to PIO,
> even without my patch. A error return from omap_request_dma is
> propogated all the way back to the transfer request.
>   The decision to use_dma (the variable) is unaltered.

OK, that might explain some nasty surprises then..

With these patches, does the driver still work in PIO mode though?
 
>    Infact, it would be easier to implement a runtime fallback after
> this patch is
> merged as I have separated out the capability and runtime selection.
> (dma_caps and dma_in_use).

Sounds good to me, thanks for looking into it.

Regards,

Tony

^ permalink raw reply	[flat|nested] 24+ messages in thread

* Re: [PATCH 03/03] omap hsmmc: adaptation of sdma descriptor autoloading feature
  2010-03-11 18:39       ` Tony Lindgren
@ 2010-03-12  8:18         ` Venkatraman S
  -1 siblings, 0 replies; 24+ messages in thread
From: Venkatraman S @ 2010-03-12  8:18 UTC (permalink / raw)
  To: Tony Lindgren
  Cc: linux-omap, linux-arm-kernel, linux-mmc, Adrian Hunter,
	Madhusudhan Chikkature

Tony Lindgren wrote:
> * Venkatraman S <svenkatr@ti.com> [100311 07:04]:
>> Tony Lindgren wrote:
>> >
>> > Does the driver still work in PIO mode?
>> >
>> > We need to have the drivers capable to fail over to PIO mode
>> > as the DMA channels can run out.
>> >
>>
>>    The driver doesn't have an automatic fallback to PIO,
>> even without my patch. A error return from omap_request_dma is
>> propogated all the way back to the transfer request.
>>   The decision to use_dma (the variable) is unaltered.
>
> OK, that might explain some nasty surprises then..
>
> With these patches, does the driver still work in PIO mode though?

No. The workhorse code to actually push the data byte by byte
is not present in the hsmmc driver.

^ permalink raw reply	[flat|nested] 24+ messages in thread

* Re: [PATCH 03/03] omap hsmmc: adaptation of sdma descriptor autoloading feature
  2010-03-12  2:29           ` Madhusudhan
@ 2010-03-12  6:03             ` Venkatraman S
  -1 siblings, 0 replies; 24+ messages in thread
From: Venkatraman S @ 2010-03-12  6:03 UTC (permalink / raw)
  To: Madhusudhan; +Cc: linux-mmc, linux-arm-kernel, linux-omap

Madhusudhan wrote:
>
>
>> -----Original Message-----
>> From: svenkatr@gmail.com [mailto:svenkatr@gmail.com] On Behalf Of
>> Venkatraman S
>> Sent: Thursday, March 11, 2010 11:43 AM
>> To: Madhusudhan
>> Cc: linux-mmc@vger.kernel.org; linux-arm-kernel@lists.infradead.org;
>> linux-omap@vger.kernel.org
>> Subject: Re: [PATCH 03/03] omap hsmmc: adaptation of sdma descriptor
>> autoloading feature
>>
>> Madhusudhan <madhu.cr@ti.com> wrote:
>> >> -----Original Message-----
>> >> From: svenkatr@gmail.com [mailto:svenkatr@gmail.com] On Behalf Of
>> >> Venkatraman S
>> >> Sent: Thursday, March 11, 2010 4:52 AM
>> >> To: Madhusudhan
>> >> Cc: linux-mmc@vger.kernel.org; linux-arm-kernel@lists.infradead.org;
>> >> linux-omap@vger.kernel.org
>> >> Subject: Re: [PATCH 03/03] omap hsmmc: adaptation of sdma descriptor
>> >> autoloading feature
>> >>
>> >> Madhusudhan <madhu.cr@ti.com> wrote:
>> >> >> -----Original Message-----
>> >> >> From: linux-mmc-owner@vger.kernel.org [mailto:linux-mmc-
>> >> >> owner@vger.kernel.org] On Behalf Of Venkatraman S
>> >> >> Sent: Monday, March 01, 2010 5:27 AM
>> >> >> To: linux-mmc@vger.kernel.org; linux-arm-kernel@lists.infradead.org;
>> >> >> linux-omap@vger.kernel.org
>> >> >> Subject: [PATCH 03/03] omap hsmmc: adaptation of sdma descriptor
>> >> >> autoloading feature
>> >> >>
>> >> >> Start to use the sDMA descriptor autoloading feature.
>> >> >> For large datablocks, the MMC driver has to repeatedly setup,
>> program
>> >> >> and teardown the
>> >> >> dma channel for each element of the sglist received in
>> >> omap_hsmmc_request.
>> >> >>
>> >> >> By using descriptor autoloading, transfers from / to each element of
>> >> >> the sglist is pre programmed
>> >> >> into a linked list. The sDMA driver completes the entire transaction
>> >> >> and provides a single interrupt.
>> >> >>
>> >> >> Due to this, number of dma interrupts for a typical 100MB transfer
>> on
>> >> the
>> >> >> MMC is
>> >> >> reduced from 25000 to about 400 (approximate). Transfer speeds are
>> >> >> improved by ~5%
>> >> >> (Though it varies on the size of read / write & improves on huge
>> >> >> transfers)
>> >> >>
>> >> >> Descriptor autoloading is available only in 3630 and 4430 (as of
>> now).
>> >> >> Hence normal DMA
>> >> >> mode is also retained.
>> >> >>
>> >> >> Tested on omap4430 sdp.
>> >> >>
>> >> >> Signed-off-by: Venkatraman S <svenkatr@ti.com>
>> >> >
>> >> > I don't see any issues with this patch except the concern I had on
>> the
>> >> first
>> >> > patch in the series. Why is that change linked to this series?
>> >> >
>> >>   Thanks. The problem was seen only in the context of using descriptor
>> >> load. Would
>> >> you prefer that I post it as a separate patch ?
>> >
>> > My point is why that change is needed for this feature to work?
>> >
>> > When DMA is completed and a callback is received the ch can be freed.
>> Once
>> > TC is received the core is notified of the same.
>> >
>> > Can the first patch be dropped? Or do you see issues?
>> Yes there are issues without this patch when the scatterlist is large
>> (300+ blocks), where the dma completion interrupt is received but the
>> mmc driver hangs waiting for TC. I don't see the issue if I delay the
>> execution of omap_free_dma inside the dma callback.
>
> This is strange. Ideally after the dma cb is received the transfer complete
> interrupt should fire.
>
> Your first patch would break a corner erroneous case the driver is already
> handling. A scenario where TC was received before DMA cb came. There is
> timeout logic in the driver which handles this case to let the request
> succeed if a dma cb was received after a while otherwise err out. See the
> function omap_hsmmc_start_dma_transfer.
>
> Is there a way to keep both the cases handled? If not we have to make
> changes based on which of these scenario is very odd.

   I think these cases are already handled properly. All actions are
taken if, and only if, TC is received. Once TC is received, the
sequence of execution is unaltered by the DMA callback; the DMA
callback is effectively a no-op after the final block is transmitted.

   We can in fact get rid of the timeout logic. Of course, the DMA
channel is cleared once the TC is received, hence there would be no
spurious DMA callbacks during the start of the next transaction. [The
only hanging case is when the TC is never triggered, even after a
successful transfer, but that's another issue altogether.]
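
   As an untested illustration of that ordering (not the driver's
actual code), the TC path alone can finish the request and release the
channel, which is what makes the DMA-callback timeout unnecessary:

/* Sketch only: completion is driven purely by the TC interrupt. */
static void example_tc_handler(struct omap_hsmmc_host *host)
{
	if (host->dma_in_use && host->dma_ch != -1) {
		omap_stop_dma(host->dma_ch);
		omap_free_dma(host->dma_ch);
		host->dma_ch = -1;
	}
	omap_hsmmc_xfer_done(host, host->data);
}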

Regards,
Venkat.

^ permalink raw reply	[flat|nested] 24+ messages in thread

* RE: [PATCH 03/03] omap hsmmc: adaptation of sdma descriptor autoloading feature
  2010-03-11 17:42         ` Venkatraman S
@ 2010-03-12  2:29           ` Madhusudhan
  -1 siblings, 0 replies; 24+ messages in thread
From: Madhusudhan @ 2010-03-12  2:29 UTC (permalink / raw)
  To: 'Venkatraman S'; +Cc: linux-mmc, linux-arm-kernel, linux-omap



> -----Original Message-----
> From: svenkatr@gmail.com [mailto:svenkatr@gmail.com] On Behalf Of
> Venkatraman S
> Sent: Thursday, March 11, 2010 11:43 AM
> To: Madhusudhan
> Cc: linux-mmc@vger.kernel.org; linux-arm-kernel@lists.infradead.org;
> linux-omap@vger.kernel.org
> Subject: Re: [PATCH 03/03] omap hsmmc: adaptation of sdma descriptor
> autoloading feature
> 
> Madhusudhan <madhu.cr@ti.com> wrote:
> >> -----Original Message-----
> >> From: svenkatr@gmail.com [mailto:svenkatr@gmail.com] On Behalf Of
> >> Venkatraman S
> >> Sent: Thursday, March 11, 2010 4:52 AM
> >> To: Madhusudhan
> >> Cc: linux-mmc@vger.kernel.org; linux-arm-kernel@lists.infradead.org;
> >> linux-omap@vger.kernel.org
> >> Subject: Re: [PATCH 03/03] omap hsmmc: adaptation of sdma descriptor
> >> autoloading feature
> >>
> >> Madhusudhan <madhu.cr@ti.com> wrote:
> >> >> -----Original Message-----
> >> >> From: linux-mmc-owner@vger.kernel.org [mailto:linux-mmc-
> >> >> owner@vger.kernel.org] On Behalf Of Venkatraman S
> >> >> Sent: Monday, March 01, 2010 5:27 AM
> >> >> To: linux-mmc@vger.kernel.org; linux-arm-kernel@lists.infradead.org;
> >> >> linux-omap@vger.kernel.org
> >> >> Subject: [PATCH 03/03] omap hsmmc: adaptation of sdma descriptor
> >> >> autoloading feature
> >> >>
> >> >> Start to use the sDMA descriptor autoloading feature.
> >> >> For large datablocks, the MMC driver has to repeatedly setup,
> program
> >> >> and teardown the
> >> >> dma channel for each element of the sglist received in
> >> omap_hsmmc_request.
> >> >>
> >> >> By using descriptor autoloading, transfers from / to each element of
> >> >> the sglist is pre programmed
> >> >> into a linked list. The sDMA driver completes the entire transaction
> >> >> and provides a single interrupt.
> >> >>
> >> >> Due to this, number of dma interrupts for a typical 100MB transfer
> on
> >> the
> >> >> MMC is
> >> >> reduced from 25000 to about 400 (approximate). Transfer speeds are
> >> >> improved by ~5%
> >> >> (Though it varies on the size of read / write & improves on huge
> >> >> transfers)
> >> >>
> >> >> Descriptor autoloading is available only in 3630 and 4430 (as of
> now).
> >> >> Hence normal DMA
> >> >> mode is also retained.
> >> >>
> >> >> Tested on omap4430 sdp.
> >> >>
> >> >> Signed-off-by: Venkatraman S <svenkatr@ti.com>
> >> >
> >> > I don't see any issues with this patch except the concern I had on
> the
> >> first
> >> > patch in the series. Why is that change linked to this series?
> >> >
> >>   Thanks. The problem was seen only in the context of using descriptor
> >> load. Would
> >> you prefer that I post it as a separate patch ?
> >
> > My point is why that change is needed for this feature to work?
> >
> > When DMA is completed and a callback is received the ch can be freed.
> Once
> > TC is received the core is notified of the same.
> >
> > Can the first patch be dropped? Or do you see issues?
> Yes there are issues without this patch when the scatterlist is large
> (300+ blocks), where the dma completion interrupt is received but the
> mmc driver hangs waiting for TC. I don't see the issue if I delay the
> execution of omap_free_dma inside the dma callback.

This is strange. Ideally, after the DMA callback is received, the
transfer complete interrupt should fire.

Your first patch would break a corner error case the driver already
handles: a scenario where TC is received before the DMA callback
arrives. There is timeout logic in the driver which handles this case,
letting the request succeed if a DMA callback arrives after a while and
erring out otherwise. See the function omap_hsmmc_start_dma_transfer.
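
Roughly (untested sketch, not the actual driver code; the dma_cb_done
field below is hypothetical), the handling being referred to is:

#include <linux/completion.h>
#include <linux/jiffies.h>

/* After TC: wait briefly for the DMA callback before failing. */
static int example_wait_for_dma_cb(struct omap_hsmmc_host *host)
{
	/* hypothetical completion signalled from the DMA callback */
	if (!wait_for_completion_timeout(&host->dma_cb_done,
					 msecs_to_jiffies(100)))
		return -ETIMEDOUT;	/* no callback: err out */

	return 0;			/* callback arrived: let it succeed */
}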

Is there a way to keep both cases handled? If not, we have to decide
which of these scenarios is the truly unusual one and change the code
based on that.

Regards,
Madhu


^ permalink raw reply	[flat|nested] 24+ messages in thread

* Re: [PATCH 03/03] omap hsmmc: adaptation of sdma descriptor autoloading feature
  2010-03-11 16:27       ` Madhusudhan
@ 2010-03-11 17:42         ` Venkatraman S
  -1 siblings, 0 replies; 24+ messages in thread
From: Venkatraman S @ 2010-03-11 17:42 UTC (permalink / raw)
  To: Madhusudhan; +Cc: linux-mmc, linux-arm-kernel, linux-omap

Madhusudhan <madhu.cr@ti.com> wrote:
>> -----Original Message-----
>> From: svenkatr@gmail.com [mailto:svenkatr@gmail.com] On Behalf Of
>> Venkatraman S
>> Sent: Thursday, March 11, 2010 4:52 AM
>> To: Madhusudhan
>> Cc: linux-mmc@vger.kernel.org; linux-arm-kernel@lists.infradead.org;
>> linux-omap@vger.kernel.org
>> Subject: Re: [PATCH 03/03] omap hsmmc: adaptation of sdma descriptor
>> autoloading feature
>>
>> Madhusudhan <madhu.cr@ti.com> wrote:
>> >> -----Original Message-----
>> >> From: linux-mmc-owner@vger.kernel.org [mailto:linux-mmc-
>> >> owner@vger.kernel.org] On Behalf Of Venkatraman S
>> >> Sent: Monday, March 01, 2010 5:27 AM
>> >> To: linux-mmc@vger.kernel.org; linux-arm-kernel@lists.infradead.org;
>> >> linux-omap@vger.kernel.org
>> >> Subject: [PATCH 03/03] omap hsmmc: adaptation of sdma descriptor
>> >> autoloading feature
>> >>
>> >> Start to use the sDMA descriptor autoloading feature.
>> >> For large datablocks, the MMC driver has to repeatedly setup, program
>> >> and teardown the
>> >> dma channel for each element of the sglist received in
>> omap_hsmmc_request.
>> >>
>> >> By using descriptor autoloading, transfers from / to each element of
>> >> the sglist is pre programmed
>> >> into a linked list. The sDMA driver completes the entire transaction
>> >> and provides a single interrupt.
>> >>
>> >> Due to this, number of dma interrupts for a typical 100MB transfer on
>> the
>> >> MMC is
>> >> reduced from 25000 to about 400 (approximate). Transfer speeds are
>> >> improved by ~5%
>> >> (Though it varies on the size of read / write & improves on huge
>> >> transfers)
>> >>
>> >> Descriptor autoloading is available only in 3630 and 4430 (as of now).
>> >> Hence normal DMA
>> >> mode is also retained.
>> >>
>> >> Tested on omap4430 sdp.
>> >>
>> >> Signed-off-by: Venkatraman S <svenkatr@ti.com>
>> >
>> > I don't see any issues with this patch except the concern I had on the
>> first
>> > patch in the series. Why is that change linked to this series?
>> >
>>   Thanks. The problem was seen only in the context of using descriptor
>> load. Would
>> you prefer that I post it as a separate patch ?
>
> My point is why that change is needed for this feature to work?
>
> When DMA is completed and a callback is received the ch can be freed. Once
> TC is received the core is notified of the same.
>
> Can the first patch be dropped? Or do you see issues?
Yes, there are issues without this patch when the scatterlist is large
(300+ blocks): the DMA completion interrupt is received, but the MMC
driver hangs waiting for TC. I don't see the issue if I delay the
execution of omap_free_dma inside the DMA callback.

^ permalink raw reply	[flat|nested] 24+ messages in thread

* RE: [PATCH 03/03] omap hsmmc: adaptation of sdma descriptor autoloading feature
  2010-03-11 10:52     ` Venkatraman S
@ 2010-03-11 16:27       ` Madhusudhan
  -1 siblings, 0 replies; 24+ messages in thread
From: Madhusudhan @ 2010-03-11 16:27 UTC (permalink / raw)
  To: 'Venkatraman S'; +Cc: linux-mmc, linux-arm-kernel, linux-omap



> -----Original Message-----
> From: svenkatr@gmail.com [mailto:svenkatr@gmail.com] On Behalf Of
> Venkatraman S
> Sent: Thursday, March 11, 2010 4:52 AM
> To: Madhusudhan
> Cc: linux-mmc@vger.kernel.org; linux-arm-kernel@lists.infradead.org;
> linux-omap@vger.kernel.org
> Subject: Re: [PATCH 03/03] omap hsmmc: adaptation of sdma descriptor
> autoloading feature
> 
> Madhusudhan <madhu.cr@ti.com> wrote:
> >> -----Original Message-----
> >> From: linux-mmc-owner@vger.kernel.org [mailto:linux-mmc-
> >> owner@vger.kernel.org] On Behalf Of Venkatraman S
> >> Sent: Monday, March 01, 2010 5:27 AM
> >> To: linux-mmc@vger.kernel.org; linux-arm-kernel@lists.infradead.org;
> >> linux-omap@vger.kernel.org
> >> Subject: [PATCH 03/03] omap hsmmc: adaptation of sdma descriptor
> >> autoloading feature
> >>
> >> Start to use the sDMA descriptor autoloading feature.
> >> For large datablocks, the MMC driver has to repeatedly setup, program
> >> and teardown the
> >> dma channel for each element of the sglist received in
> omap_hsmmc_request.
> >>
> >> By using descriptor autoloading, transfers from / to each element of
> >> the sglist is pre programmed
> >> into a linked list. The sDMA driver completes the entire transaction
> >> and provides a single interrupt.
> >>
> >> Due to this, number of dma interrupts for a typical 100MB transfer on
> the
> >> MMC is
> >> reduced from 25000 to about 400 (approximate). Transfer speeds are
> >> improved by ~5%
> >> (Though it varies on the size of read / write & improves on huge
> >> transfers)
> >>
> >> Descriptor autoloading is available only in 3630 and 4430 (as of now).
> >> Hence normal DMA
> >> mode is also retained.
> >>
> >> Tested on omap4430 sdp.
> >>
> >> Signed-off-by: Venkatraman S <svenkatr@ti.com>
> >
> > I don't see any issues with this patch except the concern I had on the
> first
> > patch in the series. Why is that change linked to this series?
> >
>   Thanks. The problem was seen only in the context of using descriptor
> load. Would
> you prefer that I post it as a separate patch ?

My point is: why is that change needed for this feature to work?

When the DMA is completed and a callback is received, the channel can
be freed. Once TC is received, the core is notified of it.

Can the first patch be dropped? Or do you see issues?

> Regards,
> Venkat.


^ permalink raw reply	[flat|nested] 24+ messages in thread

* Re: [PATCH 03/03] omap hsmmc: adaptation of sdma descriptor autoloading feature
  2010-03-11  1:01   ` Madhusudhan
@ 2010-03-11 10:52     ` Venkatraman S
  -1 siblings, 0 replies; 24+ messages in thread
From: Venkatraman S @ 2010-03-11 10:52 UTC (permalink / raw)
  To: Madhusudhan; +Cc: linux-mmc, linux-arm-kernel, linux-omap

Madhusudhan <madhu.cr@ti.com> wrote:
>> -----Original Message-----
>> From: linux-mmc-owner@vger.kernel.org [mailto:linux-mmc-
>> owner@vger.kernel.org] On Behalf Of Venkatraman S
>> Sent: Monday, March 01, 2010 5:27 AM
>> To: linux-mmc@vger.kernel.org; linux-arm-kernel@lists.infradead.org;
>> linux-omap@vger.kernel.org
>> Subject: [PATCH 03/03] omap hsmmc: adaptation of sdma descriptor
>> autoloading feature
>>
>> Start to use the sDMA descriptor autoloading feature.
>> For large datablocks, the MMC driver has to repeatedly setup, program
>> and teardown the
>> dma channel for each element of the sglist received in omap_hsmmc_request.
>>
>> By using descriptor autoloading, transfers from / to each element of
>> the sglist is pre programmed
>> into a linked list. The sDMA driver completes the entire transaction
>> and provides a single interrupt.
>>
>> Due to this, number of dma interrupts for a typical 100MB transfer on the
>> MMC is
>> reduced from 25000 to about 400 (approximate). Transfer speeds are
>> improved by ~5%
>> (Though it varies on the size of read / write & improves on huge
>> transfers)
>>
>> Descriptor autoloading is available only in 3630 and 4430 (as of now).
>> Hence normal DMA
>> mode is also retained.
>>
>> Tested on omap4430 sdp.
>>
>> Signed-off-by: Venkatraman S <svenkatr@ti.com>
>
> I don't see any issues with this patch except the concern I had on the first
> patch in the series. Why is that change linked to this series?
>
  Thanks. The problem was seen only in the context of using descriptor
load. Would you prefer that I post it as a separate patch?
Regards,
Venkat.

^ permalink raw reply	[flat|nested] 24+ messages in thread

* RE: [PATCH 03/03] omap hsmmc: adaptation of sdma descriptor autoloading feature
  2010-03-01 11:27 ` Venkatraman S
@ 2010-03-11  1:01   ` Madhusudhan
  -1 siblings, 0 replies; 24+ messages in thread
From: Madhusudhan @ 2010-03-11  1:01 UTC (permalink / raw)
  To: 'Venkatraman S', linux-mmc, linux-arm-kernel, linux-omap



> -----Original Message-----
> From: linux-mmc-owner@vger.kernel.org [mailto:linux-mmc-
> owner@vger.kernel.org] On Behalf Of Venkatraman S
> Sent: Monday, March 01, 2010 5:27 AM
> To: linux-mmc@vger.kernel.org; linux-arm-kernel@lists.infradead.org;
> linux-omap@vger.kernel.org
> Subject: [PATCH 03/03] omap hsmmc: adaptation of sdma descriptor
> autoloading feature
> 
> Start to use the sDMA descriptor autoloading feature.
> For large datablocks, the MMC driver has to repeatedly setup, program
> and teardown the
> dma channel for each element of the sglist received in omap_hsmmc_request.
> 
> By using descriptor autoloading, transfers from / to each element of
> the sglist is pre programmed
> into a linked list. The sDMA driver completes the entire transaction
> and provides a single interrupt.
> 
> Due to this, number of dma interrupts for a typical 100MB transfer on the
> MMC is
> reduced from 25000 to about 400 (approximate). Transfer speeds are
> improved by ~5%
> (Though it varies on the size of read / write & improves on huge
> transfers)
> 
> Descriptor autoloading is available only in 3630 and 4430 (as of now).
> Hence normal DMA
> mode is also retained.
> 
> Tested on omap4430 sdp.
> 
> Signed-off-by: Venkatraman S <svenkatr@ti.com>

I don't see any issues with this patch except the concern I had on the first
patch in the series. Why is that change linked to this series?

> ---
>  drivers/mmc/host/omap_hsmmc.c |  143 +++++++++++++++++++++++++++++++++++-
> -----
>  1 files changed, 122 insertions(+), 21 deletions(-)
> 
> diff --git a/drivers/mmc/host/omap_hsmmc.c b/drivers/mmc/host/omap_hsmmc.c
> index 06337f6..425129b 100644
> --- a/drivers/mmc/host/omap_hsmmc.c
> +++ b/drivers/mmc/host/omap_hsmmc.c
> @@ -102,6 +102,7 @@
>  #define SRD			(1 << 26)
>  #define SOFTRESET		(1 << 1)
>  #define RESETDONE		(1 << 0)
> +#define DMA_ICR_QUIET		0xD00
> 
>  /*
>   * FIXME: Most likely all the data using these _DEVID defines should come
> @@ -118,6 +119,12 @@
>  #define OMAP_MMC_MASTER_CLOCK	96000000
>  #define DRIVER_NAME		"mmci-omap-hs"
> 
> +#define DMA_TYPE_NODMA	0
> +#define DMA_TYPE_SDMA	1
> +#define DMA_TYPE_SDMA_DLOAD 2
> +
> +#define DMA_CTRL_BUF_SIZE	(PAGE_SIZE * 3)
> +
>  /* Timeouts for entering power saving states on inactivity, msec */
>  #define OMAP_MMC_DISABLED_TIMEOUT	100
>  #define OMAP_MMC_SLEEP_TIMEOUT		1000
> @@ -172,7 +179,11 @@ struct omap_hsmmc_host {
>  	u32			bytesleft;
>  	int			suspended;
>  	int			irq;
> -	int			use_dma, dma_ch;
> +	int			dma_caps;
> +	int			dma_in_use;
> +	int			dma_ch;
> +	void		*dma_ctrl_buf;
> +	dma_addr_t 	dma_ctrl_buf_phy;
>  	int			dma_line_tx, dma_line_rx;
>  	int			slot_id;
>  	int			got_dbclk;
> @@ -768,7 +779,7 @@ omap_hsmmc_start_command(struct omap_hsmmc_host
> *host, struct mmc_command *cmd,
>  	OMAP_HSMMC_WRITE(host->base, STAT, STAT_CLEAR);
>  	OMAP_HSMMC_WRITE(host->base, ISE, INT_EN_MASK);
> 
> -	if (host->use_dma)
> +	if (host->dma_in_use)
>  		OMAP_HSMMC_WRITE(host->base, IE,
>  				 INT_EN_MASK & ~(BRR_ENABLE | BWR_ENABLE));
>  	else
> @@ -803,7 +814,7 @@ omap_hsmmc_start_command(struct omap_hsmmc_host
> *host, struct mmc_command *cmd,
>  			cmdreg &= ~(DDIR);
>  	}
> 
> -	if (host->use_dma)
> +	if (host->dma_in_use)
>  		cmdreg |= DMA_EN;
> 
>  	/*
> @@ -850,7 +861,7 @@ omap_hsmmc_xfer_done(struct omap_hsmmc_host *host,
> struct mmc_data *data)
> 
>  	host->data = NULL;
> 
> -	if (host->use_dma && host->dma_ch != -1)
> +	if (host->dma_in_use && host->dma_ch != -1)
>  		dma_unmap_sg(mmc_dev(host->mmc), data->sg, host->dma_len,
>  			omap_hsmmc_get_dma_dir(host, data));
> 
> @@ -900,7 +911,7 @@ static void omap_hsmmc_dma_cleanup(struct
> omap_hsmmc_host *host, int errno)
>  {
>  	host->data->error = errno;
> 
> -	if (host->use_dma && host->dma_ch != -1) {
> +	if (host->dma_in_use && host->dma_ch != -1) {
>  		dma_unmap_sg(mmc_dev(host->mmc), host->data->sg, host->dma_len,
>  			omap_hsmmc_get_dma_dir(host, host->data));
>  		omap_free_dma(host->dma_ch);
> @@ -1253,7 +1264,6 @@ static void omap_hsmmc_config_dma_params(struct
> omap_hsmmc_host *host,
>  			omap_hsmmc_get_dma_sync_dev(host, data),
>  			!(data->flags & MMC_DATA_WRITE));
> 
> -	omap_start_dma(dma_ch);
>  }
> 
>  /*
> @@ -1268,21 +1278,32 @@ static void omap_hsmmc_dma_cb(int lch, u16
> ch_status, void *data)
> 
>  	if (host->dma_ch < 0)
>  		return;
> -
> -	host->dma_sg_idx++;
> -	if (host->dma_sg_idx < host->dma_len) {
> -		/* Fire up the next transfer. */
> -		omap_hsmmc_config_dma_params(host, host->data,
> +	if (host->dma_in_use == DMA_TYPE_SDMA) {
> +		host->dma_sg_idx++;
> +		if (host->dma_sg_idx < host->dma_len) {
> +			/* Fire up the next transfer. */
> +			omap_hsmmc_config_dma_params(host, host->data,
>  					   host->data->sg + host->dma_sg_idx);
> -		return;
> +			omap_start_dma(host->dma_ch);
> +			return;
> +		}
>  	}
> 
>  }
> 
> +static int omap_hsmmc_start_dma_transfer(struct omap_hsmmc_host *host)
> +{
> +	if (host->dma_in_use == DMA_TYPE_SDMA)
> +		omap_start_dma(host->dma_ch);
> +	else if (host->dma_in_use == DMA_TYPE_SDMA_DLOAD)
> +		return omap_start_dma_sglist_transfers(host->dma_ch, -1);
> +
> +	return 0;
> +}
>  /*
> - * Routine to configure and start DMA for the MMC card
> + * Routine to configure DMA for the MMC card
>   */
> -static int omap_hsmmc_start_dma_transfer(struct omap_hsmmc_host *host,
> +static int omap_hsmmc_configure_sdma(struct omap_hsmmc_host *host,
>  					struct mmc_request *req)
>  {
>  	int dma_ch = 0, ret = 0, err = 1, i;
> @@ -1339,6 +1360,56 @@ static int omap_hsmmc_start_dma_transfer(struct
> omap_hsmmc_host *host,
>  	return 0;
>  }
> 
> +static int omap_hsmmc_configure_sdma_sglist(struct omap_hsmmc_host *host,
> +		struct mmc_request *req)
> +{
> +	int i;
> +	struct omap_dma_sglist_node *sglist, *snode;
> +	struct mmc_data *data = req->data;
> +	int blksz;
> +	int dmadir = omap_hsmmc_get_dma_dir(host, data);
> +	struct omap_dma_sglist_type2a_params *t2p;
> +
> +	sglist = (struct omap_dma_sglist_node *) host->dma_ctrl_buf;
> +	snode = sglist;
> +	blksz = host->data->blksz;
> +
> +	if ((host->dma_len * sizeof(*snode)) > DMA_CTRL_BUF_SIZE) {
> +		dev_err(mmc_dev(host->mmc), "not enough sglist memory %d\n",
> +			host->dma_len);
> +		return -ENOMEM;
> +	}
> +	for (i = 0; i < host->dma_len; snode++, i++) {
> +		snode->desc_type = OMAP_DMA_SGLIST_DESCRIPTOR_TYPE2a;
> +		snode->num_of_elem = blksz / 4;
> +		t2p = &snode->sg_node.t2a;
> +
> +		if (dmadir == DMA_FROM_DEVICE) {
> +			t2p->src_addr = host->mapbase + OMAP_HSMMC_DATA;
> +			t2p->dst_addr = sg_dma_address(data->sg + i);
> +		} else {
> +			t2p->dst_addr = host->mapbase + OMAP_HSMMC_DATA;
> +			t2p->src_addr = sg_dma_address(data->sg + i);
> +		}
> +		snode->flags =
> +			OMAP_DMA_LIST_DST_VALID | OMAP_DMA_LIST_SRC_VALID;
> +
> +		t2p->cfn_fn = sg_dma_len(data->sg + i) / host->data->blksz;
> +		t2p->cicr = DMA_ICR_QUIET;
> +
> +		t2p->dst_frame_idx_or_pkt_size = 0;
> +		t2p->src_frame_idx_or_pkt_size = 0;
> +		t2p->dst_elem_idx = 0;
> +		t2p->src_elem_idx = 0;
> +	}
> +	dev_dbg(mmc_dev(host->mmc), "new sglist %x len =%d\n",
> +			host->dma_ctrl_buf_phy, i);
> +	omap_set_dma_sglist_mode(host->dma_ch, sglist,
> +			host->dma_ctrl_buf_phy, i, NULL);
> +	omap_dma_set_sglist_fastmode(host->dma_ch, 1);
> +	return 0;
> +}
> +
>  static void set_data_timeout(struct omap_hsmmc_host *host,
>  			     unsigned int timeout_ns,
>  			     unsigned int timeout_clks)
> @@ -1400,14 +1471,23 @@ omap_hsmmc_prepare_data(struct omap_hsmmc_host
> *host, struct mmc_request *req)
>  					| (req->data->blocks << 16));
>  	set_data_timeout(host, req->data->timeout_ns, req->data->timeout_clks);
> 
> -	if (host->use_dma) {
> -		ret = omap_hsmmc_start_dma_transfer(host, req);
> -		if (ret != 0) {
> -			dev_dbg(mmc_dev(host->mmc), "MMC start dma failure\n");
> +	if (host->dma_caps & DMA_TYPE_SDMA) {
> +		ret = omap_hsmmc_configure_sdma(host, req);
> +		if (ret)
>  			return ret;
> -		}
> +		host->dma_in_use = DMA_TYPE_SDMA;
>  	}
> -	return 0;
> +	if ((host->dma_caps & DMA_TYPE_SDMA_DLOAD) &&
> +		host->data->sg_len > 4) {
> +		ret = omap_hsmmc_configure_sdma_sglist(host, req);
> +		if (ret)
> +			return ret;
> +		host->dma_in_use = DMA_TYPE_SDMA_DLOAD;
> +
> +	}
> +	ret = omap_hsmmc_start_dma_transfer(host);
> +	return ret;
> +
>  }
> 
>  /*
> @@ -1999,7 +2079,9 @@ static int __init omap_hsmmc_probe(struct
> platform_device *pdev)
>  	host->mmc	= mmc;
>  	host->pdata	= pdata;
>  	host->dev	= &pdev->dev;
> -	host->use_dma	= 1;
> +	host->dma_caps	= DMA_TYPE_SDMA;
> +	host->dma_in_use	= DMA_TYPE_NODMA;
> +	host->dma_ctrl_buf = NULL;
>  	host->dev->dma_mask = &pdata->dma_mask;
>  	host->dma_ch	= -1;
>  	host->irq	= irq;
> @@ -2081,6 +2163,15 @@ static int __init omap_hsmmc_probe(struct
> platform_device *pdev)
>  							" clk failed\n");
>  	}
> 
> +	if (cpu_is_omap44xx() || cpu_is_omap3630()) {
> +		host->dma_ctrl_buf = dma_alloc_coherent(NULL,
> +					DMA_CTRL_BUF_SIZE,
> +					&host->dma_ctrl_buf_phy,
> +					0);
> +		if (host->dma_ctrl_buf != NULL)
> +			host->dma_caps |= DMA_TYPE_SDMA_DLOAD;
> +	}
> +
>  	/* Since we do only SG emulation, we can have as many segs
>  	 * as we want. */
>  	mmc->max_phys_segs = 1024;
> @@ -2207,6 +2298,10 @@ err_reg:
>  err_irq_cd_init:
>  	free_irq(host->irq, host);
>  err_irq:
> +	if (host->dma_ctrl_buf)
> +		dma_free_coherent(NULL, DMA_CTRL_BUF_SIZE,
> +				host->dma_ctrl_buf,
> +				host->dma_ctrl_buf_phy);
>  	mmc_host_disable(host->mmc);
>  	clk_disable(host->iclk);
>  	clk_put(host->fclk);
> @@ -2234,6 +2329,12 @@ static int omap_hsmmc_remove(struct
> platform_device *pdev)
>  	if (host) {
>  		mmc_host_enable(host->mmc);
>  		mmc_remove_host(host->mmc);
> +
> +		if (host->dma_ctrl_buf != NULL) {
> +			dma_free_coherent(NULL, DMA_CTRL_BUF_SIZE,
> +				host->dma_ctrl_buf,
> +				host->dma_ctrl_buf_phy);
> +		}
>  		if (host->use_reg)
>  			omap_hsmmc_reg_put(host);
>  		if (host->pdata->cleanup)
> --
> 1.6.3.3
> --
> To unsubscribe from this list: send the line "unsubscribe linux-mmc" in
> the body of a message to majordomo@vger.kernel.org
> More majordomo info at  http://vger.kernel.org/majordomo-info.html


^ permalink raw reply	[flat|nested] 24+ messages in thread

* [PATCH 03/03] omap hsmmc: adaptation of sdma descriptor autoloading feature
@ 2010-03-01 11:27 ` Venkatraman S
  0 siblings, 0 replies; 24+ messages in thread
From: Venkatraman S @ 2010-03-01 11:27 UTC (permalink / raw)
  To: linux-mmc, linux-arm-kernel, linux-omap

Start to use the sDMA descriptor autoloading feature.
For large datablocks, the MMC driver has to repeatedly set up, program
and tear down the DMA channel for each element of the sglist received
in omap_hsmmc_request.

By using descriptor autoloading, the transfers from / to each element
of the sglist are pre-programmed into a linked list. The sDMA driver
completes the entire transaction and raises a single interrupt.

Due to this, the number of DMA interrupts for a typical 100MB transfer
on the MMC is reduced from roughly 25000 to about 400. Transfer speeds
improve by ~5% (though this varies with the size of the reads / writes
and improves further on huge transfers).

Descriptor autoloading is available only on 3630 and 4430 (as of now),
so the normal DMA mode is also retained.

Tested on omap4430 sdp.

Signed-off-by: Venkatraman S <svenkatr@ti.com>
---
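As a rough sanity check of the interrupt counts quoted above, assuming
~4 KiB scatterlist segments (an illustrative assumption, not a figure
taken from this patch):

	100 MB / 4 KiB per segment   ~= 25600 segments
	                              -> ~25000 interrupts, one per segment,
	                                 without autoloading
	100 MB / ~400 interrupts     ~= 256 KB per request
	                              -> one interrupt per autoloaded request,
	                                 independent of sg_len
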
 drivers/mmc/host/omap_hsmmc.c |  143 +++++++++++++++++++++++++++++++++++------
 1 files changed, 122 insertions(+), 21 deletions(-)

diff --git a/drivers/mmc/host/omap_hsmmc.c b/drivers/mmc/host/omap_hsmmc.c
index 06337f6..425129b 100644
--- a/drivers/mmc/host/omap_hsmmc.c
+++ b/drivers/mmc/host/omap_hsmmc.c
@@ -102,6 +102,7 @@
 #define SRD			(1 << 26)
 #define SOFTRESET		(1 << 1)
 #define RESETDONE		(1 << 0)
+#define DMA_ICR_QUIET		0xD00

 /*
  * FIXME: Most likely all the data using these _DEVID defines should come
@@ -118,6 +119,12 @@
 #define OMAP_MMC_MASTER_CLOCK	96000000
 #define DRIVER_NAME		"mmci-omap-hs"

+#define DMA_TYPE_NODMA	0
+#define DMA_TYPE_SDMA	1
+#define DMA_TYPE_SDMA_DLOAD 2
+
+#define DMA_CTRL_BUF_SIZE	(PAGE_SIZE * 3)
+
 /* Timeouts for entering power saving states on inactivity, msec */
 #define OMAP_MMC_DISABLED_TIMEOUT	100
 #define OMAP_MMC_SLEEP_TIMEOUT		1000
@@ -172,7 +179,11 @@ struct omap_hsmmc_host {
 	u32			bytesleft;
 	int			suspended;
 	int			irq;
-	int			use_dma, dma_ch;
+	int			dma_caps;
+	int			dma_in_use;
+	int			dma_ch;
+	void		*dma_ctrl_buf;
+	dma_addr_t 	dma_ctrl_buf_phy;
 	int			dma_line_tx, dma_line_rx;
 	int			slot_id;
 	int			got_dbclk;
@@ -768,7 +779,7 @@ omap_hsmmc_start_command(struct omap_hsmmc_host
*host, struct mmc_command *cmd,
 	OMAP_HSMMC_WRITE(host->base, STAT, STAT_CLEAR);
 	OMAP_HSMMC_WRITE(host->base, ISE, INT_EN_MASK);

-	if (host->use_dma)
+	if (host->dma_in_use)
 		OMAP_HSMMC_WRITE(host->base, IE,
 				 INT_EN_MASK & ~(BRR_ENABLE | BWR_ENABLE));
 	else
@@ -803,7 +814,7 @@ omap_hsmmc_start_command(struct omap_hsmmc_host
*host, struct mmc_command *cmd,
 			cmdreg &= ~(DDIR);
 	}

-	if (host->use_dma)
+	if (host->dma_in_use)
 		cmdreg |= DMA_EN;

 	/*
@@ -850,7 +861,7 @@ omap_hsmmc_xfer_done(struct omap_hsmmc_host *host,
struct mmc_data *data)

 	host->data = NULL;

-	if (host->use_dma && host->dma_ch != -1)
+	if (host->dma_in_use && host->dma_ch != -1)
 		dma_unmap_sg(mmc_dev(host->mmc), data->sg, host->dma_len,
 			omap_hsmmc_get_dma_dir(host, data));

@@ -900,7 +911,7 @@ static void omap_hsmmc_dma_cleanup(struct
omap_hsmmc_host *host, int errno)
 {
 	host->data->error = errno;

-	if (host->use_dma && host->dma_ch != -1) {
+	if (host->dma_in_use && host->dma_ch != -1) {
 		dma_unmap_sg(mmc_dev(host->mmc), host->data->sg, host->dma_len,
 			omap_hsmmc_get_dma_dir(host, host->data));
 		omap_free_dma(host->dma_ch);
@@ -1253,7 +1264,6 @@ static void omap_hsmmc_config_dma_params(struct
omap_hsmmc_host *host,
 			omap_hsmmc_get_dma_sync_dev(host, data),
 			!(data->flags & MMC_DATA_WRITE));

-	omap_start_dma(dma_ch);
 }

 /*
@@ -1268,21 +1278,32 @@ static void omap_hsmmc_dma_cb(int lch, u16
ch_status, void *data)

 	if (host->dma_ch < 0)
 		return;
-
-	host->dma_sg_idx++;
-	if (host->dma_sg_idx < host->dma_len) {
-		/* Fire up the next transfer. */
-		omap_hsmmc_config_dma_params(host, host->data,
+	if (host->dma_in_use == DMA_TYPE_SDMA) {
+		host->dma_sg_idx++;
+		if (host->dma_sg_idx < host->dma_len) {
+			/* Fire up the next transfer. */
+			omap_hsmmc_config_dma_params(host, host->data,
 					   host->data->sg + host->dma_sg_idx);
-		return;
+			omap_start_dma(host->dma_ch);
+			return;
+		}
 	}

 }

+static int omap_hsmmc_start_dma_transfer(struct omap_hsmmc_host *host)
+{
+	if (host->dma_in_use == DMA_TYPE_SDMA)
+		omap_start_dma(host->dma_ch);
+	else if (host->dma_in_use == DMA_TYPE_SDMA_DLOAD)
+		return omap_start_dma_sglist_transfers(host->dma_ch, -1);
+
+	return 0;
+}
 /*
- * Routine to configure and start DMA for the MMC card
+ * Routine to configure DMA for the MMC card
  */
-static int omap_hsmmc_start_dma_transfer(struct omap_hsmmc_host *host,
+static int omap_hsmmc_configure_sdma(struct omap_hsmmc_host *host,
 					struct mmc_request *req)
 {
 	int dma_ch = 0, ret = 0, err = 1, i;
@@ -1339,6 +1360,56 @@ static int omap_hsmmc_start_dma_transfer(struct
omap_hsmmc_host *host,
 	return 0;
 }

+static int omap_hsmmc_configure_sdma_sglist(struct omap_hsmmc_host *host,
+		struct mmc_request *req)
+{
+	int i;
+	struct omap_dma_sglist_node *sglist, *snode;
+	struct mmc_data *data = req->data;
+	int blksz;
+	int dmadir = omap_hsmmc_get_dma_dir(host, data);
+	struct omap_dma_sglist_type2a_params *t2p;
+
+	sglist = (struct omap_dma_sglist_node *) host->dma_ctrl_buf;
+	snode = sglist;
+	blksz = host->data->blksz;
+
+	if ((host->dma_len * sizeof(*snode)) > DMA_CTRL_BUF_SIZE) {
+		dev_err(mmc_dev(host->mmc), "not enough sglist memory %d\n",
+			host->dma_len);
+		return -ENOMEM;
+	}
+	for (i = 0; i < host->dma_len; snode++, i++) {
+		snode->desc_type = OMAP_DMA_SGLIST_DESCRIPTOR_TYPE2a;
+		snode->num_of_elem = blksz / 4;
+		t2p = &snode->sg_node.t2a;
+
+		if (dmadir == DMA_FROM_DEVICE) {
+			t2p->src_addr = host->mapbase + OMAP_HSMMC_DATA;
+			t2p->dst_addr = sg_dma_address(data->sg + i);
+		} else {
+			t2p->dst_addr = host->mapbase + OMAP_HSMMC_DATA;
+			t2p->src_addr = sg_dma_address(data->sg + i);
+		}
+		snode->flags =
+			OMAP_DMA_LIST_DST_VALID | OMAP_DMA_LIST_SRC_VALID;
+
+		t2p->cfn_fn = sg_dma_len(data->sg + i) / host->data->blksz;
+		t2p->cicr = DMA_ICR_QUIET;
+
+		t2p->dst_frame_idx_or_pkt_size = 0;
+		t2p->src_frame_idx_or_pkt_size = 0;
+		t2p->dst_elem_idx = 0;
+		t2p->src_elem_idx = 0;
+	}
+	dev_dbg(mmc_dev(host->mmc), "new sglist %x len =%d\n",
+			host->dma_ctrl_buf_phy, i);
+	omap_set_dma_sglist_mode(host->dma_ch, sglist,
+			host->dma_ctrl_buf_phy, i, NULL);
+	omap_dma_set_sglist_fastmode(host->dma_ch, 1);
+	return 0;
+}
+
 static void set_data_timeout(struct omap_hsmmc_host *host,
 			     unsigned int timeout_ns,
 			     unsigned int timeout_clks)
@@ -1400,14 +1471,23 @@ omap_hsmmc_prepare_data(struct omap_hsmmc_host
*host, struct mmc_request *req)
 					| (req->data->blocks << 16));
 	set_data_timeout(host, req->data->timeout_ns, req->data->timeout_clks);

-	if (host->use_dma) {
-		ret = omap_hsmmc_start_dma_transfer(host, req);
-		if (ret != 0) {
-			dev_dbg(mmc_dev(host->mmc), "MMC start dma failure\n");
+	if (host->dma_caps & DMA_TYPE_SDMA) {
+		ret = omap_hsmmc_configure_sdma(host, req);
+		if (ret)
 			return ret;
-		}
+		host->dma_in_use = DMA_TYPE_SDMA;
 	}
-	return 0;
+	if ((host->dma_caps & DMA_TYPE_SDMA_DLOAD) &&
+		host->data->sg_len > 4) {
+		ret = omap_hsmmc_configure_sdma_sglist(host, req);
+		if (ret)
+			return ret;
+		host->dma_in_use = DMA_TYPE_SDMA_DLOAD;
+
+	}
+	ret = omap_hsmmc_start_dma_transfer(host);
+	return ret;
+
 }

 /*
@@ -1999,7 +2079,9 @@ static int __init omap_hsmmc_probe(struct
platform_device *pdev)
 	host->mmc	= mmc;
 	host->pdata	= pdata;
 	host->dev	= &pdev->dev;
-	host->use_dma	= 1;
+	host->dma_caps	= DMA_TYPE_SDMA;
+	host->dma_in_use	= DMA_TYPE_NODMA;
+	host->dma_ctrl_buf = NULL;
 	host->dev->dma_mask = &pdata->dma_mask;
 	host->dma_ch	= -1;
 	host->irq	= irq;
@@ -2081,6 +2163,15 @@ static int __init omap_hsmmc_probe(struct
platform_device *pdev)
 							" clk failed\n");
 	}

+	if (cpu_is_omap44xx() || cpu_is_omap3630()) {
+		host->dma_ctrl_buf = dma_alloc_coherent(NULL,
+					DMA_CTRL_BUF_SIZE,
+					&host->dma_ctrl_buf_phy,
+					0);
+		if (host->dma_ctrl_buf != NULL)
+			host->dma_caps |= DMA_TYPE_SDMA_DLOAD;
+	}
+
 	/* Since we do only SG emulation, we can have as many segs
 	 * as we want. */
 	mmc->max_phys_segs = 1024;
@@ -2207,6 +2298,10 @@ err_reg:
 err_irq_cd_init:
 	free_irq(host->irq, host);
 err_irq:
+	if (host->dma_ctrl_buf)
+		dma_free_coherent(NULL, DMA_CTRL_BUF_SIZE,
+				host->dma_ctrl_buf,
+				host->dma_ctrl_buf_phy);
 	mmc_host_disable(host->mmc);
 	clk_disable(host->iclk);
 	clk_put(host->fclk);
@@ -2234,6 +2329,12 @@ static int omap_hsmmc_remove(struct
platform_device *pdev)
 	if (host) {
 		mmc_host_enable(host->mmc);
 		mmc_remove_host(host->mmc);
+
+		if (host->dma_ctrl_buf != NULL) {
+			dma_free_coherent(NULL, DMA_CTRL_BUF_SIZE,
+				host->dma_ctrl_buf,
+				host->dma_ctrl_buf_phy);
+		}
 		if (host->use_reg)
 			omap_hsmmc_reg_put(host);
 		if (host->pdata->cleanup)
-- 
1.6.3.3
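
To make the element / frame arithmetic used when filling each type-2a
descriptor concrete, here is a small illustrative sketch; the 512-byte
block size and 64 KiB segment length are assumptions for the example,
not values taken from the patch:

	/*
	 * Sketch only: mirrors the per-descriptor geometry that
	 * omap_hsmmc_configure_sdma_sglist() programs into each type-2a
	 * node.  num_of_elem counts 32-bit FIFO accesses per block;
	 * cfn_fn counts blocks (frames) per scatterlist segment.
	 */
	static void sketch_desc_geometry(void)
	{
		unsigned int blksz   = 512;		/* assumed MMC block size */
		unsigned int seg_len = 64 * 1024;	/* assumed sg segment length */

		unsigned int num_of_elem = blksz / 4;	/* 128 elements per frame */
		unsigned int cfn_fn = seg_len / blksz;	/* 128 frames in this node */

		pr_info("num_of_elem=%u cfn_fn=%u\n", num_of_elem, cfn_fn);
	}

With DMA_CTRL_BUF_SIZE at three pages, the number of such nodes that fit
is DMA_CTRL_BUF_SIZE / sizeof(struct omap_dma_sglist_node), which is what
the dma_len check at the top of the function guards against.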

^ permalink raw reply related	[flat|nested] 24+ messages in thread

end of thread, other threads:[~2010-03-12  8:18 UTC | newest]

Thread overview: 24+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2010-03-10 14:12 [PATCH 03/03] omap hsmmc: adaptation of sdma descriptor autoloading feature Venkatraman S
2010-03-10 14:12 ` Venkatraman S
2010-03-10 22:41 ` Tony Lindgren
2010-03-10 22:41   ` Tony Lindgren
2010-03-11 15:08   ` Venkatraman S
2010-03-11 15:08     ` Venkatraman S
2010-03-11 18:39     ` Tony Lindgren
2010-03-11 18:39       ` Tony Lindgren
2010-03-12  8:18       ` Venkatraman S
2010-03-12  8:18         ` Venkatraman S
  -- strict thread matches above, loose matches on Subject: below --
2010-03-01 11:27 Venkatraman S
2010-03-01 11:27 ` Venkatraman S
2010-03-11  1:01 ` Madhusudhan
2010-03-11  1:01   ` Madhusudhan
2010-03-11 10:52   ` Venkatraman S
2010-03-11 10:52     ` Venkatraman S
2010-03-11 16:27     ` Madhusudhan
2010-03-11 16:27       ` Madhusudhan
2010-03-11 17:42       ` Venkatraman S
2010-03-11 17:42         ` Venkatraman S
2010-03-12  2:29         ` Madhusudhan
2010-03-12  2:29           ` Madhusudhan
2010-03-12  6:03           ` Venkatraman S
2010-03-12  6:03             ` Venkatraman S
