From: Sricharan R <sricharan@codeaurora.org>
To: devicetree@vger.kernel.org, linux-arm-msm@vger.kernel.org,
	linux-kernel@vger.kernel.org, linux-i2c@vger.kernel.org,
	iivanov@mm-sol.com, agross@codeaurora.org, galak@codeaurora.org,
	dmaengine@vger.kernel.org, linux-arm-kernel@lists.infradead.org
Cc: sricharan@codeaurora.org
Subject: [PATCH V3 4/6] i2c: qup: Transfer every i2c_msg in i2c_msgs without stop
Date: Sat, 11 Apr 2015 12:39:03 +0530
Message-ID: <1428736145-18361-5-git-send-email-sricharan@codeaurora.org>
In-Reply-To: <1428736145-18361-1-git-send-email-sricharan@codeaurora.org>

The definition of i2c_msg says that

"If this is the last message in a group, it is followed by a STOP.
  Otherwise it is followed by the next @i2c_msg transaction segment,
  beginning with a (repeated) START"

So the expectation is that there is no 'STOP' in between individual
i2c_msg segments, only a repeated 'START'. The QUP i2c hardware has no
way to be told not to put a 'STOP' at the end of a transaction. The only
way to implement the expected behaviour is to coalesce all the i2c_msg
segments in i2c_msgs into one transaction and transfer them together.
Add support for this.

This is required for clients such as touchscreens, which keep
incrementing a counter across individual transfers; a 'STOP' in between
resets that counter, which is not wanted.
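
As an illustration (hypothetical client code, not part of this patch),
a transfer that depends on this behaviour could be built as below; with
this change the controller emits START, the write segment, a repeated
START, the read segment and only then a single STOP:

#include <linux/i2c.h>
#include <linux/kernel.h>

/*
 * Hypothetical example: write a register address, then read it back,
 * with the only STOP coming after the last i2c_msg in the group.
 */
static int example_read_reg(struct i2c_client *client, u8 reg, u8 *val)
{
	struct i2c_msg msgs[2] = {
		{
			.addr  = client->addr,
			.flags = 0,		/* write: register address */
			.len   = 1,
			.buf   = &reg,
		},
		{
			.addr  = client->addr,
			.flags = I2C_M_RD,	/* repeated START, then read */
			.len   = 1,
			.buf   = val,
		},
	};
	int ret;

	ret = i2c_transfer(client->adapter, msgs, ARRAY_SIZE(msgs));
	return ret == ARRAY_SIZE(msgs) ? 0 : (ret < 0 ? ret : -EIO);
}

Without this change the driver would terminate each segment with a
'STOP', resetting the internal counter of such clients.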

Signed-off-by: Sricharan R <sricharan@codeaurora.org>
---
 drivers/i2c/busses/i2c-qup.c | 200 ++++++++++++++++++++++++++-----------------
 1 file changed, 122 insertions(+), 78 deletions(-)

diff --git a/drivers/i2c/busses/i2c-qup.c b/drivers/i2c/busses/i2c-qup.c
index 4463ff8..1b71043 100644
--- a/drivers/i2c/busses/i2c-qup.c
+++ b/drivers/i2c/busses/i2c-qup.c
@@ -890,76 +890,94 @@ static int qup_i2c_req_dma(struct qup_i2c_dev *qup)
 	return 0;
 }
 
-static int bam_do_xfer(struct qup_i2c_dev *qup, struct i2c_msg *msg)
+static int bam_do_xfer(struct qup_i2c_dev *qup, struct i2c_msg *msg, int num)
 {
 	struct dma_async_tx_descriptor *txd, *rxd = NULL;
 	int ret = 0;
 	dma_cookie_t cookie_rx, cookie_tx;
-	u32 rx_nents = 0, tx_nents = 0, len = 0;
-	/* QUP I2C read/write limit for single command is 256bytes max*/
-	int blocks = (msg->len + QUP_READ_LIMIT) / QUP_READ_LIMIT;
-	int rem = msg->len % QUP_READ_LIMIT;
-	int tlen, i = 0, tx_len = 0;
-
-	if (msg->flags & I2C_M_RD) {
-		tx_nents = 1;
-		rx_nents = (blocks << 1) + 1;
-		sg_init_table(qup->brx.sg_rx, rx_nents);
-
-		while (i < blocks) {
-			/* transfer length set to '0' implies 256 bytes */
-			tlen = (i == (blocks - 1)) ? rem : 0;
-			len += get_start_tag(&qup->start_tag.start[len],
-					      msg, !i, (i == (blocks - 1)),
-					      tlen);
-
-			qup_sg_set_buf(&qup->brx.sg_rx[i << 1],
-				       &qup->brx.scratch_tag.start[0],
-				       &qup->brx.scratch_tag,
-				       2, qup, 0, 0);
-
-			qup_sg_set_buf(&qup->brx.sg_rx[(i << 1) + 1],
-				       &msg->buf[QUP_READ_LIMIT * i],
-				       NULL, tlen, qup, 1,
-				       DMA_FROM_DEVICE);
-
-			i++;
-		}
-
-		sg_init_one(qup->btx.sg_tx, &qup->start_tag.start[0], len);
-		qup_sg_set_buf(qup->btx.sg_tx, &qup->start_tag.start[0],
-			       &qup->start_tag, len, qup, 0, 0);
-		qup_sg_set_buf(&qup->brx.sg_rx[i << 1],
-			       &qup->brx.scratch_tag.start[1],
-			       &qup->brx.scratch_tag, 2,
-			       qup, 0, 0);
-	} else {
-		qup->btx.footer_tag.start[0] = QUP_BAM_FLUSH_STOP;
-		qup->btx.footer_tag.start[1] = QUP_BAM_FLUSH_STOP;
-
-		tx_nents = (blocks << 1) + 1;
-		sg_init_table(qup->btx.sg_tx, tx_nents);
-
-		while (i < blocks) {
-			tlen = (i == (blocks - 1)) ? rem : 0;
-			len = get_start_tag(&qup->start_tag.start[tx_len],
-					    msg, !i, (i == (blocks - 1)), tlen);
-
-			qup_sg_set_buf(&qup->btx.sg_tx[i << 1],
-				       &qup->start_tag.start[tx_len],
-				       &qup->start_tag,
-				       len, qup, 0, 0);
-
-			tx_len += len;
-			qup_sg_set_buf(&qup->btx.sg_tx[(i << 1) + 1],
-				       &msg->buf[QUP_READ_LIMIT * i], NULL,
-				       tlen, qup, 1, DMA_TO_DEVICE);
-			i++;
+	u32 rx_nents = 0, tx_nents = 0, len, blocks, rem, last;
+	u32 cur_rx_nents, cur_tx_nents;
+	u32 tlen, i, tx_len, tx_buf = 0, rx_buf = 0, off = 0;
+
+	while (num) {
+		blocks = (msg->len + QUP_READ_LIMIT) / QUP_READ_LIMIT;
+		rem = msg->len % QUP_READ_LIMIT;
+		i = 0, tx_len = 0, len = 0;
+
+		if (msg->flags & I2C_M_RD) {
+			cur_tx_nents = 1;
+			cur_rx_nents = (blocks * 2) + 1;
+			rx_nents += cur_rx_nents;
+			tx_nents += cur_tx_nents;
+
+			while (i < blocks) {
+				/* transfer length set to '0' implies 256
+				   bytes */
+				tlen = (i == (blocks - 1)) ? rem : 0;
+				last = (i == (blocks - 1)) && !(num - 1);
+				len += get_start_tag(&qup->start_tag.start[off +
+									   len],
+						     msg, !i, last, tlen);
+
+				qup_sg_set_buf(&qup->brx.sg_rx[rx_buf++],
+					       &qup->brx.scratch_tag.start[0],
+					       &qup->brx.scratch_tag,
+					       2, qup, 0, 0);
+
+				qup_sg_set_buf(&qup->brx.sg_rx[rx_buf++],
+					       &msg->buf[QUP_READ_LIMIT * i],
+					       NULL, tlen, qup,
+					       1, DMA_FROM_DEVICE);
+				i++;
+			}
+			qup_sg_set_buf(&qup->btx.sg_tx[tx_buf++],
+				       &qup->start_tag.start[off],
+				       &qup->start_tag, len, qup, 0, 0);
+			off += len;
+			qup_sg_set_buf(&qup->brx.sg_rx[rx_buf++],
+				       &qup->brx.scratch_tag.start[1],
+				       &qup->brx.scratch_tag, 2,
+				       qup, 0, 0);
+		} else {
+			cur_tx_nents = (blocks * 2);
+			tx_nents += cur_tx_nents;
+
+			while (i < blocks) {
+				tlen = (i == (blocks - 1)) ? rem : 0;
+				last = (i == (blocks - 1)) && !(num - 1);
+				len = get_start_tag(&qup->start_tag.start[off +
+									tx_len],
+						    msg, !i, last, tlen);
+
+				qup_sg_set_buf(&qup->btx.sg_tx[tx_buf++],
+					       &qup->start_tag.start[off +
+								     tx_len],
+					       &qup->start_tag, len,
+					       qup, 0, 0);
+
+				tx_len += len;
+				qup_sg_set_buf(&qup->btx.sg_tx[tx_buf++],
+					       &msg->buf[QUP_READ_LIMIT * i],
+					       NULL, tlen, qup, 1,
+					       DMA_TO_DEVICE);
+				i++;
+			}
+			off += tx_len;
+
+			if (!(num - 1)) {
+				qup->btx.footer_tag.start[0] =
+							  QUP_BAM_FLUSH_STOP;
+				qup->btx.footer_tag.start[1] =
+							  QUP_BAM_FLUSH_STOP;
+				qup_sg_set_buf(&qup->btx.sg_tx[tx_buf++],
+					       &qup->btx.footer_tag.start[0],
+					       &qup->btx.footer_tag, 2,
+					       qup, 0, 0);
+				tx_nents += 1;
+			}
 		}
-		qup_sg_set_buf(&qup->btx.sg_tx[i << 1],
-			       &qup->btx.footer_tag.start[0],
-			       &qup->btx.footer_tag, 2,
-			       qup, 0, 0);
+		msg++;
+		num--;
 	}
 
 	txd = dmaengine_prep_slave_sg(qup->btx.dma_tx, qup->btx.sg_tx, tx_nents,
@@ -1006,10 +1024,20 @@ static int bam_do_xfer(struct qup_i2c_dev *qup, struct i2c_msg *msg)
 
 	if (ret || qup->bus_err || qup->qup_err) {
-		if (qup->bus_err & QUP_I2C_NACK_FLAG)
+		if (qup->bus_err & QUP_I2C_NACK_FLAG) {
+			msg--;
 			dev_err(qup->dev, "NACK from %x\n", msg->addr);
+		}
 		ret = -EIO;
+
+		if (qup_i2c_change_state(qup, QUP_RUN_STATE)) {
+			dev_err(qup->dev, "change to run state timed out");
+			return ret;
+		}
+
+		writel(QUP_BAM_INPUT_EOT, qup->base + QUP_OUT_FIFO_BASE);
+		writel(QUP_BAM_FLUSH_STOP, qup->base + QUP_OUT_FIFO_BASE);
+		writel(QUP_BAM_FLUSH_STOP, qup->base + QUP_OUT_FIFO_BASE);
+		writel(QUP_BAM_FLUSH_STOP, qup->base + QUP_OUT_FIFO_BASE);
 		qup_i2c_flush(qup);
-		qup_i2c_change_state(qup, QUP_RUN_STATE);
 
 		/* wait for remaining interrupts to occur */
 		if (!wait_for_completion_timeout(&qup->xfer, HZ))
@@ -1022,7 +1050,7 @@ desc_err:
 	return ret;
 }
 
-static int qup_bam_xfer(struct i2c_adapter *adap, struct i2c_msg *msg)
+static int qup_bam_xfer(struct i2c_adapter *adap, struct i2c_msg *msg, int num)
 {
 	struct qup_i2c_dev *qup = i2c_get_adapdata(adap);
 	int ret = 0;
@@ -1051,7 +1079,7 @@ static int qup_bam_xfer(struct i2c_adapter *adap, struct i2c_msg *msg)
 	writel(qup->clk_ctl, qup->base + QUP_I2C_CLK_CTL);
 
 	qup->msg = msg;
-	ret = bam_do_xfer(qup, qup->msg);
+	ret = bam_do_xfer(qup, qup->msg, num);
 out:
 	disable_irq(qup->irq);
 
@@ -1064,7 +1092,7 @@ static int qup_i2c_xfer(struct i2c_adapter *adap,
 			int num)
 {
 	struct qup_i2c_dev *qup = i2c_get_adapdata(adap);
-	int ret, idx, last, len;
+	int ret, idx, last, use_dma = 0, len = 0;
 
 	ret = pm_runtime_get_sync(qup->dev);
 	if (ret < 0)
@@ -1083,12 +1111,27 @@ static int qup_i2c_xfer(struct i2c_adapter *adap,
 		writel(I2C_MINI_CORE | I2C_N_VAL, qup->base + QUP_CONFIG);
 	}
 
-	for (idx = 0; idx < num; idx++) {
-		if (msgs[idx].len == 0) {
-			ret = -EINVAL;
-			goto out;
+	if ((qup->is_dma)) {
+		/* All i2c_msgs should be transferred using either dma or cpu */
+		for (idx = 0; idx < num; idx++) {
+			if (msgs[idx].len == 0) {
+				ret = -EINVAL;
+				goto out;
+			}
+
+			if (!len)
+				len = ((&msgs[idx])->len) > qup->out_fifo_sz;
+
+			if ((!is_vmalloc_addr((&msgs[idx])->buf)) && len) {
+				use_dma = 1;
+			 } else {
+				use_dma = 0;
+				break;
+			}
 		}
+	}
 
+	for (idx = 0; idx < num; idx++) {
 		if (qup_i2c_poll_state_i2c_master(qup)) {
 			ret = -EIO;
 			goto out;
@@ -1096,11 +1139,9 @@ static int qup_i2c_xfer(struct i2c_adapter *adap,
 
 		reinit_completion(&qup->xfer);
 
-		len = (&msgs[idx])->len;
-
-		if ((qup->is_dma) && (!is_vmalloc_addr((&msgs[idx])->buf)) &&
-		    (len > qup->out_fifo_sz)) {
-			ret = qup_bam_xfer(adap, &msgs[idx]);
+		if (use_dma) {
+			ret = qup_bam_xfer(adap, &msgs[idx], num);
+			idx = num;
 		} else {
 			last = (idx == (num - 1));
 			if (msgs[idx].flags & I2C_M_RD)
@@ -1215,6 +1256,8 @@ static int qup_i2c_probe(struct platform_device *pdev)
 			ret = -ENOMEM;
 			goto fail;
 		}
+		sg_init_table(qup->btx.sg_tx, blocks);
+
 		qup->brx.sg_rx = devm_kzalloc(&pdev->dev,
 					  sizeof(*qup->btx.sg_tx) * blocks,
 					  GFP_KERNEL);
@@ -1222,6 +1265,7 @@ static int qup_i2c_probe(struct platform_device *pdev)
 			ret = -ENOMEM;
 			goto fail;
 		}
+		sg_init_table(qup->brx.sg_rx, blocks);
 
 		size = sizeof(struct qup_i2c_tag) * (blocks + 3);
 		qup->dpool = dma_pool_create("qup_i2c-dma-pool", &pdev->dev,
-- 
QUALCOMM INDIA, on behalf of Qualcomm Innovation Center, Inc. is a member of Code Aurora Forum, hosted by The Linux Foundation

