From: Srinivas Kandagatla <srinivas.kandagatla@linaro.org>
To: gregkh@linuxfoundation.org, broonie@kernel.org
Cc: vkoul@kernel.org, ctatlor97@gmail.com,
	alsa-devel@alsa-project.org, linux-arm-msm@vger.kernel.org,
	linux-kernel@vger.kernel.org, sdharia@quicinc.com,
	Srinivas Kandagatla <srinivas.kandagatla@linaro.org>
Subject: [PATCH v2 2/2] slimbus: ngd: add stream support
Date: Thu,  5 Jul 2018 14:54:26 +0100
Message-ID: <20180705135426.20748-3-srinivas.kandagatla@linaro.org>
In-Reply-To: <20180705135426.20748-1-srinivas.kandagatla@linaro.org>

This patch adds stream support, which involves implementing the
user-specific variants of the port connection and data channel
management SLIMbus messages.

Signed-off-by: Srinivas Kandagatla <srinivas.kandagatla@linaro.org>
---
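Note for reviewers (kept out of the commit log): below is a minimal,
untested sketch of how a client/codec driver is expected to reach this
code through the generic stream API introduced in patch 1/2. The stream
name, channel numbers, port mask and rates are made-up placeholders,
error unwinding is trimmed for brevity, and the config direction field
is left at its default.

#include <linux/err.h>
#include <linux/slimbus.h>

static int example_start_stream(struct slim_device *sdev)
{
	/* Placeholder channel numbers for a stereo 48 kHz / 16 bit stream */
	unsigned int chans[2] = { 133, 134 };
	struct slim_stream_config cfg = {
		.rate = 48000,
		.bps = 16,
		.ch_count = 2,
		.chs = chans,
		.port_mask = 0x3,	/* device ports 0 and 1 */
	};
	struct slim_stream_runtime *stream;
	int ret;

	stream = slim_stream_allocate(sdev, "example-stream");
	if (IS_ERR(stream))
		return PTR_ERR(stream);

	ret = slim_stream_prepare(stream, &cfg);
	if (ret)
		return ret;

	/*
	 * slim_stream_enable() ends up in the controller's enable_stream()
	 * callback added by this patch, which sends the
	 * SLIM_USR_MC_DEF_ACT_CHAN and SLIM_USR_MC_RECONFIG_NOW user
	 * messages to the manager.
	 */
	return slim_stream_enable(stream);
}
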
 drivers/slimbus/qcom-ngd-ctrl.c | 149 +++++++++++++++++++++++++++++++++++++++-
 1 file changed, 147 insertions(+), 2 deletions(-)

diff --git a/drivers/slimbus/qcom-ngd-ctrl.c b/drivers/slimbus/qcom-ngd-ctrl.c
index 8554e3f43522..8be4d6786c61 100644
--- a/drivers/slimbus/qcom-ngd-ctrl.c
+++ b/drivers/slimbus/qcom-ngd-ctrl.c
@@ -603,7 +603,9 @@ static void qcom_slim_ngd_rx(struct qcom_slim_ngd_ctrl *ctrl, u8 *buf)
 
 	if (mc == SLIM_MSG_MC_REPLY_INFORMATION ||
 	    mc == SLIM_MSG_MC_REPLY_VALUE || (mc == SLIM_USR_MC_ADDR_REPLY &&
-	    mt == SLIM_MSG_MT_SRC_REFERRED_USER)) {
+	    mt == SLIM_MSG_MT_SRC_REFERRED_USER) ||
+		(mc == SLIM_USR_MC_GENERIC_ACK &&
+		 mt == SLIM_MSG_MT_SRC_REFERRED_USER)) {
 		slim_msg_response(&ctrl->ctrl, &buf[4], buf[3], len - 4);
 		pm_runtime_mark_last_busy(ctrl->dev);
 	}
@@ -766,10 +768,14 @@ static int qcom_slim_ngd_xfer_msg(struct slim_controller *sctrl,
 {
 	struct qcom_slim_ngd_ctrl *ctrl = dev_get_drvdata(sctrl->dev);
 	DECLARE_COMPLETION_ONSTACK(tx_sent);
-	int ret, timeout;
+	DECLARE_COMPLETION_ONSTACK(done);
+	int ret, timeout, i;
+	u8 wbuf[SLIM_MSGQ_BUF_LEN];
+	u8 rbuf[SLIM_MSGQ_BUF_LEN];
 	u32 *pbuf;
 	u8 *puc;
 	u8 la = txn->la;
+	bool usr_msg = false;
 
 	if (txn->mc & SLIM_MSG_CLK_PAUSE_SEQ_FLG)
 		return -EPROTONOSUPPORT;
@@ -794,6 +800,48 @@ static int qcom_slim_ngd_xfer_msg(struct slim_controller *sctrl,
 		return -ENOMEM;
 	}
 
+	if (txn->mt == SLIM_MSG_MT_CORE &&
+		(txn->mc == SLIM_MSG_MC_CONNECT_SOURCE ||
+		txn->mc == SLIM_MSG_MC_CONNECT_SINK ||
+		txn->mc == SLIM_MSG_MC_DISCONNECT_PORT)) {
+		txn->mt = SLIM_MSG_MT_DEST_REFERRED_USER;
+		switch (txn->mc) {
+		case SLIM_MSG_MC_CONNECT_SOURCE:
+			txn->mc = SLIM_USR_MC_CONNECT_SRC;
+			break;
+		case SLIM_MSG_MC_CONNECT_SINK:
+			txn->mc = SLIM_USR_MC_CONNECT_SINK;
+			break;
+		case SLIM_MSG_MC_DISCONNECT_PORT:
+			txn->mc = SLIM_USR_MC_DISCONNECT_PORT;
+			break;
+		default:
+			return -EINVAL;
+		}
+
+		usr_msg = true;
+		i = 0;
+		wbuf[i++] = txn->la;
+		la = SLIM_LA_MGR;
+		wbuf[i++] = txn->msg->wbuf[0];
+		if (txn->mc != SLIM_USR_MC_DISCONNECT_PORT)
+			wbuf[i++] = txn->msg->wbuf[1];
+
+		txn->comp = &done;
+		ret = slim_alloc_txn_tid(sctrl, txn);
+		if (ret) {
+			dev_err(ctrl->dev, "Unable to allocate TID\n");
+			return ret;
+		}
+
+		wbuf[i++] = txn->tid;
+
+		txn->msg->num_bytes = i;
+		txn->msg->wbuf = wbuf;
+		txn->msg->rbuf = rbuf;
+		txn->rl = txn->msg->num_bytes + 4;
+	}
+
 	/* HW expects length field to be excluded */
 	txn->rl--;
 	puc = (u8 *)pbuf;
@@ -830,6 +878,15 @@ static int qcom_slim_ngd_xfer_msg(struct slim_controller *sctrl,
 		return -ETIMEDOUT;
 	}
 
+	if (usr_msg) {
+		timeout = wait_for_completion_timeout(&done, HZ);
+		if (!timeout) {
+			dev_err(sctrl->dev, "TX timed out:MC:0x%x,mt:0x%x",
+				txn->mc, txn->mt);
+			return -ETIMEDOUT;
+		}
+	}
+
 	return 0;
 }
 
@@ -856,6 +913,93 @@ static int qcom_slim_ngd_xfer_msg_sync(struct slim_controller *ctrl,
 	return 0;
 }
 
+static int qcom_slim_ngd_enable_stream(struct slim_stream_runtime *rt)
+{
+	struct slim_device *sdev = rt->dev;
+	struct slim_controller *ctrl = sdev->ctrl;
+	struct slim_val_inf msg =  {0};
+	u8 wbuf[SLIM_MSGQ_BUF_LEN];
+	u8 rbuf[SLIM_MSGQ_BUF_LEN];
+	struct slim_msg_txn txn = {0,};
+	int i, ret;
+
+	txn.mt = SLIM_MSG_MT_DEST_REFERRED_USER;
+	txn.dt = SLIM_MSG_DEST_LOGICALADDR;
+	txn.la = SLIM_LA_MGR;
+	txn.ec = 0;
+	txn.msg = &msg;
+	txn.msg->num_bytes = 0;
+	txn.msg->wbuf = wbuf;
+	txn.msg->rbuf = rbuf;
+
+	for (i = 0; i < rt->num_ports; i++) {
+		struct slim_port *port = &rt->ports[i];
+
+		if (txn.msg->num_bytes == 0) {
+			int seg_interval = SLIM_SLOTS_PER_SUPERFRAME/rt->ratem;
+			int exp;
+
+			wbuf[txn.msg->num_bytes++] = sdev->laddr;
+			wbuf[txn.msg->num_bytes] = rt->bps >> 2 |
+						   (port->ch.aux_fmt << 6);
+
+			/* Data channel segment interval not multiple of 3 */
+			exp = seg_interval % 3;
+			if (exp)
+				wbuf[txn.msg->num_bytes] |= BIT(5);
+
+			txn.msg->num_bytes++;
+			wbuf[txn.msg->num_bytes++] = exp << 4 | rt->prot;
+
+			if (rt->prot == SLIM_PROTO_ISO)
+				wbuf[txn.msg->num_bytes++] =
+						port->ch.prrate |
+						SLIM_CHANNEL_CONTENT_FL;
+			else
+				wbuf[txn.msg->num_bytes++] =  port->ch.prrate;
+
+			ret = slim_alloc_txn_tid(ctrl, &txn);
+			if (ret) {
+				dev_err(&sdev->dev, "Fail to allocate TID\n");
+				return -ENXIO;
+			}
+			wbuf[txn.msg->num_bytes++] = txn.tid;
+		}
+		wbuf[txn.msg->num_bytes++] = port->ch.id;
+	}
+
+	txn.mc = SLIM_USR_MC_DEF_ACT_CHAN;
+	txn.rl = txn.msg->num_bytes + 4;
+	ret = qcom_slim_ngd_xfer_msg_sync(ctrl, &txn);
+	if (ret) {
+		slim_free_txn_tid(ctrl, &txn);
+		dev_err(&sdev->dev, "TX timed out:MC:0x%x,mt:0x%x", txn.mc,
+				txn.mt);
+		return ret;
+	}
+
+	txn.mc = SLIM_USR_MC_RECONFIG_NOW;
+	txn.msg->num_bytes = 2;
+	wbuf[1] = sdev->laddr;
+	txn.rl = txn.msg->num_bytes + 4;
+
+	ret = slim_alloc_txn_tid(ctrl, &txn);
+	if (ret) {
+		dev_err(ctrl->dev, "Fail to allocate TID\n");
+		return ret;
+	}
+
+	wbuf[0] = txn.tid;
+	ret = qcom_slim_ngd_xfer_msg_sync(ctrl, &txn);
+	if (ret) {
+		slim_free_txn_tid(ctrl, &txn);
+		dev_err(&sdev->dev, "TX timed out:MC:0x%x,mt:0x%x", txn.mc,
+				txn.mt);
+	}
+
+	return ret;
+}
+
 static int qcom_slim_ngd_get_laddr(struct slim_controller *ctrl,
 				   struct slim_eaddr *ea, u8 *laddr)
 {
@@ -1288,6 +1432,7 @@ static int qcom_slim_ngd_ctrl_probe(struct platform_device *pdev)
 	ctrl->ctrl.a_framer = &ctrl->framer;
 	ctrl->ctrl.clkgear = SLIM_MAX_CLK_GEAR;
 	ctrl->ctrl.get_laddr = qcom_slim_ngd_get_laddr;
+	ctrl->ctrl.enable_stream = qcom_slim_ngd_enable_stream;
 	ctrl->ctrl.xfer_msg = qcom_slim_ngd_xfer_msg;
 	ctrl->ctrl.wakeup = NULL;
 	ctrl->state = QCOM_SLIM_NGD_CTRL_DOWN;
-- 
2.16.2


Thread overview: 3 messages
2018-07-05 13:54 [PATCH v2 0/2] slimbus: Add Stream Support Srinivas Kandagatla
2018-07-05 13:54 ` [PATCH v2 1/2] slimbus: stream: add stream support Srinivas Kandagatla
2018-07-05 13:54 ` Srinivas Kandagatla [this message]
