Linux-Devicetree Archive on lore.kernel.org
 help / color / Atom feed
* [PATCH 1/3] soc: qcom: Introduce Protection Domain Restart helpers
       [not found] <20191118142728.30187-1-sibis@codeaurora.org>
@ 2019-11-18 14:27 ` Sibi Sankar
  2019-11-18 14:28 ` [PATCH 2/3] dt-bindings: soc: qcom: apr: Add protection domain bindings Sibi Sankar
                   ` (5 subsequent siblings)
  6 siblings, 0 replies; 12+ messages in thread
From: Sibi Sankar @ 2019-11-18 14:27 UTC (permalink / raw)
  To: bjorn.andersson, srinivas.kandagatla, robh+dt, tsoni
  Cc: agross, mark.rutland, linux-arm-msm, linux-remoteproc,
	linux-kernel, devicetree, rnayak, Sibi Sankar

Qualcomm SoCs (starting with MSM8998) allow for multiple protection domains
to run on the same Q6 sub-system. This allows for services like ATH10K WLAN
FW to have their own separate address space and crash/recover without
disrupting the modem and other PDs running on the same sub-system. The PDR
helpers introduces an abstraction that allows for tracking/controlling the
life cycle of protection domains running on various Q6 sub-systems.

Signed-off-by: Sibi Sankar <sibis@codeaurora.org>
---
 drivers/soc/qcom/Kconfig         |   5 +
 drivers/soc/qcom/Makefile        |   1 +
 drivers/soc/qcom/pdr_interface.c | 685 +++++++++++++++++++++++++++++++
 drivers/soc/qcom/pdr_internal.h  | 375 +++++++++++++++++
 include/linux/soc/qcom/pdr.h     | 102 +++++
 include/linux/soc/qcom/qmi.h     |   1 +
 6 files changed, 1169 insertions(+)
 create mode 100644 drivers/soc/qcom/pdr_interface.c
 create mode 100644 drivers/soc/qcom/pdr_internal.h
 create mode 100644 include/linux/soc/qcom/pdr.h

diff --git a/drivers/soc/qcom/Kconfig b/drivers/soc/qcom/Kconfig
index 79d826553ac82..5c4e76837f59b 100644
--- a/drivers/soc/qcom/Kconfig
+++ b/drivers/soc/qcom/Kconfig
@@ -76,6 +76,11 @@ config QCOM_OCMEM
           requirements. This is typically used by the GPU, camera/video, and
           audio components on some Snapdragon SoCs.
 
+config QCOM_PDR_HELPERS
+	tristate
+	depends on ARCH_QCOM || COMPILE_TEST
+	select QCOM_QMI_HELPERS
+
 config QCOM_PM
 	bool "Qualcomm Power Management"
 	depends on ARCH_QCOM && !ARM64
diff --git a/drivers/soc/qcom/Makefile b/drivers/soc/qcom/Makefile
index 9fb35c8a495e1..5d6b83dc58e82 100644
--- a/drivers/soc/qcom/Makefile
+++ b/drivers/soc/qcom/Makefile
@@ -7,6 +7,7 @@ obj-$(CONFIG_QCOM_GLINK_SSR) +=	glink_ssr.o
 obj-$(CONFIG_QCOM_GSBI)	+=	qcom_gsbi.o
 obj-$(CONFIG_QCOM_MDT_LOADER)	+= mdt_loader.o
 obj-$(CONFIG_QCOM_OCMEM)	+= ocmem.o
+obj-$(CONFIG_QCOM_PDR_HELPERS)	+= pdr_interface.o
 obj-$(CONFIG_QCOM_PM)	+=	spm.o
 obj-$(CONFIG_QCOM_QMI_HELPERS)	+= qmi_helpers.o
 qmi_helpers-y	+= qmi_encdec.o qmi_interface.o
diff --git a/drivers/soc/qcom/pdr_interface.c b/drivers/soc/qcom/pdr_interface.c
new file mode 100644
index 0000000000000..2df8167663c85
--- /dev/null
+++ b/drivers/soc/qcom/pdr_interface.c
@@ -0,0 +1,685 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2019 The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/completion.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/qrtr.h>
+#include <linux/string.h>
+#include <linux/workqueue.h>
+
+#include "pdr_internal.h"
+
+/*
+ * Bookkeeping node queued on one of the pdr_handle work lists (servloc,
+ * servreg, servdel, indack) to hand a tracked PD over to a worker.
+ */
+struct pdr_list_node {
+	u16 transaction_id;		/* QMI transaction id (indication acks only) */
+	struct pdr_service *pds;	/* tracked PD this node refers to */
+	struct list_head node;		/* membership in one pdr_handle list */
+};
+
+/*
+ * qmi new_server callback for the servreg_locator service: record the
+ * locator's QIPCRTR address and wake up waiters in pdr_servloc_work().
+ */
+static int servreg_locator_new_server(struct qmi_handle *qmi,
+				      struct qmi_service *svc)
+{
+	struct pdr_handle *pdr = container_of(qmi, struct pdr_handle,
+					      servloc_client);
+
+	/* Create a Local client port for QMI communication */
+	pdr->servloc_addr.sq_family = AF_QIPCRTR;
+	pdr->servloc_addr.sq_node = svc->node;
+	pdr->servloc_addr.sq_port = svc->port;
+
+	complete_all(&pdr->locator_available);
+
+	return 0;
+}
+
+/*
+ * qmi del_server callback for the servreg_locator service: make subsequent
+ * lookups in pdr_servloc_work() block again until the locator returns.
+ */
+static void servreg_locator_del_server(struct qmi_handle *qmi,
+				       struct qmi_service *svc)
+{
+	struct pdr_handle *pdr = container_of(qmi, struct pdr_handle,
+					      servloc_client);
+
+	reinit_completion(&pdr->locator_available);
+}
+
+/* Server arrival/departure callbacks for the servreg_locator service */
+static const struct qmi_ops service_locator_ops = {
+	.new_server = servreg_locator_new_server,
+	.del_server = servreg_locator_del_server,
+};
+
+/**
+ * pdr_register_listener() - (un)register for state updates of a PD
+ * @pdr:	PDR client handle
+ * @pds:	protection domain to (un)register the listener for
+ * @enable:	true to register, false to deregister
+ *
+ * Sends a SERVREG_REGISTER_LISTENER_REQ to the servreg_notifier service
+ * and caches the current PD state from the response in @pds->state.
+ *
+ * Return: 0 on success, negative errno on failure.
+ */
+static int pdr_register_listener(struct pdr_handle *pdr,
+				 struct pdr_service *pds,
+				 bool enable)
+{
+	struct servreg_register_listener_resp resp;
+	struct servreg_register_listener_req req;
+	struct qmi_txn txn;
+	int ret;
+
+	ret = qmi_txn_init(&pdr->servreg_client, &txn,
+			   servreg_register_listener_resp_ei,
+			   &resp);
+	if (ret < 0)
+		return ret;
+
+	req.enable = enable;
+	strcpy(req.service_path, pds->service_path);
+
+	ret = qmi_send_request(&pdr->servreg_client, &pdr->servreg_addr,
+			       &txn, SERVREG_REGISTER_LISTENER_REQ,
+			       SERVREG_REGISTER_LISTENER_REQ_LEN,
+			       servreg_register_listener_req_ei,
+			       &req);
+	if (ret < 0) {
+		qmi_txn_cancel(&txn);
+		return ret;
+	}
+
+	ret = qmi_txn_wait(&txn, 5 * HZ);
+	if (ret < 0) {
+		pr_err("PDR: %s register listener txn wait failed: %d\n",
+		       pds->service_path, ret);
+		return ret;
+	}
+
+	/*
+	 * Check the response; 'ret' is 0 at this point, so a QMI-level
+	 * failure must be turned into a real error code, not returned as
+	 * success.
+	 */
+	if (resp.resp.result != QMI_RESULT_SUCCESS_V01) {
+		pr_err("PDR: %s register listener failed: 0x%x\n",
+		       pds->service_path, resp.resp.error);
+		return -EREMOTEIO;
+	}
+
+	/*
+	 * curr_state is a 32-bit wire enum; the previous INT_MIN/INT_MAX
+	 * range check on an int was tautologically false and has been
+	 * dropped.
+	 */
+	pds->state = resp.curr_state;
+
+	return 0;
+}
+
+/*
+ * Worker: register listeners for newly discovered notifier services and
+ * report the initial PD state to the client.  The pending nodes are
+ * detached under list_lock to avoid racing with
+ * servreg_notifier_new_server() adding entries concurrently.
+ */
+static void pdr_servreg_work(struct work_struct *work)
+{
+	struct pdr_handle *pdr = container_of(work, struct pdr_handle,
+					      servreg_work);
+	struct pdr_list_node *servreg, *tmp;
+	struct pdr_service *pds;
+	LIST_HEAD(pending);
+
+	mutex_lock(&pdr->list_lock);
+	list_splice_init(&pdr->servreg_list, &pending);
+	mutex_unlock(&pdr->list_lock);
+
+	list_for_each_entry_safe(servreg, tmp, &pending, node) {
+		pds = servreg->pds;
+		pdr_register_listener(pdr, pds, true);
+		pdr->status(pdr, pds);
+		list_del(&servreg->node);
+		kfree(servreg);
+	}
+}
+
+/*
+ * qmi new_server callback for servreg_notifier services: record the
+ * service's QIPCRTR address and queue listener registration for the
+ * matching lookup, if any.
+ */
+static int servreg_notifier_new_server(struct qmi_handle *qmi,
+				       struct qmi_service *svc)
+{
+	struct pdr_handle *pdr = container_of(qmi, struct pdr_handle,
+					      servreg_client);
+	struct pdr_list_node *servreg;
+	struct pdr_service *pds;
+	bool found = false;
+
+	/* Create a Local client port for QMI communication */
+	pdr->servreg_addr.sq_family = AF_QIPCRTR;
+	pdr->servreg_addr.sq_node = svc->node;
+	pdr->servreg_addr.sq_port = svc->port;
+
+	servreg = kzalloc(sizeof(*servreg), GFP_KERNEL);
+	if (!servreg)
+		return -ENOMEM;
+
+	/* Scan the lookups under list_lock to avoid racing pdr_add_lookup() */
+	mutex_lock(&pdr->list_lock);
+	list_for_each_entry(pds, &pdr->lookups, node) {
+		if (pds->service == svc->service &&
+		    pds->instance == svc->instance) {
+			pds->service_connected = true;
+			servreg->pds = pds;
+			list_add_tail(&servreg->node, &pdr->servreg_list);
+			found = true;
+			break;
+		}
+	}
+	mutex_unlock(&pdr->list_lock);
+
+	/* No matching lookup: don't leak the node or queue pointless work */
+	if (!found) {
+		kfree(servreg);
+		return 0;
+	}
+
+	queue_work(pdr->servreg_wq, &pdr->servreg_work);
+
+	return 0;
+}
+
+/*
+ * Worker: mark PDs whose notifier service went away as down and notify
+ * the client.  Nodes are detached under list_lock to avoid racing with
+ * servreg_notifier_del_server() adding entries concurrently.
+ */
+static void pdr_servdel_work(struct work_struct *work)
+{
+	struct pdr_handle *pdr = container_of(work, struct pdr_handle,
+					      servdel_work);
+	struct pdr_list_node *servreg, *tmp;
+	struct pdr_service *pds;
+	LIST_HEAD(pending);
+
+	mutex_lock(&pdr->list_lock);
+	list_splice_init(&pdr->servdel_list, &pending);
+	mutex_unlock(&pdr->list_lock);
+
+	list_for_each_entry_safe(servreg, tmp, &pending, node) {
+		pds = servreg->pds;
+		pds->service_connected = false;
+		pds->state = SERVREG_SERVICE_STATE_DOWN;
+		pdr->status(pdr, pds);
+		list_del(&servreg->node);
+		kfree(servreg);
+	}
+}
+
+/*
+ * qmi del_server callback for servreg_notifier services: queue a "service
+ * down" notification for the matching lookup, if any.
+ */
+static void servreg_notifier_del_server(struct qmi_handle *qmi,
+					struct qmi_service *svc)
+{
+	struct pdr_handle *pdr = container_of(qmi, struct pdr_handle,
+					      servreg_client);
+	struct pdr_list_node *servreg;
+	struct pdr_service *pds;
+	bool found = false;
+
+	servreg = kzalloc(sizeof(*servreg), GFP_KERNEL);
+	if (!servreg)
+		return;
+
+	/* Scan the lookups under list_lock to avoid racing pdr_add_lookup() */
+	mutex_lock(&pdr->list_lock);
+	list_for_each_entry(pds, &pdr->lookups, node) {
+		if (pds->service == svc->service &&
+		    pds->instance == svc->instance) {
+			servreg->pds = pds;
+			list_add_tail(&servreg->node, &pdr->servdel_list);
+			found = true;
+			break;
+		}
+	}
+	mutex_unlock(&pdr->list_lock);
+
+	/* No matching lookup: don't leak the node or queue pointless work */
+	if (!found) {
+		kfree(servreg);
+		return;
+	}
+
+	queue_work(pdr->servreg_wq, &pdr->servdel_work);
+}
+
+/* Server arrival/departure callbacks for servreg_notifier services */
+static const struct qmi_ops service_notifier_ops = {
+	.new_server = servreg_notifier_new_server,
+	.del_server = servreg_notifier_del_server,
+};
+
+/**
+ * pdr_send_indack_msg() - acknowledge a state-update indication
+ * @pdr:	PDR client handle
+ * @pds:	protection domain the indication referred to
+ * @tid:	transaction id taken from the indication being acknowledged
+ *
+ * Return: 0 on success, negative errno on transport failure.  A QMI-level
+ * error in the response is only logged — the ack is best effort.
+ */
+static int pdr_send_indack_msg(struct pdr_handle *pdr, struct pdr_service *pds,
+			       u16 tid)
+{
+	struct servreg_set_ack_resp resp;
+	struct servreg_set_ack_req req;
+	struct qmi_txn txn;
+	int ret;
+
+	ret = qmi_txn_init(&pdr->servreg_client, &txn, servreg_set_ack_resp_ei,
+			   &resp);
+	if (ret < 0)
+		return ret;
+
+	req.transaction_id = tid;
+	strcpy(req.service_path, pds->service_path);
+
+	ret = qmi_send_request(&pdr->servreg_client, &pdr->servreg_addr,
+			       &txn, SERVREG_SET_ACK_REQ,
+			       SERVREG_SET_ACK_REQ_LEN,
+			       servreg_set_ack_req_ei,
+			       &req);
+	if (ret < 0) {
+		qmi_txn_cancel(&txn);
+		return ret;
+	}
+
+	ret = qmi_txn_wait(&txn, 5 * HZ);
+	if (ret < 0) {
+		pr_err("PDR: %s indication ack txn wait failed: %d\n",
+		       pds->service_path, ret);
+		return ret;
+	}
+
+	/* Check the response; failure is logged only (best-effort ack) */
+	if (resp.resp.result != QMI_RESULT_SUCCESS_V01)
+		pr_err("PDR: %s indication ack failed: 0x%x\n",
+		       pds->service_path, resp.resp.error);
+
+	return 0;
+}
+
+/*
+ * Worker: acknowledge queued state-update indications and notify the
+ * client.  Nodes are detached under list_lock to avoid racing with
+ * pdr_servreg_ind_cb() adding entries concurrently.
+ */
+static void pdr_indack_work(struct work_struct *work)
+{
+	struct pdr_handle *pdr = container_of(work, struct pdr_handle,
+					      indack_work);
+	struct pdr_list_node *ind, *tmp;
+	struct pdr_service *pds;
+	LIST_HEAD(pending);
+
+	mutex_lock(&pdr->list_lock);
+	list_splice_init(&pdr->indack_list, &pending);
+	mutex_unlock(&pdr->list_lock);
+
+	list_for_each_entry_safe(ind, tmp, &pending, node) {
+		pds = ind->pds;
+		pdr_send_indack_msg(pdr, pds, ind->transaction_id);
+		pdr->status(pdr, pds);
+		list_del(&ind->node);
+		kfree(ind);
+	}
+}
+
+/*
+ * Handler for SERVREG_STATE_UPDATED_IND: update the tracked PD's state and
+ * queue an acknowledgment plus client notification.
+ */
+static void pdr_servreg_ind_cb(struct qmi_handle *qmi,
+			       struct sockaddr_qrtr *sq,
+			       struct qmi_txn *txn, const void *data)
+{
+	struct pdr_handle *pdr = container_of(qmi, struct pdr_handle,
+					      servreg_client);
+	const struct servreg_state_updated_ind *ind_msg = data;
+	struct pdr_list_node *ind;
+	struct pdr_service *pds;
+
+	/*
+	 * service_path is an embedded array (never NULL), so only its length
+	 * needs validating; the buffers hold at most SERVREG_NAME_LENGTH
+	 * characters plus the terminating NUL.
+	 */
+	if (!ind_msg || strlen(ind_msg->service_path) > SERVREG_NAME_LENGTH)
+		return;
+
+	list_for_each_entry(pds, &pdr->lookups, node) {
+		if (!strcmp(pds->service_path, ind_msg->service_path))
+			goto found;
+	}
+	return;
+
+found:
+	pds->state = ind_msg->curr_state;
+
+	ind = kzalloc(sizeof(*ind), GFP_KERNEL);
+	if (!ind)
+		return;
+
+	pr_info("PDR: Indication received from %s, state: 0x%x, trans-id: %d\n",
+		ind_msg->service_path, ind_msg->curr_state,
+		ind_msg->transaction_id);
+
+	ind->transaction_id = ind_msg->transaction_id;
+	ind->pds = pds;
+
+	mutex_lock(&pdr->list_lock);
+	list_add_tail(&ind->node, &pdr->indack_list);
+	mutex_unlock(&pdr->list_lock);
+
+	queue_work(pdr->indack_wq, &pdr->indack_work);
+}
+
+/* Indication handlers registered on the servreg_notifier qmi handle */
+static const struct qmi_msg_handler qmi_indication_handler[] = {
+	{
+		.type = QMI_INDICATION,
+		.msg_id = SERVREG_STATE_UPDATED_IND_ID,
+		.ei = servreg_state_updated_ind_ei,
+		.decoded_size = sizeof(struct servreg_state_updated_ind),
+		.fn = pdr_servreg_ind_cb,
+	},
+	{}
+};
+
+/**
+ * pdr_get_domain_list() - fetch one page of domains from the locator
+ * @req:	request, with service_name and domain_offset filled in
+ * @resp:	response buffer to decode into
+ * @pdr:	PDR client handle
+ *
+ * Return: 0 on success, negative errno on transport or QMI-level failure.
+ */
+static int pdr_get_domain_list(struct servreg_get_domain_list_req *req,
+			       struct servreg_get_domain_list_resp *resp,
+			       struct pdr_handle *pdr)
+{
+	struct qmi_txn txn;
+	int ret;
+
+	ret = qmi_txn_init(&pdr->servloc_client, &txn,
+			   servreg_get_domain_list_resp_ei, resp);
+	if (ret < 0)
+		return ret;
+
+	ret = qmi_send_request(&pdr->servloc_client,
+			       &pdr->servloc_addr,
+			       &txn, SERVREG_GET_DOMAIN_LIST_REQ,
+			       SERVREG_GET_DOMAIN_LIST_REQ_MAX_LEN,
+			       servreg_get_domain_list_req_ei,
+			       req);
+	if (ret < 0) {
+		qmi_txn_cancel(&txn);
+		return ret;
+	}
+
+	ret = qmi_txn_wait(&txn, 5 * HZ);
+	if (ret < 0) {
+		pr_err("PDR: %s get domain list txn wait failed: %d\n",
+		       req->service_name, ret);
+		return ret;
+	}
+
+	/* Check the response */
+	if (resp->resp.result != QMI_RESULT_SUCCESS_V01) {
+		pr_err("PDR: %s get domain list failed: 0x%x\n",
+		       req->service_name, resp->resp.error);
+		return -EREMOTEIO;
+	}
+
+	return ret;
+}
+
+/*
+ * Query the servreg_locator for the domain list of pds->service_name and
+ * fill in pds->instance/service_data for the entry matching
+ * pds->service_path.  Pages through the list using domain_offset.
+ *
+ * Return: 0 if the service path was found, -EAGAIN if the locator database
+ * changed mid-scan, -EINVAL if the path was not found, other negative
+ * errno on transport failure.
+ */
+static int pdr_locate_service(struct pdr_handle *pdr, struct pdr_service *pds)
+{
+	struct servreg_get_domain_list_resp *resp = NULL;
+	struct servreg_get_domain_list_req req;
+	int db_rev_count = 0, domains_read = 0;
+	struct servreg_location_entry *entry;
+	int ret, i;
+
+	/* resp is large (~2.4k decoded); allocate rather than stack it */
+	resp = kzalloc(sizeof(*resp), GFP_KERNEL);
+	if (!resp)
+		return -ENOMEM;
+
+	/* Prepare req message */
+	strcpy(req.service_name, pds->service_name);
+	req.domain_offset_valid = true;
+	req.domain_offset = 0;
+
+	do {
+		req.domain_offset = domains_read;
+		ret = pdr_get_domain_list(&req, resp, pdr);
+		if (ret < 0)
+			goto out;
+
+		/* Snapshot db revision on the first page; restart if it moves */
+		if (!domains_read)
+			db_rev_count = resp->db_rev_count;
+
+		if (db_rev_count != resp->db_rev_count) {
+			ret = -EAGAIN;
+			goto out;
+		}
+
+		/*
+		 * NOTE(review): starting at i = domains_read assumes the
+		 * service returns a cumulative list across offset queries —
+		 * confirm against the servreg_locator firmware behaviour.
+		 */
+		for (i = domains_read; i < resp->domain_list_len; i++) {
+			entry = &resp->domain_list[i];
+
+			if (strlen(entry->name) > (SERVREG_NAME_LENGTH + 1))
+				continue;
+
+			if (!strcmp(entry->name, pds->service_path)) {
+				pds->service_data_valid = entry->service_data_valid;
+				pds->service_data = entry->service_data;
+				pds->instance = entry->instance;
+				goto out;
+			}
+		}
+
+		/* Update ret to indicate that the service is not yet found */
+		ret = -EINVAL;
+
+		/* Always read total_domains from the response msg */
+		if (resp->domain_list_len >  resp->total_domains)
+			resp->domain_list_len = resp->total_domains;
+
+		domains_read += resp->domain_list_len;
+	} while (domains_read < resp->total_domains);
+out:
+	kfree(resp);
+	return ret;
+}
+
+/*
+ * Worker: resolve queued lookup requests via the servreg_locator and start
+ * a qmi lookup for the notifier service of each located PD.  On locator
+ * failure the pds is reported to the client with SERVREG_LOCATOR_ERR and
+ * then freed.
+ *
+ * NOTE(review): servloc_list is drained and pds freed without holding
+ * list_lock, so this can race with pdr_add_lookup() — confirm intended.
+ */
+static void pdr_servloc_work(struct work_struct *work)
+{
+	struct pdr_handle *pdr = container_of(work, struct pdr_handle,
+					      servloc_work);
+	struct pdr_list_node *servloc, *tmp;
+	struct pdr_service *pds;
+	int ret;
+
+	list_for_each_entry_safe(servloc, tmp, &pdr->servloc_list, node) {
+		pds = servloc->pds;
+
+		/* wait for PD Mapper to come up */
+		ret = wait_for_completion_timeout(&pdr->locator_available, 10 * HZ);
+		if (!ret) {
+			pr_err("PDR: SERVICE LOCATOR service wait failed\n");
+			ret = -ETIMEDOUT;
+			goto err;
+		}
+
+		ret = pdr_locate_service(pdr, pds);
+		if (ret < 0) {
+			pr_err("PDR: service lookup for %s failed: %d\n",
+			       pds->service_name, ret);
+			goto err;
+		}
+
+		/* Matching notifier servers arrive via servreg_notifier_new_server() */
+		qmi_add_lookup(&pdr->servreg_client, pds->service, 1,
+			       pds->instance);
+err:
+		list_del(&servloc->node);
+		kfree(servloc);
+
+		/* cleanup pds on error */
+		if (ret < 0) {
+			pds->state = SERVREG_LOCATOR_ERR;
+			pdr->status(pdr, pds);
+			list_del(&pds->node);
+			kfree(pds);
+		}
+	}
+}
+
+/**
+ * pdr_add_lookup() - register a tracking request for a PD
+ * @pdr:		PDR client handle
+ * @service_name:	service name of the tracking request
+ * @service_path:	service path of the tracking request
+ *
+ * Registering a pdr lookup allows for tracking the life cycle of the PD.
+ *
+ * Return: 0 on success, negative errno on failure.
+ */
+int pdr_add_lookup(struct pdr_handle *pdr, const char *service_name,
+		   const char *service_path)
+{
+	struct pdr_service *pds, *pds_iter;
+	struct pdr_list_node *servloc;
+	int ret;
+
+	/*
+	 * The pds buffers hold SERVREG_NAME_LENGTH characters plus a
+	 * terminating NUL; anything longer would overflow the strcpy()
+	 * calls below (the previous "+ 1" bound was off by one).
+	 */
+	if (!service_name || strlen(service_name) > SERVREG_NAME_LENGTH ||
+	    !service_path || strlen(service_path) > SERVREG_NAME_LENGTH)
+		return -EINVAL;
+
+	servloc = kzalloc(sizeof(*servloc), GFP_KERNEL);
+	if (!servloc)
+		return -ENOMEM;
+
+	pds = kzalloc(sizeof(*pds), GFP_KERNEL);
+	if (!pds) {
+		ret = -ENOMEM;
+		goto err;
+	}
+
+	pds->service = SERVREG_NOTIFIER_SERVICE;
+	strcpy(pds->service_name, service_name);
+	strcpy(pds->service_path, service_path);
+	servloc->pds = pds;
+
+	mutex_lock(&pdr->list_lock);
+	/* Reject duplicate tracking requests for the same service path */
+	list_for_each_entry(pds_iter, &pdr->lookups, node) {
+		if (!strcmp(pds_iter->service_path, service_path)) {
+			mutex_unlock(&pdr->list_lock);
+			ret = -EALREADY;
+			goto err;
+		}
+	}
+
+	list_add(&pds->node, &pdr->lookups);
+	list_add(&servloc->node, &pdr->servloc_list);
+	mutex_unlock(&pdr->list_lock);
+
+	schedule_work(&pdr->servloc_work);
+
+	return 0;
+err:
+	kfree(pds);
+	kfree(servloc);
+
+	return ret;
+}
+EXPORT_SYMBOL(pdr_add_lookup);
+
+/**
+ * pdr_restart_pd() - restart PD
+ * @pdr:		PDR client handle
+ * @service_path:	service path of restart request
+ *
+ * Restarts the PD tracked by the PDR client handle for a given service path.
+ *
+ * Return: 0 on success, negative errno on failure.
+ */
+int pdr_restart_pd(struct pdr_handle *pdr, const char *service_path)
+{
+	struct servreg_restart_pd_req req;
+	struct servreg_restart_pd_resp resp;
+	struct pdr_service *pds = NULL, *pds_iter;
+	struct qmi_txn txn;
+	int ret;
+
+	/* Paths longer than SERVREG_NAME_LENGTH cannot match any tracked PD */
+	if (!service_path || strlen(service_path) > SERVREG_NAME_LENGTH)
+		return -EINVAL;
+
+	/* Find a connected PD matching the path, under list_lock */
+	mutex_lock(&pdr->list_lock);
+	list_for_each_entry(pds_iter, &pdr->lookups, node) {
+		if (!pds_iter->service_connected)
+			continue;
+
+		if (!strcmp(pds_iter->service_path, service_path)) {
+			pds = pds_iter;
+			break;
+		}
+	}
+	mutex_unlock(&pdr->list_lock);
+
+	if (!pds)
+		return -EINVAL;
+
+	/* Prepare req message */
+	strcpy(req.service_path, pds->service_path);
+
+	ret = qmi_txn_init(&pdr->servreg_client, &txn,
+			   servreg_restart_pd_resp_ei,
+			   &resp);
+	if (ret < 0)
+		return ret;
+
+	ret = qmi_send_request(&pdr->servreg_client, &pdr->servreg_addr,
+			       &txn, SERVREG_RESTART_PD_REQ,
+			       SERVREG_RESTART_PD_REQ_MAX_LEN,
+			       servreg_restart_pd_req_ei, &req);
+	if (ret < 0) {
+		qmi_txn_cancel(&txn);
+		return ret;
+	}
+
+	ret = qmi_txn_wait(&txn, 5 * HZ);
+	if (ret < 0) {
+		pr_err("PDR: %s PD restart txn wait failed: %d\n",
+		       pds->service_path, ret);
+		return ret;
+	}
+
+	/* Check response if PDR is disabled */
+	if (resp.resp.result == QMI_RESULT_FAILURE_V01 &&
+	    resp.resp.error == QMI_ERR_DISABLED_V01) {
+		pr_err("PDR: %s PD restart is disabled: 0x%x\n",
+		       pds->service_path, resp.resp.error);
+		return -EOPNOTSUPP;
+	}
+
+	/* Check the response for other error case*/
+	if (resp.resp.result != QMI_RESULT_SUCCESS_V01) {
+		pr_err("PDR: %s request for PD restart failed: 0x%x\n",
+		       pds->service_path, resp.resp.error);
+		return -EREMOTEIO;
+	}
+
+	return ret;
+}
+EXPORT_SYMBOL(pdr_restart_pd);
+
+/**
+ * pdr_handle_init() - initialize the PDR client handle
+ * @pdr:	PDR client handle
+ * @status:	function to be called on PD state change
+ *
+ * Initializes the PDR client handle to allow for tracking/restart of PDs.
+ *
+ * Return: 0 on success, negative errno on failure.
+ */
+int pdr_handle_init(struct pdr_handle *pdr,
+		    int (*status)(struct pdr_handle *pdr,
+				  struct pdr_service *pds))
+{
+	int ret;
+
+	if (!status)
+		return -EINVAL;
+
+	/* Assign the function pointer directly (no dereference needed) */
+	pdr->status = status;
+
+	mutex_init(&pdr->list_lock);
+	init_completion(&pdr->locator_available);
+
+	INIT_LIST_HEAD(&pdr->lookups);
+	INIT_LIST_HEAD(&pdr->servloc_list);
+	INIT_LIST_HEAD(&pdr->servreg_list);
+	INIT_LIST_HEAD(&pdr->servdel_list);
+	INIT_LIST_HEAD(&pdr->indack_list);
+
+	INIT_WORK(&pdr->servloc_work, pdr_servloc_work);
+	INIT_WORK(&pdr->servreg_work, pdr_servreg_work);
+	INIT_WORK(&pdr->servdel_work, pdr_servdel_work);
+	INIT_WORK(&pdr->indack_work, pdr_indack_work);
+
+	pdr->servreg_wq = create_singlethread_workqueue("pdr_servreg_wq");
+	if (!pdr->servreg_wq)
+		return -ENOMEM;
+
+	/* Indication acks must be ordered and timely; use a HIGHPRI wq */
+	pdr->indack_wq = alloc_ordered_workqueue("pdr_indack_wq", WQ_HIGHPRI);
+	if (!pdr->indack_wq) {
+		ret = -ENOMEM;
+		goto destroy_servreg;
+	}
+
+	ret = qmi_handle_init(&pdr->servloc_client,
+			      SERVREG_GET_DOMAIN_LIST_RESP_MAX_LEN,
+			      &service_locator_ops, NULL);
+	if (ret < 0)
+		goto destroy_indack;
+
+	ret = qmi_handle_init(&pdr->servreg_client,
+			      SERVREG_STATE_UPDATED_IND_MAX_LEN,
+			      &service_notifier_ops, qmi_indication_handler);
+	if (ret < 0)
+		goto release_handle;
+
+	qmi_add_lookup(&pdr->servloc_client, SERVREG_LOCATOR_SERVICE, 1, 1);
+
+	return 0;
+
+release_handle:
+	qmi_handle_release(&pdr->servloc_client);
+destroy_indack:
+	destroy_workqueue(pdr->indack_wq);
+destroy_servreg:
+	destroy_workqueue(pdr->servreg_wq);
+
+	return ret;
+}
+EXPORT_SYMBOL(pdr_handle_init);
+
+/**
+ * pdr_handle_release() - release the PDR client handle
+ * @pdr:	PDR client handle
+ *
+ * Cleans up pending tracking requests and releases the underlying qmi handles.
+ */
+void pdr_handle_release(struct pdr_handle *pdr)
+{
+	struct pdr_list_node *pos, *node_tmp;
+	struct pdr_service *pds, *tmp;
+
+	qmi_handle_release(&pdr->servloc_client);
+	qmi_handle_release(&pdr->servreg_client);
+
+	cancel_work_sync(&pdr->servloc_work);
+	cancel_work_sync(&pdr->servreg_work);
+	cancel_work_sync(&pdr->servdel_work);
+	cancel_work_sync(&pdr->indack_work);
+
+	destroy_workqueue(pdr->servreg_wq);
+	destroy_workqueue(pdr->indack_wq);
+
+	/*
+	 * Works were cancelled above, possibly before running; free any
+	 * list nodes they never got to process so they don't leak.
+	 */
+	list_for_each_entry_safe(pos, node_tmp, &pdr->servloc_list, node) {
+		list_del(&pos->node);
+		kfree(pos);
+	}
+
+	list_for_each_entry_safe(pos, node_tmp, &pdr->servreg_list, node) {
+		list_del(&pos->node);
+		kfree(pos);
+	}
+
+	list_for_each_entry_safe(pos, node_tmp, &pdr->servdel_list, node) {
+		list_del(&pos->node);
+		kfree(pos);
+	}
+
+	list_for_each_entry_safe(pos, node_tmp, &pdr->indack_list, node) {
+		list_del(&pos->node);
+		kfree(pos);
+	}
+
+	list_for_each_entry_safe(pds, tmp, &pdr->lookups, node) {
+		list_del(&pds->node);
+		kfree(pds);
+	}
+}
+EXPORT_SYMBOL(pdr_handle_release);
diff --git a/drivers/soc/qcom/pdr_internal.h b/drivers/soc/qcom/pdr_internal.h
new file mode 100644
index 0000000000000..adaf85d509507
--- /dev/null
+++ b/drivers/soc/qcom/pdr_internal.h
@@ -0,0 +1,375 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __QCOM_PDR_INTERNAL__
+#define __QCOM_PDR_INTERNAL__
+
+#include <linux/soc/qcom/pdr.h>
+
+#define SERVREG_LOCATOR_SERVICE				0x40
+#define SERVREG_NOTIFIER_SERVICE			0x42
+
+#define SERVREG_REGISTER_LISTENER_REQ			0x20
+#define SERVREG_GET_DOMAIN_LIST_REQ			0x21
+#define SERVREG_STATE_UPDATED_IND_ID			0x22
+#define SERVREG_SET_ACK_REQ				0x23
+#define SERVREG_RESTART_PD_REQ				0x24
+
+#define SERVREG_DOMAIN_LIST_LENGTH			32
+#define SERVREG_RESTART_PD_REQ_MAX_LEN			67
+#define SERVREG_REGISTER_LISTENER_REQ_LEN		71
+#define SERVREG_SET_ACK_REQ_LEN				72
+#define SERVREG_GET_DOMAIN_LIST_REQ_MAX_LEN		74
+#define SERVREG_STATE_UPDATED_IND_MAX_LEN		79
+#define SERVREG_GET_DOMAIN_LIST_RESP_MAX_LEN		2389
+
+/* One domain entry in a GET_DOMAIN_LIST response */
+struct servreg_location_entry {
+	char name[SERVREG_NAME_LENGTH + 1];
+	u8 service_data_valid;
+	u32 service_data;
+	u32 instance;
+};
+
+struct qmi_elem_info servreg_location_entry_ei[] = {
+	{
+		.data_type      = QMI_STRING,
+		.elem_len       = SERVREG_NAME_LENGTH + 1,
+		.elem_size      = sizeof(char),
+		.array_type	= NO_ARRAY,
+		.tlv_type       = 0,
+		.offset         = offsetof(struct servreg_location_entry,
+					   name),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_4_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(u32),
+		.array_type	= NO_ARRAY,
+		.tlv_type       = 0,
+		.offset         = offsetof(struct servreg_location_entry,
+					   instance),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_1_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(u8),
+		.array_type	= NO_ARRAY,
+		.tlv_type       = 0,
+		.offset         = offsetof(struct servreg_location_entry,
+					   service_data_valid),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_4_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(u32),
+		.array_type	= NO_ARRAY,
+		.tlv_type       = 0,
+		.offset         = offsetof(struct servreg_location_entry,
+					   service_data),
+	},
+	{}
+};
+
+/* GET_DOMAIN_LIST request: service name plus optional paging offset */
+struct servreg_get_domain_list_req {
+	char service_name[SERVREG_NAME_LENGTH + 1];
+	u8 domain_offset_valid;
+	u32 domain_offset;
+};
+
+struct qmi_elem_info servreg_get_domain_list_req_ei[] = {
+	{
+		.data_type      = QMI_STRING,
+		.elem_len       = SERVREG_NAME_LENGTH + 1,
+		.elem_size      = sizeof(char),
+		.array_type	= NO_ARRAY,
+		.tlv_type       = 0x01,
+		.offset         = offsetof(struct servreg_get_domain_list_req,
+					   service_name),
+	},
+	{
+		.data_type      = QMI_OPT_FLAG,
+		.elem_len       = 1,
+		.elem_size      = sizeof(u8),
+		.array_type	= NO_ARRAY,
+		.tlv_type       = 0x10,
+		.offset         = offsetof(struct servreg_get_domain_list_req,
+					   domain_offset_valid),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_4_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(u32),
+		.array_type	= NO_ARRAY,
+		.tlv_type       = 0x10,
+		.offset         = offsetof(struct servreg_get_domain_list_req,
+					   domain_offset),
+	},
+	{}
+};
+
+/* GET_DOMAIN_LIST response: one page of up to 32 domain entries */
+struct servreg_get_domain_list_resp {
+	struct qmi_response_type_v01 resp;
+	u8 total_domains_valid;
+	u16 total_domains;
+	u8 db_rev_count_valid;
+	u16 db_rev_count;
+	u8 domain_list_valid;
+	u32 domain_list_len;
+	struct servreg_location_entry domain_list[SERVREG_DOMAIN_LIST_LENGTH];
+};
+
+struct qmi_elem_info servreg_get_domain_list_resp_ei[] = {
+	{
+		.data_type      = QMI_STRUCT,
+		.elem_len       = 1,
+		.elem_size      = sizeof(struct qmi_response_type_v01),
+		.array_type	= NO_ARRAY,
+		.tlv_type       = 0x02,
+		.offset         = offsetof(struct servreg_get_domain_list_resp,
+					   resp),
+		.ei_array      = qmi_response_type_v01_ei,
+	},
+	{
+		.data_type      = QMI_OPT_FLAG,
+		.elem_len       = 1,
+		.elem_size      = sizeof(u8),
+		.array_type	= NO_ARRAY,
+		.tlv_type       = 0x10,
+		.offset         = offsetof(struct servreg_get_domain_list_resp,
+					   total_domains_valid),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_2_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(u16),
+		.array_type	= NO_ARRAY,
+		.tlv_type       = 0x10,
+		.offset         = offsetof(struct servreg_get_domain_list_resp,
+					   total_domains),
+	},
+	{
+		.data_type      = QMI_OPT_FLAG,
+		.elem_len       = 1,
+		.elem_size      = sizeof(u8),
+		.array_type	= NO_ARRAY,
+		.tlv_type       = 0x11,
+		.offset         = offsetof(struct servreg_get_domain_list_resp,
+					   db_rev_count_valid),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_2_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(u16),
+		.array_type	= NO_ARRAY,
+		.tlv_type       = 0x11,
+		.offset         = offsetof(struct servreg_get_domain_list_resp,
+					   db_rev_count),
+	},
+	{
+		.data_type      = QMI_OPT_FLAG,
+		.elem_len       = 1,
+		.elem_size      = sizeof(u8),
+		.array_type	= NO_ARRAY,
+		.tlv_type       = 0x12,
+		.offset         = offsetof(struct servreg_get_domain_list_resp,
+					   domain_list_valid),
+	},
+	{
+		.data_type      = QMI_DATA_LEN,
+		.elem_len       = 1,
+		.elem_size      = sizeof(u8),
+		.array_type	= NO_ARRAY,
+		.tlv_type       = 0x12,
+		.offset         = offsetof(struct servreg_get_domain_list_resp,
+					   domain_list_len),
+	},
+	{
+		.data_type      = QMI_STRUCT,
+		.elem_len       = SERVREG_DOMAIN_LIST_LENGTH,
+		.elem_size      = sizeof(struct servreg_location_entry),
+		/*
+		 * domain_list is preceded by a QMI_DATA_LEN element, so it
+		 * must be decoded as a variable-length array (NO_ARRAY
+		 * would make the decoder mishandle the element count).
+		 */
+		.array_type	= VAR_LEN_ARRAY,
+		.tlv_type       = 0x12,
+		.offset         = offsetof(struct servreg_get_domain_list_resp,
+					   domain_list),
+		.ei_array      = servreg_location_entry_ei,
+	},
+	{}
+};
+
+/* REGISTER_LISTENER request: subscribe/unsubscribe for a service path */
+struct servreg_register_listener_req {
+	u8 enable;
+	char service_path[SERVREG_NAME_LENGTH + 1];
+};
+
+struct qmi_elem_info servreg_register_listener_req_ei[] = {
+	{
+		.data_type      = QMI_UNSIGNED_1_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(u8),
+		.array_type	= NO_ARRAY,
+		.tlv_type       = 0x01,
+		.offset         = offsetof(struct servreg_register_listener_req,
+					   enable),
+	},
+	{
+		.data_type      = QMI_STRING,
+		.elem_len       = SERVREG_NAME_LENGTH + 1,
+		.elem_size      = sizeof(char),
+		.array_type	= NO_ARRAY,
+		.tlv_type       = 0x02,
+		.offset         = offsetof(struct servreg_register_listener_req,
+					   service_path),
+	},
+	{}
+};
+
+/* REGISTER_LISTENER response: result plus optional current PD state */
+struct servreg_register_listener_resp {
+	struct qmi_response_type_v01 resp;
+	u8 curr_state_valid;
+	enum servreg_service_state curr_state;
+};
+
+struct qmi_elem_info servreg_register_listener_resp_ei[] = {
+	{
+		.data_type      = QMI_STRUCT,
+		.elem_len       = 1,
+		.elem_size      = sizeof(struct qmi_response_type_v01),
+		.array_type	= NO_ARRAY,
+		.tlv_type       = 0x02,
+		.offset         = offsetof(struct servreg_register_listener_resp,
+					   resp),
+		.ei_array      = qmi_response_type_v01_ei,
+	},
+	{
+		.data_type      = QMI_OPT_FLAG,
+		.elem_len       = 1,
+		.elem_size      = sizeof(u8),
+		.array_type	= NO_ARRAY,
+		.tlv_type       = 0x10,
+		.offset         = offsetof(struct servreg_register_listener_resp,
+					   curr_state_valid),
+	},
+	{
+		.data_type      = QMI_SIGNED_4_BYTE_ENUM,
+		.elem_len       = 1,
+		.elem_size      = sizeof(enum servreg_service_state),
+		.array_type	= NO_ARRAY,
+		.tlv_type       = 0x10,
+		.offset         = offsetof(struct servreg_register_listener_resp,
+					   curr_state),
+	},
+	{}
+};
+
+/* RESTART_PD request: service path of the PD to restart */
+struct servreg_restart_pd_req {
+	char service_path[SERVREG_NAME_LENGTH + 1];
+};
+
+struct qmi_elem_info servreg_restart_pd_req_ei[] = {
+	{
+		.data_type      = QMI_STRING,
+		.elem_len       = SERVREG_NAME_LENGTH + 1,
+		.elem_size      = sizeof(char),
+		.array_type	= NO_ARRAY,
+		.tlv_type       = 0x01,
+		.offset         = offsetof(struct servreg_restart_pd_req,
+					   service_path),
+	},
+	{}
+};
+
+/* RESTART_PD response: standard QMI result only */
+struct servreg_restart_pd_resp {
+	struct qmi_response_type_v01 resp;
+};
+
+struct qmi_elem_info servreg_restart_pd_resp_ei[] = {
+	{
+		.data_type      = QMI_STRUCT,
+		.elem_len       = 1,
+		.elem_size      = sizeof(struct qmi_response_type_v01),
+		.array_type	= NO_ARRAY,
+		.tlv_type       = 0x02,
+		.offset         = offsetof(struct servreg_restart_pd_resp,
+					   resp),
+		.ei_array      = qmi_response_type_v01_ei,
+	},
+	{}
+};
+
+/* STATE_UPDATED indication: PD state change pushed by the notifier */
+struct servreg_state_updated_ind {
+	enum servreg_service_state curr_state;
+	char service_path[SERVREG_NAME_LENGTH + 1];
+	u16 transaction_id;
+};
+
+struct qmi_elem_info servreg_state_updated_ind_ei[] = {
+	{
+		.data_type      = QMI_SIGNED_4_BYTE_ENUM,
+		.elem_len       = 1,
+		.elem_size      = sizeof(u32),
+		.array_type	= NO_ARRAY,
+		.tlv_type       = 0x01,
+		.offset         = offsetof(struct servreg_state_updated_ind,
+					   curr_state),
+	},
+	{
+		.data_type      = QMI_STRING,
+		.elem_len       = SERVREG_NAME_LENGTH + 1,
+		.elem_size      = sizeof(char),
+		.array_type	= NO_ARRAY,
+		.tlv_type       = 0x02,
+		.offset         = offsetof(struct servreg_state_updated_ind,
+					   service_path),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_2_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(u16),
+		.array_type	= NO_ARRAY,
+		.tlv_type       = 0x03,
+		.offset         = offsetof(struct servreg_state_updated_ind,
+					   transaction_id),
+	},
+	{}
+};
+
+/* SET_ACK request: acknowledge a STATE_UPDATED indication by trans-id */
+struct servreg_set_ack_req {
+	char service_path[SERVREG_NAME_LENGTH + 1];
+	u16 transaction_id;
+};
+
+struct qmi_elem_info servreg_set_ack_req_ei[] = {
+	{
+		.data_type      = QMI_STRING,
+		.elem_len       = SERVREG_NAME_LENGTH + 1,
+		.elem_size      = sizeof(char),
+		.array_type	= NO_ARRAY,
+		.tlv_type       = 0x01,
+		.offset         = offsetof(struct servreg_set_ack_req,
+					   service_path),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_2_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(u16),
+		.array_type	= NO_ARRAY,
+		.tlv_type       = 0x02,
+		.offset         = offsetof(struct servreg_set_ack_req,
+					   transaction_id),
+	},
+	{}
+};
+
+/* SET_ACK response: standard QMI result only */
+struct servreg_set_ack_resp {
+	struct qmi_response_type_v01 resp;
+};
+
+struct qmi_elem_info servreg_set_ack_resp_ei[] = {
+	{
+		.data_type      = QMI_STRUCT,
+		.elem_len       = 1,
+		.elem_size      = sizeof(struct qmi_response_type_v01),
+		.array_type	= NO_ARRAY,
+		.tlv_type       = 0x02,
+		.offset         = offsetof(struct servreg_set_ack_resp,
+					   resp),
+		.ei_array       = qmi_response_type_v01_ei,
+	},
+	{}
+};
+
+#endif
diff --git a/include/linux/soc/qcom/pdr.h b/include/linux/soc/qcom/pdr.h
new file mode 100644
index 0000000000000..22781e8cca567
--- /dev/null
+++ b/include/linux/soc/qcom/pdr.h
@@ -0,0 +1,102 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __QCOM_PDR_HELPER__
+#define __QCOM_PDR_HELPER__
+
+#include <linux/soc/qcom/qmi.h>
+
+#define SERVREG_NAME_LENGTH	64
+
+enum servreg_service_state {
+	SERVREG_LOCATOR_ERR = 0x1,
+	SERVREG_SERVICE_STATE_DOWN = 0x0FFFFFFF,
+	SERVREG_SERVICE_STATE_UP = 0x1FFFFFFF,
+	SERVREG_SERVICE_STATE_EARLY_DOWN = 0x2FFFFFFF,
+	SERVREG_SERVICE_STATE_UNINIT = 0x7FFFFFFF,
+};
+
+/**
+ * struct pdr_service - context to track lookups/restarts
+ * @service_name:		name of the service running on the PD
+ * @service_path:		service path of the PD
+ * @service_data_valid:		indicates if service_data field has valid data
+ * @service_data:		service data provided by servreg_locator service
+ * @service_connected:		current state of servreg_notifier qmi service
+ * @state:			current state of PD
+ * @service:			servreg_notifer service type
+ * @instance:			instance id of the @service
+ * @priv:			handle for client's use
+ * @node:			list_head for house keeping
+ */
+struct pdr_service {
+	char service_name[SERVREG_NAME_LENGTH + 1];
+	char service_path[SERVREG_NAME_LENGTH + 1];
+
+	u8 service_data_valid;
+	u32 service_data;
+
+	bool service_connected;
+	int state;
+
+	unsigned int instance;
+	unsigned int service;
+
+	void *priv;
+	struct list_head node;
+};
+
+/**
+ * struct pdr_handle - PDR context
+ * @servloc_client:	servreg_locator qmi handle
+ * @servreg_client:	servreg_notifier qmi handle
+ * @servloc_addr:	socket addr of @servloc_client
+ * @servreg_addr:	socket addr of @servreg_client
+ * @lookups:		list of lookup requests
+ * @servloc_list:	list of get domain requests to servreg_locator service
+ * @servreg_list:	list of new messages from servreg_notifer services
+ * @servdel_list:	list of del messages from servreg_notifer services
+ * @indack_list:	list of qmi indications from servreg_notifier services
+ * @list_lock:		lock for modifications of lists
+ * @locator_available:	completion variable to track servreg_locator service
+ * @servloc_work:	work for handling lookup requests
+ * @servreg_work:	work for registering with servreg_notifier service
+ * @servdel_work:	work for notifying PD down service on del server
+ * @indack_work:	work for acking the qmi indications
+ * @servreg_wq:		work queue to post @servreg_work and @servdel_work on
+ * @indack_wq:		work queue to post @indack_work on
+ * @status:		callback to inform the client on PD service state change
+ */
+struct pdr_handle {
+	struct qmi_handle servloc_client;
+	struct qmi_handle servreg_client;
+
+	struct sockaddr_qrtr servloc_addr;
+	struct sockaddr_qrtr servreg_addr;
+
+	struct list_head lookups;
+	struct list_head servloc_list;
+	struct list_head servreg_list;
+	struct list_head servdel_list;
+	struct list_head indack_list;
+
+	/* control access to pdr lists */
+	struct mutex list_lock;
+
+	struct completion locator_available;
+
+	struct work_struct servloc_work;
+	struct work_struct servreg_work;
+	struct work_struct servdel_work;
+	struct work_struct indack_work;
+
+	struct workqueue_struct *servreg_wq;
+	struct workqueue_struct *indack_wq;
+
+	int (*status)(struct pdr_handle *pdr, struct pdr_service *pds);
+};
+
+int pdr_handle_init(struct pdr_handle *pdr, int (*status)(struct pdr_handle *pdr, struct pdr_service *pds));
+int pdr_add_lookup(struct pdr_handle *pdr, const char *service_name, const char *service_path);
+int pdr_restart_pd(struct pdr_handle *pdr, const char *service_path);
+void pdr_handle_release(struct pdr_handle *pdr);
+
+#endif
diff --git a/include/linux/soc/qcom/qmi.h b/include/linux/soc/qcom/qmi.h
index 5efa2b67fa557..e712f94b89fcc 100644
--- a/include/linux/soc/qcom/qmi.h
+++ b/include/linux/soc/qcom/qmi.h
@@ -88,6 +88,7 @@ struct qmi_elem_info {
 #define QMI_ERR_CLIENT_IDS_EXHAUSTED_V01	5
 #define QMI_ERR_INVALID_ID_V01			41
 #define QMI_ERR_ENCODING_V01			58
+#define QMI_ERR_DISABLED_V01                    69
 #define QMI_ERR_INCOMPATIBLE_STATE_V01		90
 #define QMI_ERR_NOT_SUPPORTED_V01		94
 
-- 
The Qualcomm Innovation Center, Inc. is a member of the Code Aurora Forum,
a Linux Foundation Collaborative Project


^ permalink raw reply	[flat|nested] 12+ messages in thread

* [PATCH 2/3] dt-bindings: soc: qcom: apr: Add protection domain bindings
       [not found] <20191118142728.30187-1-sibis@codeaurora.org>
  2019-11-18 14:27 ` [PATCH 1/3] soc: qcom: Introduce Protection Domain Restart helpers Sibi Sankar
@ 2019-11-18 14:28 ` Sibi Sankar
  2019-11-18 14:28 ` [PATCH 3/3] soc: qcom: apr: Add avs/audio tracking functionality Sibi Sankar
                   ` (4 subsequent siblings)
  6 siblings, 0 replies; 12+ messages in thread
From: Sibi Sankar @ 2019-11-18 14:28 UTC (permalink / raw)
  To: bjorn.andersson, srinivas.kandagatla, robh+dt, tsoni
  Cc: agross, mark.rutland, linux-arm-msm, linux-remoteproc,
	linux-kernel, devicetree, rnayak, Sibi Sankar

Add optional "qcom,protection-domain" bindings for APR services. This
helps to capture the dependencies between APR services and the PD on
which each APR service runs.

Signed-off-by: Sibi Sankar <sibis@codeaurora.org>
---
 .../devicetree/bindings/soc/qcom/qcom,apr.txt | 59 +++++++++++++++++++
 1 file changed, 59 insertions(+)

diff --git a/Documentation/devicetree/bindings/soc/qcom/qcom,apr.txt b/Documentation/devicetree/bindings/soc/qcom/qcom,apr.txt
index db501269f47b8..f87c0b2a48de4 100644
--- a/Documentation/devicetree/bindings/soc/qcom/qcom,apr.txt
+++ b/Documentation/devicetree/bindings/soc/qcom/qcom,apr.txt
@@ -45,6 +45,12 @@ by the individual bindings for the specific service
 			12 - Ultrasound stream manager.
 			13 - Listen stream manager.
 
+- qcom,protection-domain
+	Usage: optional
+	Value type: <stringlist>
+	Definition: Must list the protection domain service name and path
+		    that the particular apr service has a dependency on.
+
 = EXAMPLE
 The following example represents a QDSP based sound card on a MSM8996 device
 which uses apr as communication between Apps and QDSP.
@@ -82,3 +88,56 @@ which uses apr as communication between Apps and QDSP.
 			...
 		};
 	};
+
+= EXAMPLE 2
+The following example represents a QDSP based sound card on SDM845 device.
+Here the apr services are dependent on "avs/audio" service running on AUDIO
+Protection Domain hosted on ADSP remote processor.
+
+	apr {
+		compatible = "qcom,apr-v2";
+		qcom,glink-channels = "apr_audio_svc";
+		qcom,apr-domain = <APR_DOMAIN_ADSP>;
+
+		q6core {
+			compatible = "qcom,q6core";
+			reg = <APR_SVC_ADSP_CORE>;
+			qcom,protection-domain = "avs/audio", "msm/adsp/audio_pd";
+		};
+
+		q6afe: q6afe {
+			compatible = "qcom,q6afe";
+			reg = <APR_SVC_AFE>;
+			qcom,protection-domain = "avs/audio", "msm/adsp/audio_pd";
+			q6afedai: dais {
+				compatible = "qcom,q6afe-dais";
+				#sound-dai-cells = <1>;
+
+				qi2s@22 {
+					reg = <22>;
+					qcom,sd-lines = <3>;
+				};
+			};
+		};
+
+		q6asm: q6asm {
+			compatible = "qcom,q6asm";
+			reg = <APR_SVC_ASM>;
+			qcom,protection-domain = "avs/audio", "msm/adsp/audio_pd";
+			q6asmdai: dais {
+				compatible = "qcom,q6asm-dais";
+				#sound-dai-cells = <1>;
+				iommus = <&apps_smmu 0x1821 0x0>;
+			};
+		};
+
+		q6adm: q6adm {
+			compatible = "qcom,q6adm";
+			reg = <APR_SVC_ADM>;
+			qcom,protection-domain = "avs/audio", "msm/adsp/audio_pd";
+			q6routing: routing {
+				compatible = "qcom,q6adm-routing";
+				#sound-dai-cells = <0>;
+			};
+		};
+	};
-- 
The Qualcomm Innovation Center, Inc. is a member of the Code Aurora Forum,
a Linux Foundation Collaborative Project


^ permalink raw reply	[flat|nested] 12+ messages in thread

* [PATCH 3/3] soc: qcom: apr: Add avs/audio tracking functionality
       [not found] <20191118142728.30187-1-sibis@codeaurora.org>
  2019-11-18 14:27 ` [PATCH 1/3] soc: qcom: Introduce Protection Domain Restart helpers Sibi Sankar
  2019-11-18 14:28 ` [PATCH 2/3] dt-bindings: soc: qcom: apr: Add protection domain bindings Sibi Sankar
@ 2019-11-18 14:28 ` Sibi Sankar
       [not found] ` <0101016e7ee9c786-fcf80f4e-9b57-4d6b-8806-9ca408e21b55-000000@us-west-2.amazonses.com>
                   ` (3 subsequent siblings)
  6 siblings, 0 replies; 12+ messages in thread
From: Sibi Sankar @ 2019-11-18 14:28 UTC (permalink / raw)
  To: bjorn.andersson, srinivas.kandagatla, robh+dt, tsoni
  Cc: agross, mark.rutland, linux-arm-msm, linux-remoteproc,
	linux-kernel, devicetree, rnayak, Sibi Sankar

Use PDR helper functions to track the protection domains that the apr
services are dependent upon on SDM845 SoC, specifically the "avs/audio"
service running on ADSP Q6.

Signed-off-by: Sibi Sankar <sibis@codeaurora.org>
---
 drivers/soc/qcom/Kconfig     |   1 +
 drivers/soc/qcom/apr.c       | 100 +++++++++++++++++++++++++++++++----
 include/linux/soc/qcom/apr.h |   1 +
 3 files changed, 91 insertions(+), 11 deletions(-)

diff --git a/drivers/soc/qcom/Kconfig b/drivers/soc/qcom/Kconfig
index 5c4e76837f59b..cacfed945b275 100644
--- a/drivers/soc/qcom/Kconfig
+++ b/drivers/soc/qcom/Kconfig
@@ -202,6 +202,7 @@ config QCOM_APR
 	tristate "Qualcomm APR Bus (Asynchronous Packet Router)"
 	depends on ARCH_QCOM || COMPILE_TEST
 	depends on RPMSG
+	select QCOM_PDR_HELPERS
 	help
           Enable APR IPC protocol support between
           application processor and QDSP6. APR is
diff --git a/drivers/soc/qcom/apr.c b/drivers/soc/qcom/apr.c
index 4fcc32420c474..193cee77a9a09 100644
--- a/drivers/soc/qcom/apr.c
+++ b/drivers/soc/qcom/apr.c
@@ -11,6 +11,7 @@
 #include <linux/workqueue.h>
 #include <linux/of_device.h>
 #include <linux/soc/qcom/apr.h>
+#include <linux/soc/qcom/pdr.h>
 #include <linux/rpmsg.h>
 #include <linux/of.h>
 
@@ -21,6 +22,7 @@ struct apr {
 	spinlock_t rx_lock;
 	struct idr svcs_idr;
 	int dest_domain_id;
+	struct pdr_handle pdr;
 	struct workqueue_struct *rxwq;
 	struct work_struct rx_work;
 	struct list_head rx_list;
@@ -289,6 +291,9 @@ static int apr_add_device(struct device *dev, struct device_node *np,
 		  id->svc_id + 1, GFP_ATOMIC);
 	spin_unlock(&apr->svcs_lock);
 
+	of_property_read_string_index(np, "qcom,protection-domain",
+				      1, &adev->service_path);
+
 	dev_info(dev, "Adding APR dev: %s\n", dev_name(&adev->dev));
 
 	ret = device_register(&adev->dev);
@@ -300,14 +305,56 @@ static int apr_add_device(struct device *dev, struct device_node *np,
 	return ret;
 }
 
-static void of_register_apr_devices(struct device *dev)
+static void of_apr_add_pd_lookups(struct device *dev)
+{
+	const char *service_name, *service_path;
+	struct apr *apr = dev_get_drvdata(dev);
+	struct device_node *node;
+	int ret;
+
+	for_each_child_of_node(dev->of_node, node) {
+		ret = of_property_read_string_index(node, "qcom,protection-domain",
+						    0, &service_name);
+		if (ret < 0)
+			continue;
+
+		ret = of_property_read_string_index(node, "qcom,protection-domain",
+						    1, &service_path);
+		if (ret < 0)
+			continue;
+
+		ret = pdr_add_lookup(&apr->pdr, service_name, service_path);
+		if (ret && ret != -EALREADY)
+			dev_err(dev, "pdr add lookup failed: %d\n", ret);
+	}
+}
+
+static void of_register_apr_devices(struct device *dev, const char *svc_path)
 {
 	struct apr *apr = dev_get_drvdata(dev);
 	struct device_node *node;
+	const char *service_path;
+	int ret;
 
 	for_each_child_of_node(dev->of_node, node) {
 		struct apr_device_id id = { {0} };
 
+		ret = of_property_read_string_index(node, "qcom,protection-domain",
+						    1, &service_path);
+		if (svc_path) {
+			/* skip APR services that are PD independent */
+			if (ret)
+				continue;
+
+			/* skip APR services whose PD paths don't match */
+			if (strcmp(service_path, svc_path))
+				continue;
+		} else {
+			/* skip APR services whose PD lookups are registered*/
+			if (ret == 0)
+				continue;
+		}
+
 		if (of_property_read_u32(node, "reg", &id.svc_id))
 			continue;
 
@@ -318,6 +365,37 @@ static void of_register_apr_devices(struct device *dev)
 	}
 }
 
+static int apr_remove_device(struct device *dev, void *svc_path)
+{
+	struct apr_device *adev = to_apr_device(dev);
+
+	if (svc_path) {
+		if (!strcmp(adev->service_path, (char *)svc_path))
+			device_unregister(&adev->dev);
+	} else {
+		device_unregister(&adev->dev);
+	}
+
+	return 0;
+}
+
+static int apr_pd_status(struct pdr_handle *pdr, struct pdr_service *pds)
+{
+	struct apr *apr = container_of(pdr, struct apr, pdr);
+
+	switch (pds->state) {
+	case SERVREG_SERVICE_STATE_UP:
+		of_register_apr_devices(apr->dev, pds->service_path);
+		break;
+	case SERVREG_SERVICE_STATE_DOWN:
+		device_for_each_child(apr->dev, pds->service_path,
+				      apr_remove_device);
+		break;
+	}
+
+	return 0;
+}
+
 static int apr_probe(struct rpmsg_device *rpdev)
 {
 	struct device *dev = &rpdev->dev;
@@ -343,20 +421,19 @@ static int apr_probe(struct rpmsg_device *rpdev)
 		return -ENOMEM;
 	}
 	INIT_WORK(&apr->rx_work, apr_rxwq);
+
+	ret = pdr_handle_init(&apr->pdr, apr_pd_status);
+	if (ret) {
+		dev_err(dev, "Failed to init PDR handle\n");
+		return ret;
+	}
+
 	INIT_LIST_HEAD(&apr->rx_list);
 	spin_lock_init(&apr->rx_lock);
 	spin_lock_init(&apr->svcs_lock);
 	idr_init(&apr->svcs_idr);
-	of_register_apr_devices(dev);
-
-	return 0;
-}
-
-static int apr_remove_device(struct device *dev, void *null)
-{
-	struct apr_device *adev = to_apr_device(dev);
-
-	device_unregister(&adev->dev);
+	of_apr_add_pd_lookups(dev);
+	of_register_apr_devices(dev, NULL);
 
 	return 0;
 }
@@ -365,6 +442,7 @@ static void apr_remove(struct rpmsg_device *rpdev)
 {
 	struct apr *apr = dev_get_drvdata(&rpdev->dev);
 
+	pdr_handle_release(&apr->pdr);
 	device_for_each_child(&rpdev->dev, NULL, apr_remove_device);
 	flush_workqueue(apr->rxwq);
 	destroy_workqueue(apr->rxwq);
diff --git a/include/linux/soc/qcom/apr.h b/include/linux/soc/qcom/apr.h
index c5d52e2cb275f..7f0bc3cf4d610 100644
--- a/include/linux/soc/qcom/apr.h
+++ b/include/linux/soc/qcom/apr.h
@@ -85,6 +85,7 @@ struct apr_device {
 	uint16_t	domain_id;
 	uint32_t	version;
 	char name[APR_NAME_SIZE];
+	const char *service_path;
 	spinlock_t	lock;
 	struct list_head node;
 };
-- 
The Qualcomm Innovation Center, Inc. is a member of the Code Aurora Forum,
a Linux Foundation Collaborative Project


^ permalink raw reply	[flat|nested] 12+ messages in thread

* Re: [PATCH 2/3] dt-bindings: soc: qcom: apr: Add protection domain bindings
       [not found] ` <0101016e7ee9c786-fcf80f4e-9b57-4d6b-8806-9ca408e21b55-000000@us-west-2.amazonses.com>
@ 2019-11-19  5:49   ` Bjorn Andersson
  2019-11-21 16:04   ` Srinivas Kandagatla
  1 sibling, 0 replies; 12+ messages in thread
From: Bjorn Andersson @ 2019-11-19  5:49 UTC (permalink / raw)
  To: Sibi Sankar
  Cc: srinivas.kandagatla, robh+dt, tsoni, agross, mark.rutland,
	linux-arm-msm, linux-remoteproc, linux-kernel, devicetree,
	rnayak

On Mon 18 Nov 06:28 PST 2019, Sibi Sankar wrote:

> Add optional "qcom,protection-domain" bindings for APR services. This
> helps to capture the dependencies between APR services and the PD on
> which each apr service run.
> 

Reviewed-by: Bjorn Andersson <bjorn.andersson@linaro.org>

> Signed-off-by: Sibi Sankar <sibis@codeaurora.org>
> ---
>  .../devicetree/bindings/soc/qcom/qcom,apr.txt | 59 +++++++++++++++++++
>  1 file changed, 59 insertions(+)
> 
> diff --git a/Documentation/devicetree/bindings/soc/qcom/qcom,apr.txt b/Documentation/devicetree/bindings/soc/qcom/qcom,apr.txt
> index db501269f47b8..f87c0b2a48de4 100644
> --- a/Documentation/devicetree/bindings/soc/qcom/qcom,apr.txt
> +++ b/Documentation/devicetree/bindings/soc/qcom/qcom,apr.txt
> @@ -45,6 +45,12 @@ by the individual bindings for the specific service
>  			12 - Ultrasound stream manager.
>  			13 - Listen stream manager.
>  
> +- qcom,protection-domain
> +	Usage: optional
> +	Value type: <stringlist>
> +	Definition: Must list the protection domain service name and path
> +		    that the particular apr service has a dependency on.
> +
>  = EXAMPLE
>  The following example represents a QDSP based sound card on a MSM8996 device
>  which uses apr as communication between Apps and QDSP.
> @@ -82,3 +88,56 @@ which uses apr as communication between Apps and QDSP.
>  			...
>  		};
>  	};
> +
> += EXAMPLE 2
> +The following example represents a QDSP based sound card on SDM845 device.
> +Here the apr services are dependent on "avs/audio" service running on AUDIO
> +Protection Domain hosted on ADSP remote processor.
> +
> +	apr {
> +		compatible = "qcom,apr-v2";
> +		qcom,glink-channels = "apr_audio_svc";
> +		qcom,apr-domain = <APR_DOMAIN_ADSP>;
> +
> +		q6core {
> +			compatible = "qcom,q6core";
> +			reg = <APR_SVC_ADSP_CORE>;
> +			qcom,protection-domain = "avs/audio", "msm/adsp/audio_pd";
> +		};
> +
> +		q6afe: q6afe {
> +			compatible = "qcom,q6afe";
> +			reg = <APR_SVC_AFE>;
> +			qcom,protection-domain = "avs/audio", "msm/adsp/audio_pd";
> +			q6afedai: dais {
> +				compatible = "qcom,q6afe-dais";
> +				#sound-dai-cells = <1>;
> +
> +				qi2s@22 {
> +					reg = <22>;
> +					qcom,sd-lines = <3>;
> +				};
> +			};
> +		};
> +
> +		q6asm: q6asm {
> +			compatible = "qcom,q6asm";
> +			reg = <APR_SVC_ASM>;
> +			qcom,protection-domain = "avs/audio", "msm/adsp/audio_pd";
> +			q6asmdai: dais {
> +				compatible = "qcom,q6asm-dais";
> +				#sound-dai-cells = <1>;
> +				iommus = <&apps_smmu 0x1821 0x0>;
> +			};
> +		};
> +
> +		q6adm: q6adm {
> +			compatible = "qcom,q6adm";
> +			reg = <APR_SVC_ADM>;
> +			qcom,protection-domain = "avs/audio", "msm/adsp/audio_pd";
> +			q6routing: routing {
> +				compatible = "qcom,q6adm-routing";
> +				#sound-dai-cells = <0>;
> +			};
> +		};
> +	};
> -- 
> The Qualcomm Innovation Center, Inc. is a member of the Code Aurora Forum,
> a Linux Foundation Collaborative Project
> 

^ permalink raw reply	[flat|nested] 12+ messages in thread

* Re: [PATCH 1/3] soc: qcom: Introduce Protection Domain Restart helpers
       [not found] ` <0101016e7ee9be5e-1d6bbe06-4bab-434d-9040-ebfa3918b213-000000@us-west-2.amazonses.com>
@ 2019-11-19  6:40   ` Bjorn Andersson
  2019-11-19 10:18     ` sibis
       [not found]     ` <0101016e832bd54d-453473ee-c0fa-44f5-a873-55b97dff4a9a-000000@us-west-2.amazonses.com>
  0 siblings, 2 replies; 12+ messages in thread
From: Bjorn Andersson @ 2019-11-19  6:40 UTC (permalink / raw)
  To: Sibi Sankar
  Cc: srinivas.kandagatla, robh+dt, tsoni, agross, mark.rutland,
	linux-arm-msm, linux-remoteproc, linux-kernel, devicetree,
	rnayak

On Mon 18 Nov 06:27 PST 2019, Sibi Sankar wrote:
> diff --git a/drivers/soc/qcom/pdr_interface.c b/drivers/soc/qcom/pdr_interface.c
[..]
> +static void pdr_indack_work(struct work_struct *work)
> +{
> +	struct pdr_handle *pdr = container_of(work, struct pdr_handle,
> +					      indack_work);
> +	struct pdr_list_node *ind, *tmp;
> +	struct pdr_service *pds;
> +
> +	list_for_each_entry_safe(ind, tmp, &pdr->indack_list, node) {
> +		pds = ind->pds;
> +		pdr_send_indack_msg(pdr, pds, ind->transaction_id);

So when we get an ind_cb with the new status, we need to send an ack
request, which will result in a response, just to confirm that we got
the event?

Seems like we should fix the qmi code to make it possible to send a
request from the indication handler and then we could simply ignore the
response. Or do we need to not pdr->status() until we get the response
for some reason?


Regardless, I'm fine with scheduling this for now...

> +		pdr->status(pdr, pds);
> +		list_del(&ind->node);
> +		kfree(ind);
> +	}
> +}
> +
> +static void pdr_servreg_ind_cb(struct qmi_handle *qmi,
> +			       struct sockaddr_qrtr *sq,
> +			       struct qmi_txn *txn, const void *data)
> +{
> +	struct pdr_handle *pdr = container_of(qmi, struct pdr_handle,
> +					      servreg_client);
> +	const struct servreg_state_updated_ind *ind_msg = data;
> +	struct pdr_list_node *ind;
> +	struct pdr_service *pds;
> +
> +	if (!ind_msg || !ind_msg->service_path ||
> +	    strlen(ind_msg->service_path) > (SERVREG_NAME_LENGTH + 1))
> +		return;
> +
> +	list_for_each_entry(pds, &pdr->lookups, node) {
> +		if (!strcmp(pds->service_path, ind_msg->service_path))
> +			goto found;
> +	}
> +	return;
> +
> +found:
> +	pds->state = ind_msg->curr_state;
> +
> +	ind = kzalloc(sizeof(*ind), GFP_KERNEL);
> +	if (!ind)
> +		return;
> +
> +	pr_info("PDR: Indication received from %s, state: 0x%x, trans-id: %d\n",
> +		ind_msg->service_path, ind_msg->curr_state,
> +		ind_msg->transaction_id);
> +
> +	ind->transaction_id = ind_msg->transaction_id;
> +	ind->pds = pds;
> +
> +	mutex_lock(&pdr->list_lock);
> +	list_add_tail(&ind->node, &pdr->indack_list);
> +	mutex_unlock(&pdr->list_lock);
> +
> +	queue_work(pdr->indack_wq, &pdr->indack_work);
> +}
> +
> +static struct qmi_msg_handler qmi_indication_handler[] = {
> +	{
> +		.type = QMI_INDICATION,
> +		.msg_id = SERVREG_STATE_UPDATED_IND_ID,
> +		.ei = servreg_state_updated_ind_ei,
> +		.decoded_size = sizeof(struct servreg_state_updated_ind),
> +		.fn = pdr_servreg_ind_cb,
> +	},
> +	{}
> +};
> +
> +static int pdr_get_domain_list(struct servreg_get_domain_list_req *req,
> +			       struct servreg_get_domain_list_resp *resp,
> +			       struct pdr_handle *pdr)
> +{
> +	struct qmi_txn txn;
> +	int ret;
> +
> +	ret = qmi_txn_init(&pdr->servloc_client, &txn,
> +			   servreg_get_domain_list_resp_ei, resp);
> +	if (ret < 0)
> +		return ret;
> +
> +	ret = qmi_send_request(&pdr->servloc_client,
> +			       &pdr->servloc_addr,
> +			       &txn, SERVREG_GET_DOMAIN_LIST_REQ,
> +			       SERVREG_GET_DOMAIN_LIST_REQ_MAX_LEN,
> +			       servreg_get_domain_list_req_ei,
> +			       req);
> +	if (ret < 0) {
> +		qmi_txn_cancel(&txn);
> +		return ret;
> +	}
> +
> +	ret = qmi_txn_wait(&txn, 5 * HZ);
> +	if (ret < 0) {
> +		pr_err("PDR: %s get domain list txn wait failed: %d\n",
> +		       req->service_name, ret);
> +		return ret;
> +	}
> +
> +	/* Check the response */
> +	if (resp->resp.result != QMI_RESULT_SUCCESS_V01) {
> +		pr_err("PDR: %s get domain list failed: 0x%x\n",
> +		       req->service_name, resp->resp.error);
> +		return -EREMOTEIO;
> +	}
> +
> +	return ret;

ret here will be the number of bytes decoded, but you really only care
about if this was an error or not. So I would suggest that you just
return 0 here.

> +}
> +
> +static int pdr_locate_service(struct pdr_handle *pdr, struct pdr_service *pds)
> +{
> +	struct servreg_get_domain_list_resp *resp = NULL;
> +	struct servreg_get_domain_list_req req;
> +	int db_rev_count = 0, domains_read = 0;
> +	struct servreg_location_entry *entry;
> +	int ret, i;
> +
> +	resp = kzalloc(sizeof(*resp), GFP_KERNEL);
> +	if (!resp)
> +		return -ENOMEM;
> +
> +	/* Prepare req message */
> +	strcpy(req.service_name, pds->service_name);
> +	req.domain_offset_valid = true;
> +	req.domain_offset = 0;
> +
> +	do {
> +		req.domain_offset = domains_read;
> +		ret = pdr_get_domain_list(&req, resp, pdr);
> +		if (ret < 0)
> +			goto out;
> +
> +		if (!domains_read)
> +			db_rev_count = resp->db_rev_count;
> +
> +		if (db_rev_count != resp->db_rev_count) {
> +			ret = -EAGAIN;
> +			goto out;
> +		}
> +
> +		for (i = domains_read; i < resp->domain_list_len; i++) {
> +			entry = &resp->domain_list[i];
> +
> +			if (strlen(entry->name) > (SERVREG_NAME_LENGTH + 1))

In the event that the incoming string isn't NUL-terminated this will run
off the array.

if (strnlen(entry->name, SERVREG_NAME_LENGTH + 1) == SERVREG_NAME_LENGTH + 1)

or perhaps, relying on sizeof instead of duplicating the knowledge that
it is SERVREG_NAME_LENGTH + 1:

if (strnlen(entry->name, sizeof(entry->name)) == sizeof(entry->name))

> +				continue;
> +
> +			if (!strcmp(entry->name, pds->service_path)) {
> +				pds->service_data_valid = entry->service_data_valid;
> +				pds->service_data = entry->service_data;
> +				pds->instance = entry->instance;
> +				goto out;
> +			}
> +		}
> +
> +		/* Update ret to indicate that the service is not yet found */
> +		ret = -EINVAL;
> +
> +		/* Always read total_domains from the response msg */
> +		if (resp->domain_list_len >  resp->total_domains)

Double space after '>'

> +			resp->domain_list_len = resp->total_domains;
> +
> +		domains_read += resp->domain_list_len;
> +	} while (domains_read < resp->total_domains);
> +out:
> +	kfree(resp);
> +	return ret;
> +}
> +
> +static void pdr_servloc_work(struct work_struct *work)
> +{
> +	struct pdr_handle *pdr = container_of(work, struct pdr_handle,
> +					      servloc_work);
> +	struct pdr_list_node *servloc, *tmp;
> +	struct pdr_service *pds;
> +	int ret;
> +
> +	list_for_each_entry_safe(servloc, tmp, &pdr->servloc_list, node) {
> +		pds = servloc->pds;
> +
> +		/* wait for PD Mapper to come up */
> +		ret = wait_for_completion_timeout(&pdr->locator_available, 10 * HZ);

Afaict this means that we will only look for the locator during the 10
seconds that follows a pdr_add_lookup().

How about changing this so that you bail before the loop if the locator
hasn't showed up yet and schedule this worker when the locator is
registered?

> +		if (!ret) {
> +			pr_err("PDR: SERVICE LOCATOR service wait failed\n");
> +			ret = -ETIMEDOUT;
> +			goto err;
> +		}
> +
> +		ret = pdr_locate_service(pdr, pds);
> +		if (ret < 0) {
> +			pr_err("PDR: service lookup for %s failed: %d\n",
> +			       pds->service_name, ret);
> +			goto err;
> +		}
> +
> +		qmi_add_lookup(&pdr->servreg_client, pds->service, 1,
> +			       pds->instance);
> +err:
> +		list_del(&servloc->node);
> +		kfree(servloc);
> +
> +		/* cleanup pds on error */
> +		if (ret < 0) {
> +			pds->state = SERVREG_LOCATOR_ERR;
> +			pdr->status(pdr, pds);
> +			list_del(&pds->node);
> +			kfree(pds);
> +		}
> +	}
> +}
[..]
> +int pdr_add_lookup(struct pdr_handle *pdr, const char *service_name,
> +		   const char *service_path)
> +{
> +	struct pdr_service *pds, *pds_iter, *tmp;
> +	struct pdr_list_node *servloc;
> +	int ret;
> +
> +	if (!service_name || strlen(service_name) > (SERVREG_NAME_LENGTH + 1) ||
> +	    !service_path || strlen(service_path) > (SERVREG_NAME_LENGTH + 1))

When strlen(x) == SERVREG_NAME_LENGTH + 1 your strcpy below would write
SERVREG_NAME_LENGTH + 2 bytes to service_name and service_path, so drop
the + 1 from the comparisons.

> +		return -EINVAL;
> +
> +	servloc = kzalloc(sizeof(*servloc), GFP_KERNEL);
> +	if (!servloc)
> +		return -ENOMEM;
> +
> +	pds = kzalloc(sizeof(*pds), GFP_KERNEL);
> +	if (!pds) {
> +		ret = -ENOMEM;
> +		goto err;
> +	}
> +
> +	pds->service = SERVREG_NOTIFIER_SERVICE;
> +	strcpy(pds->service_name, service_name);
> +	strcpy(pds->service_path, service_path);
[..]
> +int pdr_restart_pd(struct pdr_handle *pdr, const char *service_path)
> +{
> +	struct servreg_restart_pd_req req;
> +	struct servreg_restart_pd_resp resp;
> +	struct pdr_service *pds = NULL, *pds_iter, *tmp;
> +	struct qmi_txn txn;
> +	int ret;
> +
> +	if (!service_path || strlen(service_path) > (SERVREG_NAME_LENGTH + 1))

As above, drop the + 1

> +		return -EINVAL;
> +
[..]
> +int pdr_handle_init(struct pdr_handle *pdr,
> +		    int (*status)(struct pdr_handle *pdr,
> +				  struct pdr_service *pds))
> +{
[..]
> +	pdr->servreg_wq = create_singlethread_workqueue("pdr_servreg_wq");
> +	if (!pdr->servreg_wq)
> +		return -ENOMEM;
> +
> +	pdr->indack_wq = alloc_ordered_workqueue("pdr_indack_wq", WQ_HIGHPRI);

The two workqueues mean that we should be able to call pdr->status()
from two concurrent contexts; I don't think our clients will expect that.

> +	if (!pdr->indack_wq) {
> +		ret = -ENOMEM;
> +		goto destroy_servreg;
> +	}
> +

Regards,
Bjorn

^ permalink raw reply	[flat|nested] 12+ messages in thread

* Re: [PATCH 3/3] soc: qcom: apr: Add avs/audio tracking functionality
       [not found] ` <0101016e7ee9d8b5-9759d0ba-4acf-4fc4-a863-fac9c738397f-000000@us-west-2.amazonses.com>
@ 2019-11-19  6:53   ` Bjorn Andersson
  2019-11-19 10:25     ` sibis
  0 siblings, 1 reply; 12+ messages in thread
From: Bjorn Andersson @ 2019-11-19  6:53 UTC (permalink / raw)
  To: Sibi Sankar
  Cc: srinivas.kandagatla, robh+dt, tsoni, agross, mark.rutland,
	linux-arm-msm, linux-remoteproc, linux-kernel, devicetree,
	rnayak

On Mon 18 Nov 06:28 PST 2019, Sibi Sankar wrote:
> diff --git a/drivers/soc/qcom/Kconfig b/drivers/soc/qcom/Kconfig
[..]
> +static void of_register_apr_devices(struct device *dev, const char *svc_path)
>  {
>  	struct apr *apr = dev_get_drvdata(dev);
>  	struct device_node *node;
> +	const char *service_path;
> +	int ret;
>  
>  	for_each_child_of_node(dev->of_node, node) {
>  		struct apr_device_id id = { {0} };
>  
> +		ret = of_property_read_string_index(node, "qcom,protection-domain",
> +						    1, &service_path);
> +		if (svc_path) {
> +			/* skip APR services that are PD independent */
> +			if (ret)
> +				continue;
> +
> +			/* skip APR services whose PD paths don't match */
> +			if (strcmp(service_path, svc_path))
> +				continue;
> +		} else {
> +			/* skip APR services whose PD lookups are registered*/

Missing space before */

> +			if (ret == 0)
> +				continue;
> +		}
> +
>  		if (of_property_read_u32(node, "reg", &id.svc_id))
>  			continue;
>  
> @@ -318,6 +365,37 @@ static void of_register_apr_devices(struct device *dev)
>  	}
>  }
>  
> +static int apr_remove_device(struct device *dev, void *svc_path)
> +{
> +	struct apr_device *adev = to_apr_device(dev);
> +
> +	if (svc_path) {
> +		if (!strcmp(adev->service_path, (char *)svc_path))
> +			device_unregister(&adev->dev);
> +	} else {
> +		device_unregister(&adev->dev);
> +	}
> +
> +	return 0;
> +}
> +
> +static int apr_pd_status(struct pdr_handle *pdr, struct pdr_service *pds)

Why is the pdr status function returning an int?

> +{
> +	struct apr *apr = container_of(pdr, struct apr, pdr);
> +
> +	switch (pds->state) {
> +	case SERVREG_SERVICE_STATE_UP:
> +		of_register_apr_devices(apr->dev, pds->service_path);
> +		break;
> +	case SERVREG_SERVICE_STATE_DOWN:
> +		device_for_each_child(apr->dev, pds->service_path,
> +				      apr_remove_device);
> +		break;
> +	}
> +
> +	return 0;
> +}
[..]
> @@ -343,20 +421,19 @@ static int apr_probe(struct rpmsg_device *rpdev)
>  		return -ENOMEM;
>  	}
>  	INIT_WORK(&apr->rx_work, apr_rxwq);
> +
> +	ret = pdr_handle_init(&apr->pdr, apr_pd_status);
> +	if (ret) {
> +		dev_err(dev, "Failed to init PDR handle\n");

You need to destroy apr->rxwq here as well.

> +		return ret;
> +	}
> +
>  	INIT_LIST_HEAD(&apr->rx_list);
>  	spin_lock_init(&apr->rx_lock);
>  	spin_lock_init(&apr->svcs_lock);
>  	idr_init(&apr->svcs_idr);
> -	of_register_apr_devices(dev);
> -
> -	return 0;
> -}

Regards,
Bjorn

^ permalink raw reply	[flat|nested] 12+ messages in thread

* Re: [PATCH 1/3] soc: qcom: Introduce Protection Domain Restart helpers
  2019-11-19  6:40   ` [PATCH 1/3] soc: qcom: Introduce Protection Domain Restart helpers Bjorn Andersson
@ 2019-11-19 10:18     ` sibis
       [not found]     ` <0101016e832bd54d-453473ee-c0fa-44f5-a873-55b97dff4a9a-000000@us-west-2.amazonses.com>
  1 sibling, 0 replies; 12+ messages in thread
From: sibis @ 2019-11-19 10:18 UTC (permalink / raw)
  To: Bjorn Andersson
  Cc: srinivas.kandagatla, robh+dt, tsoni, agross, mark.rutland,
	linux-arm-msm, linux-remoteproc, linux-kernel, devicetree,
	rnayak

Hey Bjorn,
Thanks for taking the time to
review the series :)

On 2019-11-19 12:10, Bjorn Andersson wrote:
> On Mon 18 Nov 06:27 PST 2019, Sibi Sankar wrote:
>> diff --git a/drivers/soc/qcom/pdr_interface.c 
>> b/drivers/soc/qcom/pdr_interface.c
> [..]
>> +static void pdr_indack_work(struct work_struct *work)
>> +{
>> +	struct pdr_handle *pdr = container_of(work, struct pdr_handle,
>> +					      indack_work);
>> +	struct pdr_list_node *ind, *tmp;
>> +	struct pdr_service *pds;
>> +
>> +	list_for_each_entry_safe(ind, tmp, &pdr->indack_list, node) {
>> +		pds = ind->pds;
>> +		pdr_send_indack_msg(pdr, pds, ind->transaction_id);
> 
> So when we get an ind_cb with the new status, we need to send an ack
> request, which will result in a response, just to confirm that we got
> the event?
> 
> Seems like we should fix the qmi code to make it possible to send a
> request from the indication handler and then we could simply ignore the

yeah maybe having a provision
to send custom requests back on
indication would be the way
to go. Not all indications need
to be serviced with requests.

> response. Or do we need to not pdr->status() until we get the response
> for some reason?

adsp waits on the ack response for
a fixed duration and seems to throw
a fatal err if the ack is not
serviced. Hence holding back pd->status
till we service the ack here.

> 
> 
> Regardless, I'm fine with scheduling this for now...
> 
>> +		pdr->status(pdr, pds);
>> +		list_del(&ind->node);
>> +		kfree(ind);
>> +	}
>> +}
>> +
>> +static void pdr_servreg_ind_cb(struct qmi_handle *qmi,
>> +			       struct sockaddr_qrtr *sq,
>> +			       struct qmi_txn *txn, const void *data)
>> +{
>> +	struct pdr_handle *pdr = container_of(qmi, struct pdr_handle,
>> +					      servreg_client);
>> +	const struct servreg_state_updated_ind *ind_msg = data;
>> +	struct pdr_list_node *ind;
>> +	struct pdr_service *pds;
>> +
>> +	if (!ind_msg || !ind_msg->service_path ||
>> +	    strlen(ind_msg->service_path) > (SERVREG_NAME_LENGTH + 1))
>> +		return;
>> +
>> +	list_for_each_entry(pds, &pdr->lookups, node) {
>> +		if (!strcmp(pds->service_path, ind_msg->service_path))
>> +			goto found;
>> +	}
>> +	return;
>> +
>> +found:
>> +	pds->state = ind_msg->curr_state;
>> +
>> +	ind = kzalloc(sizeof(*ind), GFP_KERNEL);
>> +	if (!ind)
>> +		return;
>> +
>> +	pr_info("PDR: Indication received from %s, state: 0x%x, trans-id: 
>> %d\n",
>> +		ind_msg->service_path, ind_msg->curr_state,
>> +		ind_msg->transaction_id);
>> +
>> +	ind->transaction_id = ind_msg->transaction_id;
>> +	ind->pds = pds;
>> +
>> +	mutex_lock(&pdr->list_lock);
>> +	list_add_tail(&ind->node, &pdr->indack_list);
>> +	mutex_unlock(&pdr->list_lock);
>> +
>> +	queue_work(pdr->indack_wq, &pdr->indack_work);
>> +}
>> +
>> +static struct qmi_msg_handler qmi_indication_handler[] = {
>> +	{
>> +		.type = QMI_INDICATION,
>> +		.msg_id = SERVREG_STATE_UPDATED_IND_ID,
>> +		.ei = servreg_state_updated_ind_ei,
>> +		.decoded_size = sizeof(struct servreg_state_updated_ind),
>> +		.fn = pdr_servreg_ind_cb,
>> +	},
>> +	{}
>> +};
>> +
>> +static int pdr_get_domain_list(struct servreg_get_domain_list_req 
>> *req,
>> +			       struct servreg_get_domain_list_resp *resp,
>> +			       struct pdr_handle *pdr)
>> +{
>> +	struct qmi_txn txn;
>> +	int ret;
>> +
>> +	ret = qmi_txn_init(&pdr->servloc_client, &txn,
>> +			   servreg_get_domain_list_resp_ei, resp);
>> +	if (ret < 0)
>> +		return ret;
>> +
>> +	ret = qmi_send_request(&pdr->servloc_client,
>> +			       &pdr->servloc_addr,
>> +			       &txn, SERVREG_GET_DOMAIN_LIST_REQ,
>> +			       SERVREG_GET_DOMAIN_LIST_REQ_MAX_LEN,
>> +			       servreg_get_domain_list_req_ei,
>> +			       req);
>> +	if (ret < 0) {
>> +		qmi_txn_cancel(&txn);
>> +		return ret;
>> +	}
>> +
>> +	ret = qmi_txn_wait(&txn, 5 * HZ);
>> +	if (ret < 0) {
>> +		pr_err("PDR: %s get domain list txn wait failed: %d\n",
>> +		       req->service_name, ret);
>> +		return ret;
>> +	}
>> +
>> +	/* Check the response */
>> +	if (resp->resp.result != QMI_RESULT_SUCCESS_V01) {
>> +		pr_err("PDR: %s get domain list failed: 0x%x\n",
>> +		       req->service_name, resp->resp.error);
>> +		return -EREMOTEIO;
>> +	}
>> +
>> +	return ret;
> 
> ret here will be the number of bytes decoded, but you really only care
> about if this was an error or not. So I would suggest that you just
> return 0 here.

yeah will return 0

> 
>> +}
>> +
>> +static int pdr_locate_service(struct pdr_handle *pdr, struct 
>> pdr_service *pds)
>> +{
>> +	struct servreg_get_domain_list_resp *resp = NULL;
>> +	struct servreg_get_domain_list_req req;
>> +	int db_rev_count = 0, domains_read = 0;
>> +	struct servreg_location_entry *entry;
>> +	int ret, i;
>> +
>> +	resp = kzalloc(sizeof(*resp), GFP_KERNEL);
>> +	if (!resp)
>> +		return -ENOMEM;
>> +
>> +	/* Prepare req message */
>> +	strcpy(req.service_name, pds->service_name);
>> +	req.domain_offset_valid = true;
>> +	req.domain_offset = 0;
>> +
>> +	do {
>> +		req.domain_offset = domains_read;
>> +		ret = pdr_get_domain_list(&req, resp, pdr);
>> +		if (ret < 0)
>> +			goto out;
>> +
>> +		if (!domains_read)
>> +			db_rev_count = resp->db_rev_count;
>> +
>> +		if (db_rev_count != resp->db_rev_count) {
>> +			ret = -EAGAIN;
>> +			goto out;
>> +		}
>> +
>> +		for (i = domains_read; i < resp->domain_list_len; i++) {
>> +			entry = &resp->domain_list[i];
>> +
>> +			if (strlen(entry->name) > (SERVREG_NAME_LENGTH + 1))
> 
> In the event that the incoming string isn't NUL-terminated this will 
> run
> off the array.
> 
> if (strnlen(entry->name, SERVREG_NAME_LENGTH + 1) == 
> SERVREG_NAME_LENGTH + 1)
> 
> or perhaps, relying on sizeof instead of duplicating the knowledge that
> it is SERVREG_NAME_LENGTH + 1:
> 
> if (strnlen(entry->name, sizeof(entry->name)) == sizeof(entry->name))

yeah I'll use ^^ then or maybe switch
to a strncpy to further simplify things.

> 
>> +				continue;
>> +
>> +			if (!strcmp(entry->name, pds->service_path)) {
>> +				pds->service_data_valid = entry->service_data_valid;
>> +				pds->service_data = entry->service_data;
>> +				pds->instance = entry->instance;
>> +				goto out;
>> +			}
>> +		}
>> +
>> +		/* Update ret to indicate that the service is not yet found */
>> +		ret = -EINVAL;
>> +
>> +		/* Always read total_domains from the response msg */
>> +		if (resp->domain_list_len >  resp->total_domains)
> 
> Double space after '>'

thanks for catching this

> 
>> +			resp->domain_list_len = resp->total_domains;
>> +
>> +		domains_read += resp->domain_list_len;
>> +	} while (domains_read < resp->total_domains);
>> +out:
>> +	kfree(resp);
>> +	return ret;
>> +}
>> +
>> +static void pdr_servloc_work(struct work_struct *work)
>> +{
>> +	struct pdr_handle *pdr = container_of(work, struct pdr_handle,
>> +					      servloc_work);
>> +	struct pdr_list_node *servloc, *tmp;
>> +	struct pdr_service *pds;
>> +	int ret;
>> +
>> +	list_for_each_entry_safe(servloc, tmp, &pdr->servloc_list, node) {
>> +		pds = servloc->pds;
>> +
>> +		/* wait for PD Mapper to come up */
>> +		ret = wait_for_completion_timeout(&pdr->locator_available, 10 * 
>> HZ);
> 
> Afaict this means that we will only look for the locator during the 10
> seconds that follows a pdr_add_lookup().
> 
> How about changing this so that you bail before the loop if the locator
> hasn't showed up yet and schedule this worker when the locator is
> registered?

yes makes sense, will do that.
But by doing this the client
will have to track timeouts
for lookups.

> 
>> +		if (!ret) {
>> +			pr_err("PDR: SERVICE LOCATOR service wait failed\n");
>> +			ret = -ETIMEDOUT;
>> +			goto err;
>> +		}
>> +
>> +		ret = pdr_locate_service(pdr, pds);
>> +		if (ret < 0) {
>> +			pr_err("PDR: service lookup for %s failed: %d\n",
>> +			       pds->service_name, ret);
>> +			goto err;
>> +		}
>> +
>> +		qmi_add_lookup(&pdr->servreg_client, pds->service, 1,
>> +			       pds->instance);
>> +err:
>> +		list_del(&servloc->node);
>> +		kfree(servloc);
>> +
>> +		/* cleanup pds on error */
>> +		if (ret < 0) {
>> +			pds->state = SERVREG_LOCATOR_ERR;
>> +			pdr->status(pdr, pds);
>> +			list_del(&pds->node);
>> +			kfree(pds);
>> +		}
>> +	}
>> +}
> [..]
>> +int pdr_add_lookup(struct pdr_handle *pdr, const char *service_name,
>> +		   const char *service_path)
>> +{
>> +	struct pdr_service *pds, *pds_iter, *tmp;
>> +	struct pdr_list_node *servloc;
>> +	int ret;
>> +
>> +	if (!service_name || strlen(service_name) > (SERVREG_NAME_LENGTH + 
>> 1) ||
>> +	    !service_path || strlen(service_path) > (SERVREG_NAME_LENGTH + 
>> 1))
> 
> When strlen(x) == SERVREG_NAME_LENGTH + 1 your strcpy below would write
> SERVREG_NAME_LENGTH + 2 bytes to service_name and service_path, so drop
> the + 1 from the comparisons.

yes will do that

> 
>> +		return -EINVAL;
>> +
>> +	servloc = kzalloc(sizeof(*servloc), GFP_KERNEL);
>> +	if (!servloc)
>> +		return -ENOMEM;
>> +
>> +	pds = kzalloc(sizeof(*pds), GFP_KERNEL);
>> +	if (!pds) {
>> +		ret = -ENOMEM;
>> +		goto err;
>> +	}
>> +
>> +	pds->service = SERVREG_NOTIFIER_SERVICE;
>> +	strcpy(pds->service_name, service_name);
>> +	strcpy(pds->service_path, service_path);
> [..]
>> +int pdr_restart_pd(struct pdr_handle *pdr, const char *service_path)
>> +{
>> +	struct servreg_restart_pd_req req;
>> +	struct servreg_restart_pd_resp resp;
>> +	struct pdr_service *pds = NULL, *pds_iter, *tmp;
>> +	struct qmi_txn txn;
>> +	int ret;
>> +
>> +	if (!service_path || strlen(service_path) > (SERVREG_NAME_LENGTH + 
>> 1))
> 
> As above, drop the + 1

ditto

> 
>> +		return -EINVAL;
>> +
> [..]
>> +int pdr_handle_init(struct pdr_handle *pdr,
>> +		    int (*status)(struct pdr_handle *pdr,
>> +				  struct pdr_service *pds))
>> +{
> [..]
>> +	pdr->servreg_wq = create_singlethread_workqueue("pdr_servreg_wq");
>> +	if (!pdr->servreg_wq)
>> +		return -ENOMEM;
>> +
>> +	pdr->indack_wq = alloc_ordered_workqueue("pdr_indack_wq", 
>> WQ_HIGHPRI);
> 
> The two workqueues means that we should be able to call pdr->status()
> from two concurrent contexts, I don't think our clients will expect 
> that.
> 

would creating another ordered wq to
relay all the pd->status make sense?

>> +	if (!pdr->indack_wq) {
>> +		ret = -ENOMEM;
>> +		goto destroy_servreg;
>> +	}
>> +
> 
> Regards,
> Bjorn

^ permalink raw reply	[flat|nested] 12+ messages in thread

* Re: [PATCH 3/3] soc: qcom: apr: Add avs/audio tracking functionality
  2019-11-19  6:53   ` [PATCH 3/3] soc: qcom: apr: Add avs/audio tracking functionality Bjorn Andersson
@ 2019-11-19 10:25     ` sibis
  0 siblings, 0 replies; 12+ messages in thread
From: sibis @ 2019-11-19 10:25 UTC (permalink / raw)
  To: Bjorn Andersson
  Cc: srinivas.kandagatla, robh+dt, tsoni, agross, mark.rutland,
	linux-arm-msm, linux-remoteproc, linux-kernel, devicetree,
	rnayak

On 2019-11-19 12:23, Bjorn Andersson wrote:
> On Mon 18 Nov 06:28 PST 2019, Sibi Sankar wrote:
>> diff --git a/drivers/soc/qcom/Kconfig b/drivers/soc/qcom/Kconfig
> [..]
>> +static void of_register_apr_devices(struct device *dev, const char 
>> *svc_path)
>>  {
>>  	struct apr *apr = dev_get_drvdata(dev);
>>  	struct device_node *node;
>> +	const char *service_path;
>> +	int ret;
>> 
>>  	for_each_child_of_node(dev->of_node, node) {
>>  		struct apr_device_id id = { {0} };
>> 
>> +		ret = of_property_read_string_index(node, "qcom,protection-domain",
>> +						    1, &service_path);
>> +		if (svc_path) {
>> +			/* skip APR services that are PD independent */
>> +			if (ret)
>> +				continue;
>> +
>> +			/* skip APR services whose PD paths don't match */
>> +			if (strcmp(service_path, svc_path))
>> +				continue;
>> +		} else {
>> +			/* skip APR services whose PD lookups are registered*/
> 
> Missing space before */

Thanks will add it

> 
>> +			if (ret == 0)
>> +				continue;
>> +		}
>> +
>>  		if (of_property_read_u32(node, "reg", &id.svc_id))
>>  			continue;
>> 
>> @@ -318,6 +365,37 @@ static void of_register_apr_devices(struct device 
>> *dev)
>>  	}
>>  }
>> 
>> +static int apr_remove_device(struct device *dev, void *svc_path)
>> +{
>> +	struct apr_device *adev = to_apr_device(dev);
>> +
>> +	if (svc_path) {
>> +		if (!strcmp(adev->service_path, (char *)svc_path))
>> +			device_unregister(&adev->dev);
>> +	} else {
>> +		device_unregister(&adev->dev);
>> +	}
>> +
>> +	return 0;
>> +}
>> +
>> +static int apr_pd_status(struct pdr_handle *pdr, struct pdr_service 
>> *pds)
> 
> Why is the pdr status function returning an int?

yes since I am not using the
return value in pdr_helpers
will make it void.

> 
>> +{
>> +	struct apr *apr = container_of(pdr, struct apr, pdr);
>> +
>> +	switch (pds->state) {
>> +	case SERVREG_SERVICE_STATE_UP:
>> +		of_register_apr_devices(apr->dev, pds->service_path);
>> +		break;
>> +	case SERVREG_SERVICE_STATE_DOWN:
>> +		device_for_each_child(apr->dev, pds->service_path,
>> +				      apr_remove_device);
>> +		break;
>> +	}
>> +
>> +	return 0;
>> +}
> [..]
>> @@ -343,20 +421,19 @@ static int apr_probe(struct rpmsg_device *rpdev)
>>  		return -ENOMEM;
>>  	}
>>  	INIT_WORK(&apr->rx_work, apr_rxwq);
>> +
>> +	ret = pdr_handle_init(&apr->pdr, apr_pd_status);
>> +	if (ret) {
>> +		dev_err(dev, "Failed to init PDR handle\n");
> 
> You need to destroy apr->rxwq here as well.

sry missed this

> 
>> +		return ret;
>> +	}
>> +
>>  	INIT_LIST_HEAD(&apr->rx_list);
>>  	spin_lock_init(&apr->rx_lock);
>>  	spin_lock_init(&apr->svcs_lock);
>>  	idr_init(&apr->svcs_idr);
>> -	of_register_apr_devices(dev);
>> -
>> -	return 0;
>> -}
> 
> Regards,
> Bjorn

^ permalink raw reply	[flat|nested] 12+ messages in thread

* Re: [PATCH 1/3] soc: qcom: Introduce Protection Domain Restart helpers
       [not found]     ` <0101016e832bd54d-453473ee-c0fa-44f5-a873-55b97dff4a9a-000000@us-west-2.amazonses.com>
@ 2019-11-19 23:17       ` Bjorn Andersson
  2019-11-20 12:12         ` Sibi Sankar
  0 siblings, 1 reply; 12+ messages in thread
From: Bjorn Andersson @ 2019-11-19 23:17 UTC (permalink / raw)
  To: sibis
  Cc: srinivas.kandagatla, robh+dt, tsoni, agross, mark.rutland,
	linux-arm-msm, linux-remoteproc, linux-kernel, devicetree,
	rnayak

On Tue 19 Nov 02:18 PST 2019, sibis@codeaurora.org wrote:

> Hey Bjorn,
> Thanks for taking the time to
> review the series :)
> 
> On 2019-11-19 12:10, Bjorn Andersson wrote:
> > On Mon 18 Nov 06:27 PST 2019, Sibi Sankar wrote:
> > > diff --git a/drivers/soc/qcom/pdr_interface.c
> > > b/drivers/soc/qcom/pdr_interface.c
> > [..]
> > > +static void pdr_indack_work(struct work_struct *work)
> > > +{
> > > +	struct pdr_handle *pdr = container_of(work, struct pdr_handle,
> > > +					      indack_work);
> > > +	struct pdr_list_node *ind, *tmp;
> > > +	struct pdr_service *pds;
> > > +
> > > +	list_for_each_entry_safe(ind, tmp, &pdr->indack_list, node) {
> > > +		pds = ind->pds;
> > > +		pdr_send_indack_msg(pdr, pds, ind->transaction_id);
> > 
> > So when we get an ind_cb with the new status, we need to send an ack
> > request, which will result in a response, just to confirm that we got
> > the event?
> > 
> > Seems like we should fix the qmi code to make it possible to send a
> > request from the indication handler and then we could simply ignore the
> 
> yeah maybe having a provision to send custom requests back on
> indication would be the way to go. Not all indications need to be
> serviced with requests.
> 

Let's put this on the todo list.

> > response. Or do we need to not pdr->status() until we get the response
> > for some reason?
> 
> adsp waits on the ack response for a fixed duration and seems to throw
> a fatal err if the ack is not serviced. Hence holding back pd->status
> till we service the ack here.
> 

You mean to ensure that someone sleeping in pd->status() doesn't delay
that until its too late?

[..]
> > > +int pdr_handle_init(struct pdr_handle *pdr,
> > > +		    int (*status)(struct pdr_handle *pdr,
> > > +				  struct pdr_service *pds))
> > > +{
> > [..]
> > > +	pdr->servreg_wq = create_singlethread_workqueue("pdr_servreg_wq");
> > > +	if (!pdr->servreg_wq)
> > > +		return -ENOMEM;
> > > +
> > > +	pdr->indack_wq = alloc_ordered_workqueue("pdr_indack_wq",
> > > WQ_HIGHPRI);
> > 
> > The two workqueues means that we should be able to call pdr->status()
> > from two concurrent contexts, I don't think our clients will expect that.
> > 
> 
> would creating another ordered wq to relay all the pd->status make
> sense?
> 

I would prefer less work queues ;) But I presume you split out the
indack_wq in order to improve the likelihood of meeting the latency
requirements of the remote side.

Perhaps just wrap the status() calls with a status-mutex and then remove
that by reworking the QMI interface to allow us to remove the indack
work?

Regards,
Bjorn

^ permalink raw reply	[flat|nested] 12+ messages in thread

* Re: [PATCH 1/3] soc: qcom: Introduce Protection Domain Restart helpers
  2019-11-19 23:17       ` Bjorn Andersson
@ 2019-11-20 12:12         ` Sibi Sankar
  0 siblings, 0 replies; 12+ messages in thread
From: Sibi Sankar @ 2019-11-20 12:12 UTC (permalink / raw)
  To: Bjorn Andersson
  Cc: srinivas.kandagatla, robh+dt, tsoni, agross, mark.rutland,
	linux-arm-msm, linux-remoteproc, linux-kernel, devicetree,
	rnayak, linux-remoteproc-owner

On 2019-11-20 04:47, Bjorn Andersson wrote:
> On Tue 19 Nov 02:18 PST 2019, sibis@codeaurora.org wrote:
> 
>> Hey Bjorn,
>> Thanks for taking the time to
>> review the series :)
>> 
>> On 2019-11-19 12:10, Bjorn Andersson wrote:
>> > On Mon 18 Nov 06:27 PST 2019, Sibi Sankar wrote:
>> > > diff --git a/drivers/soc/qcom/pdr_interface.c
>> > > b/drivers/soc/qcom/pdr_interface.c
>> > [..]
>> > > +static void pdr_indack_work(struct work_struct *work)
>> > > +{
>> > > +	struct pdr_handle *pdr = container_of(work, struct pdr_handle,
>> > > +					      indack_work);
>> > > +	struct pdr_list_node *ind, *tmp;
>> > > +	struct pdr_service *pds;
>> > > +
>> > > +	list_for_each_entry_safe(ind, tmp, &pdr->indack_list, node) {
>> > > +		pds = ind->pds;
>> > > +		pdr_send_indack_msg(pdr, pds, ind->transaction_id);
>> >
>> > So when we get an ind_cb with the new status, we need to send an ack
>> > request, which will result in a response, just to confirm that we got
>> > the event?
>> >
>> > Seems like we should fix the qmi code to make it possible to send a
>> > request from the indication handler and then we could simply ignore the
>> 
>> yeah maybe having a provision to send custom requests back on
>> indication would be the way to go. Not all indications need to be
>> serviced with requests.
>> 
> 
> Let's put this on the todo list.
> 
>> > response. Or do we need to not pdr->status() until we get the response
>> > for some reason?
>> 
>> adsp waits on the ack response for a fixed duration and seems to throw
>> a fatal err if the ack is not serviced. Hence holding back pd->status
>> till we service the ack here.
>> 
> 
> You mean to ensure that someone sleeping in pd->status() doesn't delay
> that until its too late?

yes

> 
> [..]
>> > > +int pdr_handle_init(struct pdr_handle *pdr,
>> > > +		    int (*status)(struct pdr_handle *pdr,
>> > > +				  struct pdr_service *pds))
>> > > +{
>> > [..]
>> > > +	pdr->servreg_wq = create_singlethread_workqueue("pdr_servreg_wq");
>> > > +	if (!pdr->servreg_wq)
>> > > +		return -ENOMEM;
>> > > +
>> > > +	pdr->indack_wq = alloc_ordered_workqueue("pdr_indack_wq",
>> > > WQ_HIGHPRI);
>> >
>> > The two workqueues means that we should be able to call pdr->status()
>> > from two concurrent contexts, I don't think our clients will expect that.
>> >
>> 
>> would creating another ordered wq to relay all the pd->status make
>> sense?
>> 
> 
> I would prefer less work queues ;) But I presume you split out the
> indack_wq in order to improve the likelihood of meeting the latency
> requirements of the remote side.
> 
> Perhaps just wrap the status() calls with a status-mutex and then 
> remove
> that by reworking the QMI interface to allow us to remove the indack
> work?

okay will fix it in the next
re-spin.

> 
> Regards,
> Bjorn

-- 
Qualcomm Innovation Center, Inc. is a member of Code Aurora Forum,
a Linux Foundation Collaborative Project.

^ permalink raw reply	[flat|nested] 12+ messages in thread

* Re: [PATCH 2/3] dt-bindings: soc: qcom: apr: Add protection domain bindings
       [not found] ` <0101016e7ee9c786-fcf80f4e-9b57-4d6b-8806-9ca408e21b55-000000@us-west-2.amazonses.com>
  2019-11-19  5:49   ` [PATCH 2/3] dt-bindings: soc: qcom: apr: Add protection domain bindings Bjorn Andersson
@ 2019-11-21 16:04   ` Srinivas Kandagatla
  1 sibling, 0 replies; 12+ messages in thread
From: Srinivas Kandagatla @ 2019-11-21 16:04 UTC (permalink / raw)
  To: Sibi Sankar, bjorn.andersson, robh+dt, tsoni
  Cc: agross, mark.rutland, linux-arm-msm, linux-remoteproc,
	linux-kernel, devicetree, rnayak



On 18/11/2019 14:28, Sibi Sankar wrote:
> Add optional "qcom,protection-domain" bindings for APR services. This
> helps to capture the dependencies between APR services and the PD on
> which each apr service run.
> 
> Signed-off-by: Sibi Sankar <sibis@codeaurora.org>

LGTM

Reviewed-by: Srinivas Kandagatla <srinivas.kandagatla@linaro.org>
> ---
>   .../devicetree/bindings/soc/qcom/qcom,apr.txt | 59 +++++++++++++++++++
>   1 file changed, 59 insertions(+)
> 
> diff --git a/Documentation/devicetree/bindings/soc/qcom/qcom,apr.txt b/Documentation/devicetree/bindings/soc/qcom/qcom,apr.txt
> index db501269f47b8..f87c0b2a48de4 100644
> --- a/Documentation/devicetree/bindings/soc/qcom/qcom,apr.txt
> +++ b/Documentation/devicetree/bindings/soc/qcom/qcom,apr.txt
> @@ -45,6 +45,12 @@ by the individual bindings for the specific service
>   			12 - Ultrasound stream manager.
>   			13 - Listen stream manager.
>   
> +- qcom,protection-domain
> +	Usage: optional
> +	Value type: <stringlist>
> +	Definition: Must list the protection domain service name and path
> +		    that the particular apr service has a dependency on.
> +
>   = EXAMPLE
>   The following example represents a QDSP based sound card on a MSM8996 device
>   which uses apr as communication between Apps and QDSP.
> @@ -82,3 +88,56 @@ which uses apr as communication between Apps and QDSP.
>   			...
>   		};
>   	};
> +
> += EXAMPLE 2
> +The following example represents a QDSP based sound card on SDM845 device.
> +Here the apr services are dependent on "avs/audio" service running on AUDIO
> +Protection Domain hosted on ADSP remote processor.
> +
> +	apr {
> +		compatible = "qcom,apr-v2";
> +		qcom,glink-channels = "apr_audio_svc";
> +		qcom,apr-domain = <APR_DOMAIN_ADSP>;
> +
> +		q6core {
> +			compatible = "qcom,q6core";
> +			reg = <APR_SVC_ADSP_CORE>;
> +			qcom,protection-domain = "avs/audio", "msm/adsp/audio_pd";
> +		};
> +
> +		q6afe: q6afe {
> +			compatible = "qcom,q6afe";
> +			reg = <APR_SVC_AFE>;
> +			qcom,protection-domain = "avs/audio", "msm/adsp/audio_pd";
> +			q6afedai: dais {
> +				compatible = "qcom,q6afe-dais";
> +				#sound-dai-cells = <1>;
> +
> +				qi2s@22 {
> +					reg = <22>;
> +					qcom,sd-lines = <3>;
> +				};
> +			};
> +		};
> +
> +		q6asm: q6asm {
> +			compatible = "qcom,q6asm";
> +			reg = <APR_SVC_ASM>;
> +			qcom,protection-domain = "avs/audio", "msm/adsp/audio_pd";
> +			q6asmdai: dais {
> +				compatible = "qcom,q6asm-dais";
> +				#sound-dai-cells = <1>;
> +				iommus = <&apps_smmu 0x1821 0x0>;
> +			};
> +		};
> +
> +		q6adm: q6adm {
> +			compatible = "qcom,q6adm";
> +			reg = <APR_SVC_ADM>;
> +			qcom,protection-domain = "avs/audio", "msm/adsp/audio_pd";
> +			q6routing: routing {
> +				compatible = "qcom,q6adm-routing";
> +				#sound-dai-cells = <0>;
> +			};
> +		};
> +	};
> 

^ permalink raw reply	[flat|nested] 12+ messages in thread

* Re: [PATCH 2/3] dt-bindings: soc: qcom: apr: Add protection domain bindings
       [not found] ` <0101016e7ee9c591-d04928e8-6440-488c-a956-3b5c9b8988bf-000000@us-west-2.amazonses.com>
@ 2019-12-03 21:52   ` Rob Herring
  0 siblings, 0 replies; 12+ messages in thread
From: Rob Herring @ 2019-12-03 21:52 UTC (permalink / raw)
  To: Sibi Sankar
  Cc: bjorn.andersson, srinivas.kandagatla, tsoni, agross,
	mark.rutland, linux-arm-msm, linux-remoteproc, linux-kernel,
	devicetree, rnayak

On Mon, Nov 18, 2019 at 02:28:00PM +0000, Sibi Sankar wrote:
> Add optional "qcom,protection-domain" bindings for APR services. This
> helps to capture the dependencies between APR services and the PD on
> which each apr service run.
> 
> Signed-off-by: Sibi Sankar <sibis@codeaurora.org>
> ---
>  .../devicetree/bindings/soc/qcom/qcom,apr.txt | 59 +++++++++++++++++++
>  1 file changed, 59 insertions(+)
> 
> diff --git a/Documentation/devicetree/bindings/soc/qcom/qcom,apr.txt b/Documentation/devicetree/bindings/soc/qcom/qcom,apr.txt
> index db501269f47b8..f87c0b2a48de4 100644
> --- a/Documentation/devicetree/bindings/soc/qcom/qcom,apr.txt
> +++ b/Documentation/devicetree/bindings/soc/qcom/qcom,apr.txt
> @@ -45,6 +45,12 @@ by the individual bindings for the specific service
>  			12 - Ultrasound stream manager.
>  			13 - Listen stream manager.
>  
> +- qcom,protection-domain
> +	Usage: optional
> +	Value type: <stringlist>
> +	Definition: Must list the protection domain service name and path
> +		    that the particular apr service has a dependency on.

Is name and path 2 values? Length is always 2?

You've got the same values for every case in the example. Is there a 
defined list of possible values?

> +
>  = EXAMPLE
>  The following example represents a QDSP based sound card on a MSM8996 device
>  which uses apr as communication between Apps and QDSP.
> @@ -82,3 +88,56 @@ which uses apr as communication between Apps and QDSP.
>  			...
>  		};
>  	};
> +
> += EXAMPLE 2
> +The following example represents a QDSP based sound card on SDM845 device.
> +Here the apr services are dependent on "avs/audio" service running on AUDIO
> +Protection Domain hosted on ADSP remote processor.
> +
> +	apr {
> +		compatible = "qcom,apr-v2";
> +		qcom,glink-channels = "apr_audio_svc";
> +		qcom,apr-domain = <APR_DOMAIN_ADSP>;
> +
> +		q6core {
> +			compatible = "qcom,q6core";
> +			reg = <APR_SVC_ADSP_CORE>;
> +			qcom,protection-domain = "avs/audio", "msm/adsp/audio_pd";
> +		};
> +
> +		q6afe: q6afe {
> +			compatible = "qcom,q6afe";
> +			reg = <APR_SVC_AFE>;
> +			qcom,protection-domain = "avs/audio", "msm/adsp/audio_pd";
> +			q6afedai: dais {
> +				compatible = "qcom,q6afe-dais";
> +				#sound-dai-cells = <1>;
> +
> +				qi2s@22 {
> +					reg = <22>;
> +					qcom,sd-lines = <3>;
> +				};
> +			};
> +		};
> +
> +		q6asm: q6asm {
> +			compatible = "qcom,q6asm";
> +			reg = <APR_SVC_ASM>;
> +			qcom,protection-domain = "avs/audio", "msm/adsp/audio_pd";
> +			q6asmdai: dais {
> +				compatible = "qcom,q6asm-dais";
> +				#sound-dai-cells = <1>;
> +				iommus = <&apps_smmu 0x1821 0x0>;
> +			};
> +		};
> +
> +		q6adm: q6adm {
> +			compatible = "qcom,q6adm";
> +			reg = <APR_SVC_ADM>;
> +			qcom,protection-domain = "avs/audio", "msm/adsp/audio_pd";
> +			q6routing: routing {
> +				compatible = "qcom,q6adm-routing";
> +				#sound-dai-cells = <0>;
> +			};
> +		};
> +	};
> -- 
> The Qualcomm Innovation Center, Inc. is a member of the Code Aurora Forum,
> a Linux Foundation Collaborative Project
> 

^ permalink raw reply	[flat|nested] 12+ messages in thread

end of thread, back to index

Thread overview: 12+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
     [not found] <20191118142728.30187-1-sibis@codeaurora.org>
2019-11-18 14:27 ` [PATCH 1/3] soc: qcom: Introduce Protection Domain Restart helpers Sibi Sankar
2019-11-18 14:28 ` [PATCH 2/3] dt-bindings: soc: qcom: apr: Add protection domain bindings Sibi Sankar
2019-11-18 14:28 ` [PATCH 3/3] soc: qcom: apr: Add avs/audio tracking functionality Sibi Sankar
     [not found] ` <0101016e7ee9c786-fcf80f4e-9b57-4d6b-8806-9ca408e21b55-000000@us-west-2.amazonses.com>
2019-11-19  5:49   ` [PATCH 2/3] dt-bindings: soc: qcom: apr: Add protection domain bindings Bjorn Andersson
2019-11-21 16:04   ` Srinivas Kandagatla
     [not found] ` <0101016e7ee9be5e-1d6bbe06-4bab-434d-9040-ebfa3918b213-000000@us-west-2.amazonses.com>
2019-11-19  6:40   ` [PATCH 1/3] soc: qcom: Introduce Protection Domain Restart helpers Bjorn Andersson
2019-11-19 10:18     ` sibis
     [not found]     ` <0101016e832bd54d-453473ee-c0fa-44f5-a873-55b97dff4a9a-000000@us-west-2.amazonses.com>
2019-11-19 23:17       ` Bjorn Andersson
2019-11-20 12:12         ` Sibi Sankar
     [not found] ` <0101016e7ee9d8b5-9759d0ba-4acf-4fc4-a863-fac9c738397f-000000@us-west-2.amazonses.com>
2019-11-19  6:53   ` [PATCH 3/3] soc: qcom: apr: Add avs/audio tracking functionality Bjorn Andersson
2019-11-19 10:25     ` sibis
     [not found] ` <0101016e7ee9c591-d04928e8-6440-488c-a956-3b5c9b8988bf-000000@us-west-2.amazonses.com>
2019-12-03 21:52   ` [PATCH 2/3] dt-bindings: soc: qcom: apr: Add protection domain bindings Rob Herring

Linux-Devicetree Archive on lore.kernel.org

Archives are clonable:
	git clone --mirror https://lore.kernel.org/linux-devicetree/0 linux-devicetree/git/0.git

	# If you have public-inbox 1.1+ installed, you may
	# initialize and index your mirror using the following commands:
	public-inbox-init -V2 linux-devicetree linux-devicetree/ https://lore.kernel.org/linux-devicetree \
		devicetree@vger.kernel.org
	public-inbox-index linux-devicetree

Example config snippet for mirrors

Newsgroup available over NNTP:
	nntp://nntp.lore.kernel.org/org.kernel.vger.linux-devicetree


AGPL code for this site: git clone https://public-inbox.org/public-inbox.git