From: Lina Iyer <ilina@codeaurora.org>
To: agross@kernel.org, bjorn.andersson@linaro.org
Cc: linux-arm-msm@vger.kernel.org, linux-soc@vger.kernel.org,
rnayak@codeaurora.org, linux-kernel@vger.kernel.org,
linux-pm@vger.kernel.org, swboyd@chromium.org,
dianders@chromium.org, mkshah@codeaurora.org,
"Raju P.L.S.S.S.N" <rplsssn@codeaurora.org>
Subject: Re: [PATCH 1/2] drivers: qcom: rpmh-rsc: simplify TCS locking
Date: Mon, 1 Jul 2019 09:52:55 -0600 [thread overview]
Message-ID: <20190701155255.GC24030@codeaurora.org> (raw)
In-Reply-To: <20190701152907.16407-1-ilina@codeaurora.org>
Switching Andy's email address.
On Mon, Jul 01 2019 at 09:32 -0600, Lina Iyer wrote:
>From: "Raju P.L.S.S.S.N" <rplsssn@codeaurora.org>
>
>tcs->lock was introduced to serialize access within the TCS group. But
>even without tcs->lock, drv->lock is serving the same purpose. So
>use a single drv->lock.
>
>Other optimizations include -
> - Remove locking around clear_bit() in IRQ handler. clear_bit() is
> atomic.
> - Remove redundant read of TCS registers.
> - Use spin_lock instead of _irq variants as the locks are not held
> in interrupt context.
>
>Fixes: 658628e7ef78 ("drivers: qcom: rpmh-rsc: add RPMH controller for
>QCOM SoCs")
>Signed-off-by: Raju P.L.S.S.S.N <rplsssn@codeaurora.org>
>Signed-off-by: Lina Iyer <ilina@codeaurora.org>
>---
> drivers/soc/qcom/rpmh-internal.h | 2 --
> drivers/soc/qcom/rpmh-rsc.c | 37 +++++++++++---------------------
> drivers/soc/qcom/rpmh.c | 20 +++++++----------
> 3 files changed, 21 insertions(+), 38 deletions(-)
>
>diff --git a/drivers/soc/qcom/rpmh-internal.h b/drivers/soc/qcom/rpmh-internal.h
>index a7bbbb67991c..969d5030860e 100644
>--- a/drivers/soc/qcom/rpmh-internal.h
>+++ b/drivers/soc/qcom/rpmh-internal.h
>@@ -28,7 +28,6 @@ struct rsc_drv;
> * @offset: start of the TCS group relative to the TCSes in the RSC
> * @num_tcs: number of TCSes in this type
> * @ncpt: number of commands in each TCS
>- * @lock: lock for synchronizing this TCS writes
> * @req: requests that are sent from the TCS
> * @cmd_cache: flattened cache of cmds in sleep/wake TCS
> * @slots: indicates which of @cmd_addr are occupied
>@@ -40,7 +39,6 @@ struct tcs_group {
> u32 offset;
> int num_tcs;
> int ncpt;
>- spinlock_t lock;
> const struct tcs_request *req[MAX_TCS_PER_TYPE];
> u32 *cmd_cache;
> DECLARE_BITMAP(slots, MAX_TCS_SLOTS);
>diff --git a/drivers/soc/qcom/rpmh-rsc.c b/drivers/soc/qcom/rpmh-rsc.c
>index e278fc11fe5c..92461311aef3 100644
>--- a/drivers/soc/qcom/rpmh-rsc.c
>+++ b/drivers/soc/qcom/rpmh-rsc.c
>@@ -93,8 +93,7 @@ static void write_tcs_reg_sync(struct rsc_drv *drv, int reg, int tcs_id,
>
> static bool tcs_is_free(struct rsc_drv *drv, int tcs_id)
> {
>- return !test_bit(tcs_id, drv->tcs_in_use) &&
>- read_tcs_reg(drv, RSC_DRV_STATUS, tcs_id, 0);
>+ return !test_bit(tcs_id, drv->tcs_in_use);
> }
>
> static struct tcs_group *get_tcs_of_type(struct rsc_drv *drv, int type)
>@@ -104,29 +103,28 @@ static struct tcs_group *get_tcs_of_type(struct rsc_drv *drv, int type)
>
> static int tcs_invalidate(struct rsc_drv *drv, int type)
> {
>- int m;
>+ int m, ret = 0;
> struct tcs_group *tcs;
>
> tcs = get_tcs_of_type(drv, type);
>
>- spin_lock(&tcs->lock);
>- if (bitmap_empty(tcs->slots, MAX_TCS_SLOTS)) {
>- spin_unlock(&tcs->lock);
>- return 0;
>- }
>+ spin_lock(&drv->lock);
>+ if (bitmap_empty(tcs->slots, MAX_TCS_SLOTS))
>+ goto done;
>
> for (m = tcs->offset; m < tcs->offset + tcs->num_tcs; m++) {
> if (!tcs_is_free(drv, m)) {
>- spin_unlock(&tcs->lock);
>- return -EAGAIN;
>+ ret = -EAGAIN;
>+ goto done;
> }
> write_tcs_reg_sync(drv, RSC_DRV_CMD_ENABLE, m, 0);
> write_tcs_reg_sync(drv, RSC_DRV_CMD_WAIT_FOR_CMPL, m, 0);
> }
> bitmap_zero(tcs->slots, MAX_TCS_SLOTS);
>- spin_unlock(&tcs->lock);
>
>- return 0;
>+done:
>+ spin_unlock(&drv->lock);
>+ return ret;
> }
>
> /**
>@@ -242,9 +240,7 @@ static irqreturn_t tcs_tx_done(int irq, void *p)
> write_tcs_reg(drv, RSC_DRV_CMD_ENABLE, i, 0);
> write_tcs_reg(drv, RSC_DRV_CMD_WAIT_FOR_CMPL, i, 0);
> write_tcs_reg(drv, RSC_DRV_IRQ_CLEAR, 0, BIT(i));
>- spin_lock(&drv->lock);
> clear_bit(i, drv->tcs_in_use);
>- spin_unlock(&drv->lock);
> if (req)
> rpmh_tx_done(req, err);
> }
>@@ -349,14 +345,12 @@ static int tcs_write(struct rsc_drv *drv, const struct tcs_request *msg)
> {
> struct tcs_group *tcs;
> int tcs_id;
>- unsigned long flags;
> int ret;
>
> tcs = get_tcs_for_msg(drv, msg);
> if (IS_ERR(tcs))
> return PTR_ERR(tcs);
>
>- spin_lock_irqsave(&tcs->lock, flags);
> spin_lock(&drv->lock);
> /*
> * The h/w does not like if we send a request to the same address,
>@@ -364,26 +358,23 @@ static int tcs_write(struct rsc_drv *drv, const struct tcs_request *msg)
> */
> ret = check_for_req_inflight(drv, tcs, msg);
> if (ret) {
>- spin_unlock(&drv->lock);
> goto done_write;
> }
>
> tcs_id = find_free_tcs(tcs);
> if (tcs_id < 0) {
> ret = tcs_id;
>- spin_unlock(&drv->lock);
> goto done_write;
> }
>
> tcs->req[tcs_id - tcs->offset] = msg;
> set_bit(tcs_id, drv->tcs_in_use);
>- spin_unlock(&drv->lock);
>
> __tcs_buffer_write(drv, tcs_id, 0, msg);
> __tcs_trigger(drv, tcs_id);
>
> done_write:
>- spin_unlock_irqrestore(&tcs->lock, flags);
>+ spin_unlock(&drv->lock);
> return ret;
> }
>
>@@ -481,19 +472,18 @@ static int tcs_ctrl_write(struct rsc_drv *drv, const struct tcs_request *msg)
> {
> struct tcs_group *tcs;
> int tcs_id = 0, cmd_id = 0;
>- unsigned long flags;
> int ret;
>
> tcs = get_tcs_for_msg(drv, msg);
> if (IS_ERR(tcs))
> return PTR_ERR(tcs);
>
>- spin_lock_irqsave(&tcs->lock, flags);
>+ spin_lock(&drv->lock);
> /* find the TCS id and the command in the TCS to write to */
> ret = find_slots(tcs, msg, &tcs_id, &cmd_id);
> if (!ret)
> __tcs_buffer_write(drv, tcs_id, cmd_id, msg);
>- spin_unlock_irqrestore(&tcs->lock, flags);
>+ spin_unlock(&drv->lock);
>
> return ret;
> }
>@@ -584,7 +574,6 @@ static int rpmh_probe_tcs_config(struct platform_device *pdev,
> tcs->type = tcs_cfg[i].type;
> tcs->num_tcs = tcs_cfg[i].n;
> tcs->ncpt = ncpt;
>- spin_lock_init(&tcs->lock);
>
> if (!tcs->num_tcs || tcs->type == CONTROL_TCS)
> continue;
>diff --git a/drivers/soc/qcom/rpmh.c b/drivers/soc/qcom/rpmh.c
>index 035091fd44b8..12f830610b94 100644
>--- a/drivers/soc/qcom/rpmh.c
>+++ b/drivers/soc/qcom/rpmh.c
>@@ -118,9 +118,8 @@ static struct cache_req *cache_rpm_request(struct rpmh_ctrlr *ctrlr,
> struct tcs_cmd *cmd)
> {
> struct cache_req *req;
>- unsigned long flags;
>
>- spin_lock_irqsave(&ctrlr->cache_lock, flags);
>+ spin_lock(&ctrlr->cache_lock);
> req = __find_req(ctrlr, cmd->addr);
> if (req)
> goto existing;
>@@ -154,7 +153,7 @@ static struct cache_req *cache_rpm_request(struct rpmh_ctrlr *ctrlr,
>
> ctrlr->dirty = true;
> unlock:
>- spin_unlock_irqrestore(&ctrlr->cache_lock, flags);
>+ spin_unlock(&ctrlr->cache_lock);
>
> return req;
> }
>@@ -283,23 +282,21 @@ EXPORT_SYMBOL(rpmh_write);
>
> static void cache_batch(struct rpmh_ctrlr *ctrlr, struct batch_cache_req *req)
> {
>- unsigned long flags;
>
>- spin_lock_irqsave(&ctrlr->cache_lock, flags);
>+ spin_lock(&ctrlr->cache_lock);
> list_add_tail(&req->list, &ctrlr->batch_cache);
>- spin_unlock_irqrestore(&ctrlr->cache_lock, flags);
>+ spin_unlock(&ctrlr->cache_lock);
> }
>
> static int flush_batch(struct rpmh_ctrlr *ctrlr)
> {
> struct batch_cache_req *req;
> const struct rpmh_request *rpm_msg;
>- unsigned long flags;
> int ret = 0;
> int i;
>
> /* Send Sleep/Wake requests to the controller, expect no response */
>- spin_lock_irqsave(&ctrlr->cache_lock, flags);
>+ spin_lock(&ctrlr->cache_lock);
> list_for_each_entry(req, &ctrlr->batch_cache, list) {
> for (i = 0; i < req->count; i++) {
> rpm_msg = req->rpm_msgs + i;
>@@ -309,7 +306,7 @@ static int flush_batch(struct rpmh_ctrlr *ctrlr)
> break;
> }
> }
>- spin_unlock_irqrestore(&ctrlr->cache_lock, flags);
>+ spin_unlock(&ctrlr->cache_lock);
>
> return ret;
> }
>@@ -317,13 +314,12 @@ static int flush_batch(struct rpmh_ctrlr *ctrlr)
> static void invalidate_batch(struct rpmh_ctrlr *ctrlr)
> {
> struct batch_cache_req *req, *tmp;
>- unsigned long flags;
>
>- spin_lock_irqsave(&ctrlr->cache_lock, flags);
>+ spin_lock(&ctrlr->cache_lock);
> list_for_each_entry_safe(req, tmp, &ctrlr->batch_cache, list)
> kfree(req);
> INIT_LIST_HEAD(&ctrlr->batch_cache);
>- spin_unlock_irqrestore(&ctrlr->cache_lock, flags);
>+ spin_unlock(&ctrlr->cache_lock);
> }
>
> /**
>--
>The Qualcomm Innovation Center, Inc. is a member of the Code Aurora Forum,
>a Linux Foundation Collaborative Project
>
next prev parent reply other threads:[~2019-07-01 15:53 UTC|newest]
Thread overview: 10+ messages / expand[flat|nested] mbox.gz Atom feed top
2019-07-01 15:29 [PATCH 1/2] drivers: qcom: rpmh-rsc: simplify TCS locking Lina Iyer
2019-07-01 15:29 ` [PATCH 2/2] drivers: qcom: rpmh-rsc: fix read back of trigger register Lina Iyer
2019-07-01 15:53 ` Lina Iyer
2019-07-19 18:22 ` Stephen Boyd
2019-07-22 15:51 ` Lina Iyer
2019-07-01 15:52 ` Lina Iyer [this message]
2019-07-19 18:20 ` [PATCH 1/2] drivers: qcom: rpmh-rsc: simplify TCS locking Stephen Boyd
2019-07-22 16:20 ` Lina Iyer
2019-07-22 18:18 ` Stephen Boyd
2019-07-22 19:46 ` Lina Iyer
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20190701155255.GC24030@codeaurora.org \
--to=ilina@codeaurora.org \
--cc=agross@kernel.org \
--cc=bjorn.andersson@linaro.org \
--cc=dianders@chromium.org \
--cc=linux-arm-msm@vger.kernel.org \
--cc=linux-kernel@vger.kernel.org \
--cc=linux-pm@vger.kernel.org \
--cc=linux-soc@vger.kernel.org \
--cc=mkshah@codeaurora.org \
--cc=rnayak@codeaurora.org \
--cc=rplsssn@codeaurora.org \
--cc=swboyd@chromium.org \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).