* [PATCH v2] mhi: core: Factorize mhi queuing
@ 2021-01-08 16:52 Loic Poulain
From: Loic Poulain @ 2021-01-08 16:52 UTC
To: manivannan.sadhasivam; +Cc: linux-arm-msm, hemantk, Loic Poulain
Instead of duplicating the queuing procedure in mhi_queue_dma(),
mhi_queue_buf() and mhi_queue_skb(), add a new generic mhi_queue()
as a common helper.
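
Client-facing APIs keep their existing signatures; e.g. a TX submission
still looks like the sketch below (illustrative only, not part of the
diff; freeing the skb on failure is a hypothetical caller policy):

	ret = mhi_queue_skb(mhi_dev, DMA_TO_DEVICE, skb, skb->len, MHI_EOT);
	if (ret) {
		/* not queued: the caller still owns the skb */
		dev_kfree_skb_any(skb);
	}
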
Note that the unified mhi_queue() aligns pm_lock locking with the
mhi_queue_buf() behavior, taking it with the irqsave variant (vs. the
_bh variant used by the former queue_skb and queue_dma versions).
Signed-off-by: Loic Poulain <loic.poulain@linaro.org>
---
v2: re-integrate pre_alloc check (Mani)
    add a comment about the _bh to _irqsave unification
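
For reference, a minimal sketch of the two locking variants behind that
unification (standard kernel rwlock primitives, shown only to illustrate
the tradeoff; this is not code from the patch):

	unsigned long flags;

	/* _bh variant (former queue_skb/queue_dma path): disables
	 * softirqs only, so it must not be taken from hard-IRQ context.
	 */
	read_lock_bh(&mhi_cntrl->pm_lock);
	/* ... queue work ... */
	read_unlock_bh(&mhi_cntrl->pm_lock);

	/* _irqsave variant (unified mhi_queue() path): also disables
	 * local interrupts and restores the previous state, so the
	 * helper is callable from any context, as mhi_queue_buf()
	 * already allowed.
	 */
	read_lock_irqsave(&mhi_cntrl->pm_lock, flags);
	/* ... queue work ... */
	read_unlock_irqrestore(&mhi_cntrl->pm_lock, flags);
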
drivers/bus/mhi/core/main.c | 161 +++++++++++---------------------------------
1 file changed, 40 insertions(+), 121 deletions(-)
diff --git a/drivers/bus/mhi/core/main.c b/drivers/bus/mhi/core/main.c
index effe94f..2d9157ce 100644
--- a/drivers/bus/mhi/core/main.c
+++ b/drivers/bus/mhi/core/main.c
@@ -969,118 +969,81 @@ static bool mhi_is_ring_full(struct mhi_controller *mhi_cntrl,
return (tmp == ring->rp);
}
-int mhi_queue_skb(struct mhi_device *mhi_dev, enum dma_data_direction dir,
- struct sk_buff *skb, size_t len, enum mhi_flags mflags)
+static int mhi_queue(struct mhi_device *mhi_dev, struct mhi_buf_info *buf_info,
+ enum dma_data_direction dir, enum mhi_flags mflags)
{
struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
struct mhi_chan *mhi_chan = (dir == DMA_TO_DEVICE) ? mhi_dev->ul_chan :
mhi_dev->dl_chan;
struct mhi_ring *tre_ring = &mhi_chan->tre_ring;
- struct mhi_buf_info buf_info = { };
+ unsigned long flags;
int ret;
- /* If MHI host pre-allocates buffers then client drivers cannot queue */
- if (mhi_chan->pre_alloc)
+ if (unlikely(mhi_chan->pre_alloc))
return -EINVAL;
- if (mhi_is_ring_full(mhi_cntrl, tre_ring))
- return -ENOMEM;
-
- read_lock_bh(&mhi_cntrl->pm_lock);
- if (unlikely(MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state))) {
- read_unlock_bh(&mhi_cntrl->pm_lock);
+ if (unlikely(MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)))
return -EIO;
+
+ read_lock_irqsave(&mhi_cntrl->pm_lock, flags);
+
+ ret = mhi_is_ring_full(mhi_cntrl, tre_ring);
+ if (unlikely(ret)) {
+ ret = -ENOMEM;
+ goto exit_unlock;
}
- /* we're in M3 or transitioning to M3 */
+ ret = mhi_gen_tre(mhi_cntrl, mhi_chan, buf_info, mflags);
+ if (unlikely(ret))
+ goto exit_unlock;
+
+ /* trigger M3 exit if necessary */
if (MHI_PM_IN_SUSPEND_STATE(mhi_cntrl->pm_state))
mhi_trigger_resume(mhi_cntrl);
- /* Toggle wake to exit out of M2 */
+ /* Assert dev_wake (to exit/prevent M1/M2) */
mhi_cntrl->wake_toggle(mhi_cntrl);
- buf_info.v_addr = skb->data;
- buf_info.cb_buf = skb;
- buf_info.len = len;
-
- ret = mhi_gen_tre(mhi_cntrl, mhi_chan, &buf_info, mflags);
- if (unlikely(ret)) {
- read_unlock_bh(&mhi_cntrl->pm_lock);
- return ret;
- }
-
if (mhi_chan->dir == DMA_TO_DEVICE)
atomic_inc(&mhi_cntrl->pending_pkts);
- if (likely(MHI_DB_ACCESS_VALID(mhi_cntrl))) {
- read_lock_bh(&mhi_chan->lock);
- mhi_ring_chan_db(mhi_cntrl, mhi_chan);
- read_unlock_bh(&mhi_chan->lock);
+ if (unlikely(!MHI_DB_ACCESS_VALID(mhi_cntrl))) {
+ ret = -EIO;
+ goto exit_unlock;
}
- read_unlock_bh(&mhi_cntrl->pm_lock);
+ mhi_ring_chan_db(mhi_cntrl, mhi_chan);
- return 0;
+exit_unlock:
+ read_unlock_irqrestore(&mhi_cntrl->pm_lock, flags);
+
+ return ret;
}
-EXPORT_SYMBOL_GPL(mhi_queue_skb);
-int mhi_queue_dma(struct mhi_device *mhi_dev, enum dma_data_direction dir,
- struct mhi_buf *mhi_buf, size_t len, enum mhi_flags mflags)
+int mhi_queue_skb(struct mhi_device *mhi_dev, enum dma_data_direction dir,
+ struct sk_buff *skb, size_t len, enum mhi_flags mflags)
{
- struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
- struct mhi_chan *mhi_chan = (dir == DMA_TO_DEVICE) ? mhi_dev->ul_chan :
- mhi_dev->dl_chan;
- struct device *dev = &mhi_cntrl->mhi_dev->dev;
- struct mhi_ring *tre_ring = &mhi_chan->tre_ring;
struct mhi_buf_info buf_info = { };
- int ret;
-
- /* If MHI host pre-allocates buffers then client drivers cannot queue */
- if (mhi_chan->pre_alloc)
- return -EINVAL;
-
- if (mhi_is_ring_full(mhi_cntrl, tre_ring))
- return -ENOMEM;
-
- read_lock_bh(&mhi_cntrl->pm_lock);
- if (unlikely(MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state))) {
- dev_err(dev, "MHI is not in activate state, PM state: %s\n",
- to_mhi_pm_state_str(mhi_cntrl->pm_state));
- read_unlock_bh(&mhi_cntrl->pm_lock);
- return -EIO;
- }
+ buf_info.v_addr = skb->data;
+ buf_info.cb_buf = skb;
+ buf_info.len = len;
- /* we're in M3 or transitioning to M3 */
- if (MHI_PM_IN_SUSPEND_STATE(mhi_cntrl->pm_state))
- mhi_trigger_resume(mhi_cntrl);
+ return mhi_queue(mhi_dev, &buf_info, dir, mflags);
+}
+EXPORT_SYMBOL_GPL(mhi_queue_skb);
- /* Toggle wake to exit out of M2 */
- mhi_cntrl->wake_toggle(mhi_cntrl);
+int mhi_queue_dma(struct mhi_device *mhi_dev, enum dma_data_direction dir,
+ struct mhi_buf *mhi_buf, size_t len, enum mhi_flags mflags)
+{
+ struct mhi_buf_info buf_info = { };
buf_info.p_addr = mhi_buf->dma_addr;
buf_info.cb_buf = mhi_buf;
buf_info.pre_mapped = true;
buf_info.len = len;
- ret = mhi_gen_tre(mhi_cntrl, mhi_chan, &buf_info, mflags);
- if (unlikely(ret)) {
- read_unlock_bh(&mhi_cntrl->pm_lock);
- return ret;
- }
-
- if (mhi_chan->dir == DMA_TO_DEVICE)
- atomic_inc(&mhi_cntrl->pending_pkts);
-
- if (likely(MHI_DB_ACCESS_VALID(mhi_cntrl))) {
- read_lock_bh(&mhi_chan->lock);
- mhi_ring_chan_db(mhi_cntrl, mhi_chan);
- read_unlock_bh(&mhi_chan->lock);
- }
-
- read_unlock_bh(&mhi_cntrl->pm_lock);
-
- return 0;
+ return mhi_queue(mhi_dev, &buf_info, dir, mflags);
}
EXPORT_SYMBOL_GPL(mhi_queue_dma);
@@ -1134,57 +1097,13 @@ int mhi_gen_tre(struct mhi_controller *mhi_cntrl, struct mhi_chan *mhi_chan,
int mhi_queue_buf(struct mhi_device *mhi_dev, enum dma_data_direction dir,
void *buf, size_t len, enum mhi_flags mflags)
{
- struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
- struct mhi_chan *mhi_chan = (dir == DMA_TO_DEVICE) ? mhi_dev->ul_chan :
- mhi_dev->dl_chan;
- struct mhi_ring *tre_ring;
struct mhi_buf_info buf_info = { };
- unsigned long flags;
- int ret;
-
- /*
- * this check here only as a guard, it's always
- * possible mhi can enter error while executing rest of function,
- * which is not fatal so we do not need to hold pm_lock
- */
- if (unlikely(MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)))
- return -EIO;
-
- tre_ring = &mhi_chan->tre_ring;
- if (mhi_is_ring_full(mhi_cntrl, tre_ring))
- return -ENOMEM;
buf_info.v_addr = buf;
buf_info.cb_buf = buf;
buf_info.len = len;
- ret = mhi_gen_tre(mhi_cntrl, mhi_chan, &buf_info, mflags);
- if (unlikely(ret))
- return ret;
-
- read_lock_irqsave(&mhi_cntrl->pm_lock, flags);
-
- /* we're in M3 or transitioning to M3 */
- if (MHI_PM_IN_SUSPEND_STATE(mhi_cntrl->pm_state))
- mhi_trigger_resume(mhi_cntrl);
-
- /* Toggle wake to exit out of M2 */
- mhi_cntrl->wake_toggle(mhi_cntrl);
-
- if (mhi_chan->dir == DMA_TO_DEVICE)
- atomic_inc(&mhi_cntrl->pending_pkts);
-
- if (likely(MHI_DB_ACCESS_VALID(mhi_cntrl))) {
- unsigned long flags;
-
- read_lock_irqsave(&mhi_chan->lock, flags);
- mhi_ring_chan_db(mhi_cntrl, mhi_chan);
- read_unlock_irqrestore(&mhi_chan->lock, flags);
- }
-
- read_unlock_irqrestore(&mhi_cntrl->pm_lock, flags);
-
- return 0;
+ return mhi_queue(mhi_dev, &buf_info, dir, mflags);
}
EXPORT_SYMBOL_GPL(mhi_queue_buf);
--
2.7.4
* Re: [PATCH v2] mhi: core: Factorize mhi queuing
From: Manivannan Sadhasivam @ 2021-01-21 11:19 UTC
To: Loic Poulain; +Cc: linux-arm-msm, hemantk
On Fri, Jan 08, 2021 at 05:52:30PM +0100, Loic Poulain wrote:
> Instead of duplicating the queuing procedure in mhi_queue_dma(),
> mhi_queue_buf() and mhi_queue_skb(), add a new generic mhi_queue()
> as a common helper.
>
> Note that the unified mhi_queue() aligns pm_lock locking with the
> mhi_queue_buf() behavior, taking it with the irqsave variant (vs. the
> _bh variant used by the former queue_skb and queue_dma versions).
>
> Signed-off-by: Loic Poulain <loic.poulain@linaro.org>
This looks much cleaner!
Reviewed-by: Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>
Thanks,
Mani
* Re: [PATCH v2] mhi: core: Factorize mhi queuing
From: Manivannan Sadhasivam @ 2021-01-21 11:23 UTC
To: Loic Poulain; +Cc: linux-arm-msm, hemantk
On Fri, Jan 08, 2021 at 05:52:30PM +0100, Loic Poulain wrote:
> Instead of duplicating the queuing procedure in mhi_queue_dma(),
> mhi_queue_buf() and mhi_queue_skb(), add a new generic mhi_queue()
> as a common helper.
>
> Note that the unified mhi_queue() aligns pm_lock locking with the
> mhi_queue_buf() behavior, taking it with the irqsave variant (vs. the
> _bh variant used by the former queue_skb and queue_dma versions).
>
> Signed-off-by: Loic Poulain <loic.poulain@linaro.org>
Applied to mhi-next!
Thanks,
Mani