* [PATCH] mhi: core: Factorize mhi queuing
@ 2020-12-10 11:31 Loic Poulain
  2021-01-07  2:45 ` Hemant Kumar
  2021-01-08 12:07 ` Manivannan Sadhasivam
  0 siblings, 2 replies; 4+ messages in thread
From: Loic Poulain @ 2020-12-10 11:31 UTC
  To: manivannan.sadhasivam, hemantk; +Cc: linux-arm-msm, Loic Poulain

Instead of duplicating the queuing procedure in mhi_queue_dma(),
mhi_queue_buf() and mhi_queue_skb(), add a new generic mhi_queue()
as a common helper.

Signed-off-by: Loic Poulain <loic.poulain@linaro.org>
---
 drivers/bus/mhi/core/main.c | 160 +++++++++++---------------------------------
 1 file changed, 38 insertions(+), 122 deletions(-)

diff --git a/drivers/bus/mhi/core/main.c b/drivers/bus/mhi/core/main.c
index 3871ef0..4fa4c88 100644
--- a/drivers/bus/mhi/core/main.c
+++ b/drivers/bus/mhi/core/main.c
@@ -963,118 +963,78 @@ static bool mhi_is_ring_full(struct mhi_controller *mhi_cntrl,
 	return (tmp == ring->rp);
 }
 
-int mhi_queue_skb(struct mhi_device *mhi_dev, enum dma_data_direction dir,
-		  struct sk_buff *skb, size_t len, enum mhi_flags mflags)
+static int mhi_queue(struct mhi_device *mhi_dev, struct mhi_buf_info *buf_info,
+		     enum dma_data_direction dir, enum mhi_flags mflags)
 {
 	struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
 	struct mhi_chan *mhi_chan = (dir == DMA_TO_DEVICE) ? mhi_dev->ul_chan :
 							     mhi_dev->dl_chan;
 	struct mhi_ring *tre_ring = &mhi_chan->tre_ring;
-	struct mhi_buf_info buf_info = { };
+	unsigned long flags;
 	int ret;
 
-	/* If MHI host pre-allocates buffers then client drivers cannot queue */
-	if (mhi_chan->pre_alloc)
-		return -EINVAL;
+	if (unlikely(MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)))
+		return -EIO;
 
-	if (mhi_is_ring_full(mhi_cntrl, tre_ring))
-		return -ENOMEM;
+	read_lock_irqsave(&mhi_cntrl->pm_lock, flags);
 
-	read_lock_bh(&mhi_cntrl->pm_lock);
-	if (unlikely(MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state))) {
-		read_unlock_bh(&mhi_cntrl->pm_lock);
-		return -EIO;
+	ret = mhi_is_ring_full(mhi_cntrl, tre_ring);
+	if (unlikely(ret)) {
+		ret = -ENOMEM;
+		goto exit_unlock;
 	}
 
-	/* we're in M3 or transitioning to M3 */
+	ret = mhi_gen_tre(mhi_cntrl, mhi_chan, buf_info, mflags);
+	if (unlikely(ret))
+		goto exit_unlock;
+
+	/* trigger M3 exit if necessary */
 	if (MHI_PM_IN_SUSPEND_STATE(mhi_cntrl->pm_state))
 		mhi_trigger_resume(mhi_cntrl);
 
-	/* Toggle wake to exit out of M2 */
+	/* Assert dev_wake (to exit/prevent M1/M2) */
 	mhi_cntrl->wake_toggle(mhi_cntrl);
 
-	buf_info.v_addr = skb->data;
-	buf_info.cb_buf = skb;
-	buf_info.len = len;
-
-	ret = mhi_gen_tre(mhi_cntrl, mhi_chan, &buf_info, mflags);
-	if (unlikely(ret)) {
-		read_unlock_bh(&mhi_cntrl->pm_lock);
-		return ret;
-	}
-
 	if (mhi_chan->dir == DMA_TO_DEVICE)
 		atomic_inc(&mhi_cntrl->pending_pkts);
 
-	if (likely(MHI_DB_ACCESS_VALID(mhi_cntrl))) {
-		read_lock_bh(&mhi_chan->lock);
-		mhi_ring_chan_db(mhi_cntrl, mhi_chan);
-		read_unlock_bh(&mhi_chan->lock);
+	if (unlikely(!MHI_DB_ACCESS_VALID(mhi_cntrl))) {
+		ret = -EIO;
+		goto exit_unlock;
 	}
 
-	read_unlock_bh(&mhi_cntrl->pm_lock);
+	mhi_ring_chan_db(mhi_cntrl, mhi_chan);
 
-	return 0;
+exit_unlock:
+	read_unlock_irqrestore(&mhi_cntrl->pm_lock, flags);
+
+	return ret;
 }
-EXPORT_SYMBOL_GPL(mhi_queue_skb);
 
-int mhi_queue_dma(struct mhi_device *mhi_dev, enum dma_data_direction dir,
-		  struct mhi_buf *mhi_buf, size_t len, enum mhi_flags mflags)
+int mhi_queue_skb(struct mhi_device *mhi_dev, enum dma_data_direction dir,
+		  struct sk_buff *skb, size_t len, enum mhi_flags mflags)
 {
-	struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
-	struct mhi_chan *mhi_chan = (dir == DMA_TO_DEVICE) ? mhi_dev->ul_chan :
-							     mhi_dev->dl_chan;
-	struct device *dev = &mhi_cntrl->mhi_dev->dev;
-	struct mhi_ring *tre_ring = &mhi_chan->tre_ring;
 	struct mhi_buf_info buf_info = { };
-	int ret;
 
-	/* If MHI host pre-allocates buffers then client drivers cannot queue */
-	if (mhi_chan->pre_alloc)
-		return -EINVAL;
-
-	if (mhi_is_ring_full(mhi_cntrl, tre_ring))
-		return -ENOMEM;
-
-	read_lock_bh(&mhi_cntrl->pm_lock);
-	if (unlikely(MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state))) {
-		dev_err(dev, "MHI is not in activate state, PM state: %s\n",
-			to_mhi_pm_state_str(mhi_cntrl->pm_state));
-		read_unlock_bh(&mhi_cntrl->pm_lock);
-
-		return -EIO;
-	}
+	buf_info.v_addr = skb->data;
+	buf_info.cb_buf = skb;
+	buf_info.len = len;
 
-	/* we're in M3 or transitioning to M3 */
-	if (MHI_PM_IN_SUSPEND_STATE(mhi_cntrl->pm_state))
-		mhi_trigger_resume(mhi_cntrl);
+	return mhi_queue(mhi_dev, &buf_info, dir, mflags);
+}
+EXPORT_SYMBOL_GPL(mhi_queue_skb);
 
-	/* Toggle wake to exit out of M2 */
-	mhi_cntrl->wake_toggle(mhi_cntrl);
+int mhi_queue_dma(struct mhi_device *mhi_dev, enum dma_data_direction dir,
+		  struct mhi_buf *mhi_buf, size_t len, enum mhi_flags mflags)
+{
+	struct mhi_buf_info buf_info = { };
 
 	buf_info.p_addr = mhi_buf->dma_addr;
 	buf_info.cb_buf = mhi_buf;
 	buf_info.pre_mapped = true;
 	buf_info.len = len;
 
-	ret = mhi_gen_tre(mhi_cntrl, mhi_chan, &buf_info, mflags);
-	if (unlikely(ret)) {
-		read_unlock_bh(&mhi_cntrl->pm_lock);
-		return ret;
-	}
-
-	if (mhi_chan->dir == DMA_TO_DEVICE)
-		atomic_inc(&mhi_cntrl->pending_pkts);
-
-	if (likely(MHI_DB_ACCESS_VALID(mhi_cntrl))) {
-		read_lock_bh(&mhi_chan->lock);
-		mhi_ring_chan_db(mhi_cntrl, mhi_chan);
-		read_unlock_bh(&mhi_chan->lock);
-	}
-
-	read_unlock_bh(&mhi_cntrl->pm_lock);
-
-	return 0;
+	return mhi_queue(mhi_dev, &buf_info, dir, mflags);
 }
 EXPORT_SYMBOL_GPL(mhi_queue_dma);
 
@@ -1128,57 +1088,13 @@ int mhi_gen_tre(struct mhi_controller *mhi_cntrl, struct mhi_chan *mhi_chan,
 int mhi_queue_buf(struct mhi_device *mhi_dev, enum dma_data_direction dir,
 		  void *buf, size_t len, enum mhi_flags mflags)
 {
-	struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
-	struct mhi_chan *mhi_chan = (dir == DMA_TO_DEVICE) ? mhi_dev->ul_chan :
-							     mhi_dev->dl_chan;
-	struct mhi_ring *tre_ring;
 	struct mhi_buf_info buf_info = { };
-	unsigned long flags;
-	int ret;
-
-	/*
-	 * this check here only as a guard, it's always
-	 * possible mhi can enter error while executing rest of function,
-	 * which is not fatal so we do not need to hold pm_lock
-	 */
-	if (unlikely(MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)))
-		return -EIO;
-
-	tre_ring = &mhi_chan->tre_ring;
-	if (mhi_is_ring_full(mhi_cntrl, tre_ring))
-		return -ENOMEM;
 
 	buf_info.v_addr = buf;
 	buf_info.cb_buf = buf;
 	buf_info.len = len;
 
-	ret = mhi_gen_tre(mhi_cntrl, mhi_chan, &buf_info, mflags);
-	if (unlikely(ret))
-		return ret;
-
-	read_lock_irqsave(&mhi_cntrl->pm_lock, flags);
-
-	/* we're in M3 or transitioning to M3 */
-	if (MHI_PM_IN_SUSPEND_STATE(mhi_cntrl->pm_state))
-		mhi_trigger_resume(mhi_cntrl);
-
-	/* Toggle wake to exit out of M2 */
-	mhi_cntrl->wake_toggle(mhi_cntrl);
-
-	if (mhi_chan->dir == DMA_TO_DEVICE)
-		atomic_inc(&mhi_cntrl->pending_pkts);
-
-	if (likely(MHI_DB_ACCESS_VALID(mhi_cntrl))) {
-		unsigned long flags;
-
-		read_lock_irqsave(&mhi_chan->lock, flags);
-		mhi_ring_chan_db(mhi_cntrl, mhi_chan);
-		read_unlock_irqrestore(&mhi_chan->lock, flags);
-	}
-
-	read_unlock_irqrestore(&mhi_cntrl->pm_lock, flags);
-
-	return 0;
+	return mhi_queue(mhi_dev, &buf_info, dir, mflags);
 }
 EXPORT_SYMBOL_GPL(mhi_queue_buf);
 
-- 
2.7.4
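
The factoring above changes only the internals; the exported entry points keep
their signatures. A minimal sketch of how a client driver still queues an
uplink skb through the unchanged API (illustrative only, not part of this
patch; the function name, error handling and use of the MHI_EOT flag are
assumptions):

#include <linux/mhi.h>
#include <linux/skbuff.h>

/* Hypothetical client TX path, for illustration only. */
static int example_client_xmit(struct mhi_device *mhi_dev, struct sk_buff *skb)
{
	int ret;

	/* MHI_EOT marks the skb as a complete transfer unit. */
	ret = mhi_queue_skb(mhi_dev, DMA_TO_DEVICE, skb, skb->len, MHI_EOT);
	if (ret) {
		/* e.g. -ENOMEM if the TRE ring is full (caller may retry),
		 * -EIO if the link is in an error state; the skb still
		 * belongs to the caller. */
		return ret;
	}

	/* On success, MHI owns the skb until the channel's transfer
	 * completion callback runs. */
	return 0;
}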



* Re: [PATCH] mhi: core: Factorize mhi queuing
  2020-12-10 11:31 [PATCH] mhi: core: Factorize mhi queuing Loic Poulain
@ 2021-01-07  2:45 ` Hemant Kumar
  2021-01-08 12:07 ` Manivannan Sadhasivam
  1 sibling, 0 replies; 4+ messages in thread
From: Hemant Kumar @ 2021-01-07  2:45 UTC
  To: Loic Poulain, manivannan.sadhasivam; +Cc: linux-arm-msm

Hi Loic,

On 12/10/20 3:31 AM, Loic Poulain wrote:
> Instead of duplicating the queuing procedure in mhi_queue_dma(),
> mhi_queue_buf() and mhi_queue_skb(), add a new generic mhi_queue()
> as a common helper.
> 
> Signed-off-by: Loic Poulain <loic.poulain@linaro.org>
> ---
>   drivers/bus/mhi/core/main.c | 160 +++++++++++---------------------------------
>   1 file changed, 38 insertions(+), 122 deletions(-)
> 
> diff --git a/drivers/bus/mhi/core/main.c b/drivers/bus/mhi/core/main.c
> index 3871ef0..4fa4c88 100644
> --- a/drivers/bus/mhi/core/main.c
> +++ b/drivers/bus/mhi/core/main.c
> @@ -963,118 +963,78 @@ static bool mhi_is_ring_full(struct mhi_controller *mhi_cntrl,
>   	return (tmp == ring->rp);
>   }
>   
> -int mhi_queue_skb(struct mhi_device *mhi_dev, enum dma_data_direction dir,
> -		  struct sk_buff *skb, size_t len, enum mhi_flags mflags)
> +static int mhi_queue(struct mhi_device *mhi_dev, struct mhi_buf_info *buf_info,
> +		     enum dma_data_direction dir, enum mhi_flags mflags)
>   {
>   	struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
>   	struct mhi_chan *mhi_chan = (dir == DMA_TO_DEVICE) ? mhi_dev->ul_chan :
>   							     mhi_dev->dl_chan;
>   	struct mhi_ring *tre_ring = &mhi_chan->tre_ring;
> -	struct mhi_buf_info buf_info = { };
> +	unsigned long flags;
>   	int ret;
>   
> -	/* If MHI host pre-allocates buffers then client drivers cannot queue */
> -	if (mhi_chan->pre_alloc)
Are we getting rid of auto_queue?
> -		return -EINVAL;
> +	if (unlikely(MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)))
> +		return -EIO;
>   
> -	if (mhi_is_ring_full(mhi_cntrl, tre_ring))
> -		return -ENOMEM;
> +	read_lock_irqsave(&mhi_cntrl->pm_lock, flags);
This is what mhi_queue_buf() used, while the others (_skb, _dma) used
read_lock_bh(). I see that you are now making it reentrant, as you
identified before :) The irq variant disables interrupts on the core this
code is running on and disables preemption, whereas the bh variant is
preemptible. Is that the reason you chose irq instead of bh?
[..]

Thanks,
Hemant
-- 
The Qualcomm Innovation Center, Inc. is a member of the Code Aurora Forum,
a Linux Foundation Collaborative Project
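
For reference, a minimal sketch of the two reader-side locking variants being
contrasted above, assuming a bare rwlock_t (illustrative, not code from the
patch):

#include <linux/spinlock.h>

static void bh_variant(rwlock_t *lock)
{
	/* Disables softirq processing on the local CPU while held;
	 * usable from process and softirq context. */
	read_lock_bh(lock);
	/* ... critical section ... */
	read_unlock_bh(lock);
}

static void irq_variant(rwlock_t *lock)
{
	unsigned long flags;

	/* Also disables local interrupts, restoring their previous
	 * state on unlock, so it stays safe if the lock can be
	 * contended from hardirq context. */
	read_lock_irqsave(lock, flags);
	/* ... critical section ... */
	read_unlock_irqrestore(lock, flags);
}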


* Re: [PATCH] mhi: core: Factorize mhi queuing
  2020-12-10 11:31 [PATCH] mhi: core: Factorize mhi queuing Loic Poulain
  2021-01-07  2:45 ` Hemant Kumar
@ 2021-01-08 12:07 ` Manivannan Sadhasivam
       [not found]   ` <CAMZdPi-kfYy0x3UL_Hdvj=2t=aqvvxkRrm_BN=K14dF5PzbxyA@mail.gmail.com>
  1 sibling, 1 reply; 4+ messages in thread
From: Manivannan Sadhasivam @ 2021-01-08 12:07 UTC
  To: Loic Poulain; +Cc: hemantk, linux-arm-msm

On Thu, Dec 10, 2020 at 12:31:32PM +0100, Loic Poulain wrote:
> Instead of duplicating the queuing procedure in mhi_queue_dma(),
> mhi_queue_buf() and mhi_queue_skb(), add a new generic mhi_queue()
> as a common helper.
> 

While doing the factoring, please don't make any additional changes. If you
want to, then use incremental patches.

Thanks,
Mani

[..]


* Re: [PATCH] mhi: core: Factorize mhi queuing
       [not found]   ` <CAMZdPi-kfYy0x3UL_Hdvj=2t=aqvvxkRrm_BN=K14dF5PzbxyA@mail.gmail.com>
@ 2021-01-08 15:18     ` Manivannan Sadhasivam
  0 siblings, 0 replies; 4+ messages in thread
From: Manivannan Sadhasivam @ 2021-01-08 15:18 UTC
  To: Loic Poulain; +Cc: Hemant Kumar, linux-arm-msm

On Fri, Jan 08, 2021 at 04:10:11PM +0100, Loic Poulain wrote:
> Hi Mani,
> 
> On Fri, 8 Jan 2021 at 13:07, Manivannan Sadhasivam <
> manivannan.sadhasivam@linaro.org> wrote:
> 
> > On Thu, Dec 10, 2020 at 12:31:32PM +0100, Loic Poulain wrote:
> > > Instead of duplicating the queuing procedure in mhi_queue_dma(),
> > > mhi_queue_buf() and mhi_queue_skb(), add a new generic mhi_queue()
> > > as a common helper.
> > >
> >
> > While doing the factoring, please don't make any additional changes. If
> > you want to, then use incremental patches.
> >
> 
> Well, I've not really made any additional change; I've just aligned all
> the mhi_queue*() functions on the _irq locking variant. That was already
> the variant used in mhi_queue_buf(); mhi_queue_dma() and mhi_queue_skb()
> now use it as well (instead of _bh), but that follows from the factoring.
> This has been tested carefully.
> 

And you have removed the "pre_alloc" check... Either mention all the changes
in the commit message or do them incrementally.

Thanks,
Mani

> Regards,
> Loic
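
For illustration, the "pre_alloc" guard dropped by the patch could be restored
in the common helper with an incremental hunk along these lines (reconstructed
from the removed code above; a sketch, not an actual follow-up patch):

@@ static int mhi_queue(struct mhi_device *mhi_dev, struct mhi_buf_info *buf_info,
 	unsigned long flags;
 	int ret;
 
+	/* If MHI host pre-allocates buffers then client drivers cannot queue */
+	if (mhi_chan->pre_alloc)
+		return -EINVAL;
+
 	if (unlikely(MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)))
 		return -EIO;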

