From: Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>
To: Loic Poulain <loic.poulain@linaro.org>
Cc: hemantk@codeaurora.org, linux-arm-msm@vger.kernel.org
Subject: Re: [PATCH] mhi: core: Factorize mhi queuing
Date: Fri, 8 Jan 2021 17:37:22 +0530
Message-ID: <20210108120722.GC74017@thinkpad>
In-Reply-To: <1607599892-6229-1-git-send-email-loic.poulain@linaro.org>

On Thu, Dec 10, 2020 at 12:31:32PM +0100, Loic Poulain wrote:
> Instead of duplicating the queuing procedure in mhi_queue_dma(),
> mhi_queue_buf(), and mhi_queue_skb(), add a new generic mhi_queue()
> as a common helper.
> 

While doing the factoring, please don't make any additional changes. If you
want to make them, do so in incremental patches.
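For example, compared to the code this patch removes, mhi_queue() also drops
the pre_alloc check from the skb/dma paths, switches pm_lock from
read_lock_bh() to read_lock_irqsave(), no longer takes mhi_chan->lock around
mhi_ring_chan_db(), and returns -EIO when doorbell access is not valid even
though the TRE has already been queued. Those are behavior changes, not
factoring. A rough sketch (untested, just to show the scope I mean) of a
helper that only factors the common path while keeping the current semantics:

static int mhi_queue(struct mhi_device *mhi_dev, struct mhi_buf_info *buf_info,
		     enum dma_data_direction dir, enum mhi_flags mflags)
{
	struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
	struct mhi_chan *mhi_chan = (dir == DMA_TO_DEVICE) ? mhi_dev->ul_chan :
							     mhi_dev->dl_chan;
	struct mhi_ring *tre_ring = &mhi_chan->tre_ring;
	int ret;

	/* If MHI host pre-allocates buffers then client drivers cannot queue */
	if (mhi_chan->pre_alloc)
		return -EINVAL;

	if (mhi_is_ring_full(mhi_cntrl, tre_ring))
		return -ENOMEM;

	read_lock_bh(&mhi_cntrl->pm_lock);
	if (unlikely(MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state))) {
		read_unlock_bh(&mhi_cntrl->pm_lock);
		return -EIO;
	}

	/* we're in M3 or transitioning to M3 */
	if (MHI_PM_IN_SUSPEND_STATE(mhi_cntrl->pm_state))
		mhi_trigger_resume(mhi_cntrl);

	/* Toggle wake to exit out of M2 */
	mhi_cntrl->wake_toggle(mhi_cntrl);

	ret = mhi_gen_tre(mhi_cntrl, mhi_chan, buf_info, mflags);
	if (unlikely(ret)) {
		read_unlock_bh(&mhi_cntrl->pm_lock);
		return ret;
	}

	if (mhi_chan->dir == DMA_TO_DEVICE)
		atomic_inc(&mhi_cntrl->pending_pkts);

	/* As today: silently skip the doorbell if DB access is not valid */
	if (likely(MHI_DB_ACCESS_VALID(mhi_cntrl))) {
		read_lock_bh(&mhi_chan->lock);
		mhi_ring_chan_db(mhi_cntrl, mhi_chan);
		read_unlock_bh(&mhi_chan->lock);
	}

	read_unlock_bh(&mhi_cntrl->pm_lock);

	return 0;
}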

Thanks,
Mani
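PS: the exported entry points keep their signatures, so client drivers need
no changes from the factoring itself. A hypothetical caller (illustration
only) stays exactly as before:

	struct sk_buff *skb = alloc_skb(len, GFP_KERNEL);
	int ret;

	if (!skb)
		return -ENOMEM;
	skb_put(skb, len);

	/* Same call before and after the patch */
	ret = mhi_queue_skb(mhi_dev, DMA_TO_DEVICE, skb, len, MHI_EOT);
	if (ret)
		dev_kfree_skb(skb);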

> Signed-off-by: Loic Poulain <loic.poulain@linaro.org>
> ---
>  drivers/bus/mhi/core/main.c | 160 +++++++++++---------------------------------
>  1 file changed, 38 insertions(+), 122 deletions(-)
> 
> diff --git a/drivers/bus/mhi/core/main.c b/drivers/bus/mhi/core/main.c
> index 3871ef0..4fa4c88 100644
> --- a/drivers/bus/mhi/core/main.c
> +++ b/drivers/bus/mhi/core/main.c
> @@ -963,118 +963,78 @@ static bool mhi_is_ring_full(struct mhi_controller *mhi_cntrl,
>  	return (tmp == ring->rp);
>  }
>  
> -int mhi_queue_skb(struct mhi_device *mhi_dev, enum dma_data_direction dir,
> -		  struct sk_buff *skb, size_t len, enum mhi_flags mflags)
> +static int mhi_queue(struct mhi_device *mhi_dev, struct mhi_buf_info *buf_info,
> +		     enum dma_data_direction dir, enum mhi_flags mflags)
>  {
>  	struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
>  	struct mhi_chan *mhi_chan = (dir == DMA_TO_DEVICE) ? mhi_dev->ul_chan :
>  							     mhi_dev->dl_chan;
>  	struct mhi_ring *tre_ring = &mhi_chan->tre_ring;
> -	struct mhi_buf_info buf_info = { };
> +	unsigned long flags;
>  	int ret;
>  
> -	/* If MHI host pre-allocates buffers then client drivers cannot queue */
> -	if (mhi_chan->pre_alloc)
> -		return -EINVAL;
> +	if (unlikely(MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)))
> +		return -EIO;
>  
> -	if (mhi_is_ring_full(mhi_cntrl, tre_ring))
> -		return -ENOMEM;
> +	read_lock_irqsave(&mhi_cntrl->pm_lock, flags);
>  
> -	read_lock_bh(&mhi_cntrl->pm_lock);
> -	if (unlikely(MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state))) {
> -		read_unlock_bh(&mhi_cntrl->pm_lock);
> -		return -EIO;
> +	ret = mhi_is_ring_full(mhi_cntrl, tre_ring);
> +	if (unlikely(ret)) {
> +		ret = -ENOMEM;
> +		goto exit_unlock;
>  	}
>  
> -	/* we're in M3 or transitioning to M3 */
> +	ret = mhi_gen_tre(mhi_cntrl, mhi_chan, buf_info, mflags);
> +	if (unlikely(ret))
> +		goto exit_unlock;
> +
> +	/* trigger M3 exit if necessary */
>  	if (MHI_PM_IN_SUSPEND_STATE(mhi_cntrl->pm_state))
>  		mhi_trigger_resume(mhi_cntrl);
>  
> -	/* Toggle wake to exit out of M2 */
> +	/* Assert dev_wake (to exit/prevent M1/M2) */
>  	mhi_cntrl->wake_toggle(mhi_cntrl);
>  
> -	buf_info.v_addr = skb->data;
> -	buf_info.cb_buf = skb;
> -	buf_info.len = len;
> -
> -	ret = mhi_gen_tre(mhi_cntrl, mhi_chan, &buf_info, mflags);
> -	if (unlikely(ret)) {
> -		read_unlock_bh(&mhi_cntrl->pm_lock);
> -		return ret;
> -	}
> -
>  	if (mhi_chan->dir == DMA_TO_DEVICE)
>  		atomic_inc(&mhi_cntrl->pending_pkts);
>  
> -	if (likely(MHI_DB_ACCESS_VALID(mhi_cntrl))) {
> -		read_lock_bh(&mhi_chan->lock);
> -		mhi_ring_chan_db(mhi_cntrl, mhi_chan);
> -		read_unlock_bh(&mhi_chan->lock);
> +	if (unlikely(!MHI_DB_ACCESS_VALID(mhi_cntrl))) {
> +		ret = -EIO;
> +		goto exit_unlock;
>  	}
>  
> -	read_unlock_bh(&mhi_cntrl->pm_lock);
> +	mhi_ring_chan_db(mhi_cntrl, mhi_chan);
>  
> -	return 0;
> +exit_unlock:
> +	read_unlock_irqrestore(&mhi_cntrl->pm_lock, flags);
> +
> +	return ret;
>  }
> -EXPORT_SYMBOL_GPL(mhi_queue_skb);
>  
> -int mhi_queue_dma(struct mhi_device *mhi_dev, enum dma_data_direction dir,
> -		  struct mhi_buf *mhi_buf, size_t len, enum mhi_flags mflags)
> +int mhi_queue_skb(struct mhi_device *mhi_dev, enum dma_data_direction dir,
> +		  struct sk_buff *skb, size_t len, enum mhi_flags mflags)
>  {
> -	struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
> -	struct mhi_chan *mhi_chan = (dir == DMA_TO_DEVICE) ? mhi_dev->ul_chan :
> -							     mhi_dev->dl_chan;
> -	struct device *dev = &mhi_cntrl->mhi_dev->dev;
> -	struct mhi_ring *tre_ring = &mhi_chan->tre_ring;
>  	struct mhi_buf_info buf_info = { };
> -	int ret;
>  
> -	/* If MHI host pre-allocates buffers then client drivers cannot queue */
> -	if (mhi_chan->pre_alloc)
> -		return -EINVAL;
> -
> -	if (mhi_is_ring_full(mhi_cntrl, tre_ring))
> -		return -ENOMEM;
> -
> -	read_lock_bh(&mhi_cntrl->pm_lock);
> -	if (unlikely(MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state))) {
> -		dev_err(dev, "MHI is not in activate state, PM state: %s\n",
> -			to_mhi_pm_state_str(mhi_cntrl->pm_state));
> -		read_unlock_bh(&mhi_cntrl->pm_lock);
> -
> -		return -EIO;
> -	}
> +	buf_info.v_addr = skb->data;
> +	buf_info.cb_buf = skb;
> +	buf_info.len = len;
>  
> -	/* we're in M3 or transitioning to M3 */
> -	if (MHI_PM_IN_SUSPEND_STATE(mhi_cntrl->pm_state))
> -		mhi_trigger_resume(mhi_cntrl);
> +	return mhi_queue(mhi_dev, &buf_info, dir, mflags);
> +}
> +EXPORT_SYMBOL_GPL(mhi_queue_skb);
>  
> -	/* Toggle wake to exit out of M2 */
> -	mhi_cntrl->wake_toggle(mhi_cntrl);
> +int mhi_queue_dma(struct mhi_device *mhi_dev, enum dma_data_direction dir,
> +		  struct mhi_buf *mhi_buf, size_t len, enum mhi_flags mflags)
> +{
> +	struct mhi_buf_info buf_info = { };
>  
>  	buf_info.p_addr = mhi_buf->dma_addr;
>  	buf_info.cb_buf = mhi_buf;
>  	buf_info.pre_mapped = true;
>  	buf_info.len = len;
>  
> -	ret = mhi_gen_tre(mhi_cntrl, mhi_chan, &buf_info, mflags);
> -	if (unlikely(ret)) {
> -		read_unlock_bh(&mhi_cntrl->pm_lock);
> -		return ret;
> -	}
> -
> -	if (mhi_chan->dir == DMA_TO_DEVICE)
> -		atomic_inc(&mhi_cntrl->pending_pkts);
> -
> -	if (likely(MHI_DB_ACCESS_VALID(mhi_cntrl))) {
> -		read_lock_bh(&mhi_chan->lock);
> -		mhi_ring_chan_db(mhi_cntrl, mhi_chan);
> -		read_unlock_bh(&mhi_chan->lock);
> -	}
> -
> -	read_unlock_bh(&mhi_cntrl->pm_lock);
> -
> -	return 0;
> +	return mhi_queue(mhi_dev, &buf_info, dir, mflags);
>  }
>  EXPORT_SYMBOL_GPL(mhi_queue_dma);
>  
> @@ -1128,57 +1088,13 @@ int mhi_gen_tre(struct mhi_controller *mhi_cntrl, struct mhi_chan *mhi_chan,
>  int mhi_queue_buf(struct mhi_device *mhi_dev, enum dma_data_direction dir,
>  		  void *buf, size_t len, enum mhi_flags mflags)
>  {
> -	struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
> -	struct mhi_chan *mhi_chan = (dir == DMA_TO_DEVICE) ? mhi_dev->ul_chan :
> -							     mhi_dev->dl_chan;
> -	struct mhi_ring *tre_ring;
>  	struct mhi_buf_info buf_info = { };
> -	unsigned long flags;
> -	int ret;
> -
> -	/*
> -	 * this check here only as a guard, it's always
> -	 * possible mhi can enter error while executing rest of function,
> -	 * which is not fatal so we do not need to hold pm_lock
> -	 */
> -	if (unlikely(MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)))
> -		return -EIO;
> -
> -	tre_ring = &mhi_chan->tre_ring;
> -	if (mhi_is_ring_full(mhi_cntrl, tre_ring))
> -		return -ENOMEM;
>  
>  	buf_info.v_addr = buf;
>  	buf_info.cb_buf = buf;
>  	buf_info.len = len;
>  
> -	ret = mhi_gen_tre(mhi_cntrl, mhi_chan, &buf_info, mflags);
> -	if (unlikely(ret))
> -		return ret;
> -
> -	read_lock_irqsave(&mhi_cntrl->pm_lock, flags);
> -
> -	/* we're in M3 or transitioning to M3 */
> -	if (MHI_PM_IN_SUSPEND_STATE(mhi_cntrl->pm_state))
> -		mhi_trigger_resume(mhi_cntrl);
> -
> -	/* Toggle wake to exit out of M2 */
> -	mhi_cntrl->wake_toggle(mhi_cntrl);
> -
> -	if (mhi_chan->dir == DMA_TO_DEVICE)
> -		atomic_inc(&mhi_cntrl->pending_pkts);
> -
> -	if (likely(MHI_DB_ACCESS_VALID(mhi_cntrl))) {
> -		unsigned long flags;
> -
> -		read_lock_irqsave(&mhi_chan->lock, flags);
> -		mhi_ring_chan_db(mhi_cntrl, mhi_chan);
> -		read_unlock_irqrestore(&mhi_chan->lock, flags);
> -	}
> -
> -	read_unlock_irqrestore(&mhi_cntrl->pm_lock, flags);
> -
> -	return 0;
> +	return mhi_queue(mhi_dev, &buf_info, dir, mflags);
>  }
>  EXPORT_SYMBOL_GPL(mhi_queue_buf);
>  
> -- 
> 2.7.4
> 
