From: Vinod Koul <vinod.koul@intel.com>
To: Anup Patel <anup.patel@broadcom.com>
Cc: Rob Herring <robh+dt@kernel.org>,
	Mark Rutland <mark.rutland@arm.com>,
	Dan Williams <dan.j.williams@intel.com>,
	Florian Fainelli <f.fainelli@gmail.com>,
	Scott Branden <sbranden@broadcom.com>,
	Ray Jui <rjui@broadcom.com>,
	linux-kernel@vger.kernel.org,
	linux-arm-kernel@lists.infradead.org, devicetree@vger.kernel.org,
	dmaengine@vger.kernel.org, bcm-kernel-feedback-list@broadcom.com
Subject: Re: [PATCH v2 03/16] dmaengine: bcm-sba-raid: Common flags for sba_request state and fence
Date: Thu, 17 Aug 2017 09:15:34 +0530
Message-ID: <20170817034534.GY3053@localhost>
In-Reply-To: <1501583880-32072-4-git-send-email-anup.patel@broadcom.com>

On Tue, Aug 01, 2017 at 04:07:47PM +0530, Anup Patel wrote:
> This patch merges the sba_request state and fence into a common
> sba_request flags field. In future, we can also extend sba_request
> flags as required.

It also changes the flag values from sequential states to individual
bits; I don't see why that was done. Care to explain, please?
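For reference, a minimal sketch of what the bit encoding implies for
state transitions (the enum values mirror the patch; sba_req_set_state()
is a hypothetical helper for illustration, not part of the driver):

#include <stdio.h>

enum sba_request_flags {
	SBA_REQUEST_STATE_FREE		= 0x001,
	SBA_REQUEST_STATE_PENDING	= 0x004,
	SBA_REQUEST_STATE_MASK		= 0x0ff,
	SBA_REQUEST_FENCE		= 0x100,
};

/*
 * With the bit encoding, the state shares one word with other flags,
 * so every state change must clear the old state bits first.
 */
static void sba_req_set_state(unsigned int *flags, unsigned int state)
{
	*flags &= ~SBA_REQUEST_STATE_MASK;	/* drop old state bits */
	*flags |= state;			/* set the new state */
}

int main(void)
{
	unsigned int flags = SBA_REQUEST_STATE_FREE | SBA_REQUEST_FENCE;

	sba_req_set_state(&flags, SBA_REQUEST_STATE_PENDING);
	printf("0x%03x\n", flags);	/* 0x104: the fence bit survived */
	return 0;
}

The upshot would be that SBA_REQUEST_FENCE rides along untouched across
state transitions, at the cost of a read-modify-write on every
transition instead of a plain assignment.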

> 
> Signed-off-by: Anup Patel <anup.patel@broadcom.com>
> ---
>  drivers/dma/bcm-sba-raid.c | 66 ++++++++++++++++++++++++++--------------------
>  1 file changed, 38 insertions(+), 28 deletions(-)
> 
> diff --git a/drivers/dma/bcm-sba-raid.c b/drivers/dma/bcm-sba-raid.c
> index f81d5ac..6fa3df1 100644
> --- a/drivers/dma/bcm-sba-raid.c
> +++ b/drivers/dma/bcm-sba-raid.c
> @@ -91,22 +91,23 @@
>  
>  /* ===== Driver data structures ===== */
>  
> -enum sba_request_state {
> -	SBA_REQUEST_STATE_FREE = 1,
> -	SBA_REQUEST_STATE_ALLOCED = 2,
> -	SBA_REQUEST_STATE_PENDING = 3,
> -	SBA_REQUEST_STATE_ACTIVE = 4,
> -	SBA_REQUEST_STATE_RECEIVED = 5,
> -	SBA_REQUEST_STATE_COMPLETED = 6,
> -	SBA_REQUEST_STATE_ABORTED = 7,
> +enum sba_request_flags {
> +	SBA_REQUEST_STATE_FREE		= 0x001,
> +	SBA_REQUEST_STATE_ALLOCED	= 0x002,
> +	SBA_REQUEST_STATE_PENDING	= 0x004,
> +	SBA_REQUEST_STATE_ACTIVE	= 0x008,
> +	SBA_REQUEST_STATE_RECEIVED	= 0x010,
> +	SBA_REQUEST_STATE_COMPLETED	= 0x020,
> +	SBA_REQUEST_STATE_ABORTED	= 0x040,
> +	SBA_REQUEST_STATE_MASK		= 0x0ff,
> +	SBA_REQUEST_FENCE		= 0x100,
>  };
>  
>  struct sba_request {
>  	/* Global state */
>  	struct list_head node;
>  	struct sba_device *sba;
> -	enum sba_request_state state;
> -	bool fence;
> +	u32 flags;
>  	/* Chained requests management */
>  	struct sba_request *first;
>  	struct list_head next;
> @@ -217,8 +218,7 @@ static struct sba_request *sba_alloc_request(struct sba_device *sba)
>  	if (!req)
>  		return NULL;
>  
> -	req->state = SBA_REQUEST_STATE_ALLOCED;
> -	req->fence = false;
> +	req->flags = SBA_REQUEST_STATE_ALLOCED;
>  	req->first = req;
>  	INIT_LIST_HEAD(&req->next);
>  	req->next_count = 1;
> @@ -234,7 +234,8 @@ static void _sba_pending_request(struct sba_device *sba,
>  				 struct sba_request *req)
>  {
>  	lockdep_assert_held(&sba->reqs_lock);
> -	req->state = SBA_REQUEST_STATE_PENDING;
> +	req->flags &= ~SBA_REQUEST_STATE_MASK;
> +	req->flags |= SBA_REQUEST_STATE_PENDING;
>  	list_move_tail(&req->node, &sba->reqs_pending_list);
>  	if (list_empty(&sba->reqs_active_list))
>  		sba->reqs_fence = false;
> @@ -249,9 +250,10 @@ static bool _sba_active_request(struct sba_device *sba,
>  		sba->reqs_fence = false;
>  	if (sba->reqs_fence)
>  		return false;
> -	req->state = SBA_REQUEST_STATE_ACTIVE;
> +	req->flags &= ~SBA_REQUEST_STATE_MASK;
> +	req->flags |= SBA_REQUEST_STATE_ACTIVE;
>  	list_move_tail(&req->node, &sba->reqs_active_list);
> -	if (req->fence)
> +	if (req->flags & SBA_REQUEST_FENCE)
>  		sba->reqs_fence = true;
>  	return true;
>  }
> @@ -261,7 +263,8 @@ static void _sba_abort_request(struct sba_device *sba,
>  			       struct sba_request *req)
>  {
>  	lockdep_assert_held(&sba->reqs_lock);
> -	req->state = SBA_REQUEST_STATE_ABORTED;
> +	req->flags &= ~SBA_REQUEST_STATE_MASK;
> +	req->flags |= SBA_REQUEST_STATE_ABORTED;
>  	list_move_tail(&req->node, &sba->reqs_aborted_list);
>  	if (list_empty(&sba->reqs_active_list))
>  		sba->reqs_fence = false;
> @@ -272,7 +275,8 @@ static void _sba_free_request(struct sba_device *sba,
>  			      struct sba_request *req)
>  {
>  	lockdep_assert_held(&sba->reqs_lock);
> -	req->state = SBA_REQUEST_STATE_FREE;
> +	req->flags &= ~SBA_REQUEST_STATE_MASK;
> +	req->flags |= SBA_REQUEST_STATE_FREE;
>  	list_move_tail(&req->node, &sba->reqs_free_list);
>  	if (list_empty(&sba->reqs_active_list))
>  		sba->reqs_fence = false;
> @@ -285,7 +289,8 @@ static void sba_received_request(struct sba_request *req)
>  	struct sba_device *sba = req->sba;
>  
>  	spin_lock_irqsave(&sba->reqs_lock, flags);
> -	req->state = SBA_REQUEST_STATE_RECEIVED;
> +	req->flags &= ~SBA_REQUEST_STATE_MASK;
> +	req->flags |= SBA_REQUEST_STATE_RECEIVED;
>  	list_move_tail(&req->node, &sba->reqs_received_list);
>  	spin_unlock_irqrestore(&sba->reqs_lock, flags);
>  }
> @@ -298,10 +303,12 @@ static void sba_complete_chained_requests(struct sba_request *req)
>  
>  	spin_lock_irqsave(&sba->reqs_lock, flags);
>  
> -	req->state = SBA_REQUEST_STATE_COMPLETED;
> +	req->flags &= ~SBA_REQUEST_STATE_MASK;
> +	req->flags |= SBA_REQUEST_STATE_COMPLETED;
>  	list_move_tail(&req->node, &sba->reqs_completed_list);
>  	list_for_each_entry(nreq, &req->next, next) {
> -		nreq->state = SBA_REQUEST_STATE_COMPLETED;
> +		nreq->flags &= ~SBA_REQUEST_STATE_MASK;
> +		nreq->flags |= SBA_REQUEST_STATE_COMPLETED;
>  		list_move_tail(&nreq->node, &sba->reqs_completed_list);
>  	}
>  	if (list_empty(&sba->reqs_active_list))
> @@ -576,7 +583,7 @@ sba_prep_dma_interrupt(struct dma_chan *dchan, unsigned long flags)
>  	 * Force fence so that no requests are submitted
>  	 * until DMA callback for this request is invoked.
>  	 */
> -	req->fence = true;
> +	req->flags |= SBA_REQUEST_FENCE;
>  
>  	/* Fillup request message */
>  	sba_fillup_interrupt_msg(req, req->cmds, &req->msg);
> @@ -659,7 +666,8 @@ sba_prep_dma_memcpy_req(struct sba_device *sba,
>  	req = sba_alloc_request(sba);
>  	if (!req)
>  		return NULL;
> -	req->fence = (flags & DMA_PREP_FENCE) ? true : false;
> +	if (flags & DMA_PREP_FENCE)
> +		req->flags |= SBA_REQUEST_FENCE;
>  
>  	/* Fillup request message */
>  	sba_fillup_memcpy_msg(req, req->cmds, &req->msg,
> @@ -796,7 +804,8 @@ sba_prep_dma_xor_req(struct sba_device *sba,
>  	req = sba_alloc_request(sba);
>  	if (!req)
>  		return NULL;
> -	req->fence = (flags & DMA_PREP_FENCE) ? true : false;
> +	if (flags & DMA_PREP_FENCE)
> +		req->flags |= SBA_REQUEST_FENCE;
>  
>  	/* Fillup request message */
>  	sba_fillup_xor_msg(req, req->cmds, &req->msg,
> @@ -1005,7 +1014,8 @@ sba_prep_dma_pq_req(struct sba_device *sba, dma_addr_t off,
>  	req = sba_alloc_request(sba);
>  	if (!req)
>  		return NULL;
> -	req->fence = (flags & DMA_PREP_FENCE) ? true : false;
> +	if (flags & DMA_PREP_FENCE)
> +		req->flags |= SBA_REQUEST_FENCE;
>  
>  	/* Fillup request messages */
>  	sba_fillup_pq_msg(req, dmaf_continue(flags),
> @@ -1258,7 +1268,8 @@ sba_prep_dma_pq_single_req(struct sba_device *sba, dma_addr_t off,
>  	req = sba_alloc_request(sba);
>  	if (!req)
>  		return NULL;
> -	req->fence = (flags & DMA_PREP_FENCE) ? true : false;
> +	if (flags & DMA_PREP_FENCE)
> +		req->flags |= SBA_REQUEST_FENCE;
>  
>  	/* Fillup request messages */
>  	sba_fillup_pq_single_msg(req,  dmaf_continue(flags),
> @@ -1425,7 +1436,7 @@ static void sba_receive_message(struct mbox_client *cl, void *msg)
>  	req = req->first;
>  
>  	/* Update request */
> -	if (req->state == SBA_REQUEST_STATE_RECEIVED)
> +	if (req->flags & SBA_REQUEST_STATE_RECEIVED)
>  		sba_dma_tx_actions(req);
>  	else
>  		sba_free_chained_requests(req);
> @@ -1488,11 +1499,10 @@ static int sba_prealloc_channel_resources(struct sba_device *sba)
>  		req = &sba->reqs[i];
>  		INIT_LIST_HEAD(&req->node);
>  		req->sba = sba;
> -		req->state = SBA_REQUEST_STATE_FREE;
> +		req->flags = SBA_REQUEST_STATE_FREE;
>  		INIT_LIST_HEAD(&req->next);
>  		req->next_count = 1;
>  		atomic_set(&req->next_pending_count, 0);
> -		req->fence = false;
>  		req->resp = sba->resp_base + p;
>  		req->resp_dma = sba->resp_dma_base + p;
>  		p += sba->hw_resp_size;
> -- 
> 2.7.4
> 

-- 
~Vinod
