All of lore.kernel.org
 help / color / mirror / Atom feed
From: Joe Perches <joe@perches.com>
To: Jeff Kirsher <jeffrey.t.kirsher@intel.com>, davem@davemloft.net
Cc: Alice Michael <alice.michael@intel.com>,
	netdev@vger.kernel.org, nhorman@redhat.com, sassmann@redhat.com,
	Alan Brady <alan.brady@intel.com>,
	Phani Burra <phani.r.burra@intel.com>,
	Joshua Hay <joshua.a.hay@intel.com>,
	Madhu Chittim <madhu.chittim@intel.com>,
	Pavan Kumar Linga <pavan.kumar.linga@intel.com>,
	Donald Skidmore <donald.c.skidmore@intel.com>,
	Jesse Brandeburg <jesse.brandeburg@intel.com>,
	Sridhar Samudrala <sridhar.samudrala@intel.com>
Subject: Re: [net-next v3 06/15] iecm: Implement mailbox functionality
Date: Thu, 25 Jun 2020 19:57:31 -0700	[thread overview]
Message-ID: <b2305a5aaefdd64630a6b99c7b46397ccb029fd9.camel@perches.com> (raw)
In-Reply-To: <20200626020737.775377-7-jeffrey.t.kirsher@intel.com>

On Thu, 2020-06-25 at 19:07 -0700, Jeff Kirsher wrote:
> From: Alice Michael <alice.michael@intel.com>
> 
> Implement mailbox setup, take down, and commands.
[]
> diff --git a/drivers/net/ethernet/intel/iecm/iecm_controlq.c b/drivers/net/ethernet/intel/iecm/iecm_controlq.c
[]
> @@ -73,7 +142,74 @@ enum iecm_status iecm_ctlq_add(struct iecm_hw *hw,
>  			       struct iecm_ctlq_create_info *qinfo,
>  			       struct iecm_ctlq_info **cq)

Multiple functions using **cq and *cq can be error prone.

>  {
> -	/* stub */
> +	enum iecm_status status = 0;
> +	bool is_rxq = false;
> +
> +	if (!qinfo->len || !qinfo->buf_size ||
> +	    qinfo->len > IECM_CTLQ_MAX_RING_SIZE ||
> +	    qinfo->buf_size > IECM_CTLQ_MAX_BUF_LEN)
> +		return IECM_ERR_CFG;
> +
> +	*cq = kcalloc(1, sizeof(struct iecm_ctlq_info), GFP_KERNEL);
> +	if (!(*cq))
> +		return IECM_ERR_NO_MEMORY;

pity there is an iecm_status and not just a generic -ENOMEM.
Is there much value in the separate enum?

[]
> @@ -152,7 +388,58 @@ enum iecm_status iecm_ctlq_clean_sq(struct iecm_ctlq_info *cq,
>  				    u16 *clean_count,
>  				    struct iecm_ctlq_msg *msg_status[])
>  {
> -	/* stub */
> +	enum iecm_status ret = 0;
> +	struct iecm_ctlq_desc *desc;
> +	u16 i = 0, num_to_clean;
> +	u16 ntc, desc_err;
> +
> +	if (!cq || !cq->ring_size)
> +		return IECM_ERR_CTLQ_EMPTY;
> +
> +	if (*clean_count == 0)
> +		return 0;
> +	else if (*clean_count > cq->ring_size)
> +		return IECM_ERR_PARAM;

unnecessary else

> +
> +	mutex_lock(&cq->cq_lock);
> +
> +	ntc = cq->next_to_clean;
> +
> +	num_to_clean = *clean_count;
> +
> +	for (i = 0; i < num_to_clean; i++) {
> +		/* Fetch next descriptor and check if marked as done */
> +		desc = IECM_CTLQ_DESC(cq, ntc);
> +		if (!(le16_to_cpu(desc->flags) & IECM_CTLQ_FLAG_DD))
> +			break;
> +
> +		desc_err = le16_to_cpu(desc->ret_val);
> +		if (desc_err) {
> +			/* strip off FW internal code */
> +			desc_err &= 0xff;
> +		}
> +
> +		msg_status[i] = cq->bi.tx_msg[ntc];
> +		msg_status[i]->status = desc_err;
> +
> +		cq->bi.tx_msg[ntc] = NULL;
> +
> +		/* Zero out any stale data */
> +		memset(desc, 0, sizeof(*desc));
> +
> +		ntc++;
> +		if (ntc == cq->ring_size)
> +			ntc = 0;
> +	}
> +
> +	cq->next_to_clean = ntc;
> +
> +	mutex_unlock(&cq->cq_lock);
> +
> +	/* Return number of descriptors actually cleaned */
> +	*clean_count = i;
> +
> +	return ret;
>  }
>  
>  /**
> @@ -175,7 +462,111 @@ enum iecm_status iecm_ctlq_post_rx_buffs(struct iecm_hw *hw,
>  					 u16 *buff_count,
>  					 struct iecm_dma_mem **buffs)
>  {
> -	/* stub */
> +	enum iecm_status status = 0;
> +	struct iecm_ctlq_desc *desc;
> +	u16 ntp = cq->next_to_post;
> +	bool buffs_avail = false;
> +	u16 tbp = ntp + 1;
> +	int i = 0;
> +
> +	if (*buff_count > cq->ring_size)
> +		return IECM_ERR_PARAM;
> +
> +	if (*buff_count > 0)
> +		buffs_avail = true;
> +
> +	mutex_lock(&cq->cq_lock);
> +
> +	if (tbp >= cq->ring_size)
> +		tbp = 0;
> +
> +	if (tbp == cq->next_to_clean)
> +		/* Nothing to do */
> +		goto post_buffs_out;
> +
> +	/* Post buffers for as many as provided or up until the last one used */
> +	while (ntp != cq->next_to_clean) {
> +		desc = IECM_CTLQ_DESC(cq, ntp);
> +
> +		if (!cq->bi.rx_buff[ntp]) {

		if (cq->bi.rx_buff[ntp])
			continue;

and unindent?

> +			if (!buffs_avail) {
> +				/* If the caller hasn't given us any buffers or
> +				 * there are none left, search the ring itself
> +				 * for an available buffer to move to this
> +				 * entry starting at the next entry in the ring
> +				 */
> +				tbp = ntp + 1;
> +
> +				/* Wrap ring if necessary */
> +				if (tbp >= cq->ring_size)
> +					tbp = 0;
> +
> +				while (tbp != cq->next_to_clean) {
> +					if (cq->bi.rx_buff[tbp]) {
> +						cq->bi.rx_buff[ntp] =
> +							cq->bi.rx_buff[tbp];
> +						cq->bi.rx_buff[tbp] = NULL;
> +
> +						/* Found a buffer, no need to
> +						 * search anymore
> +						 */
> +						break;
> +					}
> +
> +					/* Wrap ring if necessary */
> +					tbp++;
> +					if (tbp >= cq->ring_size)
> +						tbp = 0;
> +				}
> +
> +				if (tbp == cq->next_to_clean)
> +					goto post_buffs_out;
> +			} else {
> +				/* Give back pointer to DMA buffer */
> +				cq->bi.rx_buff[ntp] = buffs[i];
> +				i++;
> +
> +				if (i >= *buff_count)
> +					buffs_avail = false;
> +			}
> +		}
> +
> +		desc->flags =
> +			cpu_to_le16(IECM_CTLQ_FLAG_BUF | IECM_CTLQ_FLAG_RD);
> +
> +		/* Post buffers to descriptor */
> +		desc->datalen = cpu_to_le16(cq->bi.rx_buff[ntp]->size);
> +		desc->params.indirect.addr_high =
> +			cpu_to_le32(IECM_HI_DWORD(cq->bi.rx_buff[ntp]->pa));
> +		desc->params.indirect.addr_low =
> +			cpu_to_le32(IECM_LO_DWORD(cq->bi.rx_buff[ntp]->pa));
> +
> +		ntp++;
> +		if (ntp == cq->ring_size)
> +			ntp = 0;
> +	}

[] 

> @@ -186,7 +555,27 @@ iecm_wait_for_event(struct iecm_adapter *adapter,
>  		    enum iecm_vport_vc_state state,
>  		    enum iecm_vport_vc_state err_check)
>  {
> -	/* stub */
> +	enum iecm_status status;
> +	int event;
> +
> +	event = wait_event_timeout(adapter->vchnl_wq,
> +				   test_and_clear_bit(state, adapter->vc_state),
> +				   msecs_to_jiffies(500));
> +	if (event) {
> +		if (test_and_clear_bit(err_check, adapter->vc_state)) {
> +			dev_err(&adapter->pdev->dev,
> +				"VC response error %d", err_check);

missing terminating newlines in the format strings

> +			status = IECM_ERR_CFG;
> +		} else {
> +			status = 0;
> +		}
> +	} else {
> +		/* Timeout occurred */
> +		status = IECM_ERR_NOT_READY;
> +		dev_err(&adapter->pdev->dev, "VC timeout, state = %u", state);

here too



  reply	other threads:[~2020-06-26  2:57 UTC|newest]

Thread overview: 57+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2020-06-26  2:07 [net-next v3 00/15][pull request] 100GbE Intel Wired LAN Driver Updates 2020-06-25 Jeff Kirsher
2020-06-26  2:07 ` [net-next v3 01/15] virtchnl: Extend AVF ops Jeff Kirsher
2020-06-26  2:07 ` [net-next v3 02/15] iecm: Add framework set of header files Jeff Kirsher
2020-06-26  2:07 ` [net-next v3 03/15] iecm: Add TX/RX " Jeff Kirsher
2020-06-26  2:07 ` [net-next v3 04/15] iecm: Common module introduction and function stubs Jeff Kirsher
2020-06-26  2:23   ` Joe Perches
2020-06-26 17:34     ` Brady, Alan
2020-06-26 17:43       ` Joe Perches
2020-06-26  2:07 ` [net-next v3 05/15] iecm: Add basic netdevice functionality Jeff Kirsher
2020-06-26  2:39   ` Joe Perches
2020-06-26 17:38     ` Brady, Alan
2020-06-26  2:07 ` [net-next v3 06/15] iecm: Implement mailbox functionality Jeff Kirsher
2020-06-26  2:57   ` Joe Perches [this message]
2020-06-26 17:44     ` Brady, Alan
2020-06-26 23:11       ` Joe Perches
2020-06-26 19:10   ` Jakub Kicinski
2020-06-29 18:51     ` Brady, Alan
2020-06-26  2:07 ` [net-next v3 07/15] iecm: Implement virtchnl commands Jeff Kirsher
2020-06-26  3:06   ` Joe Perches
2020-06-26 17:51     ` Brady, Alan
2020-06-26  2:07 ` [net-next v3 08/15] iecm: Implement vector allocation Jeff Kirsher
2020-06-26  2:07 ` [net-next v3 09/15] iecm: Init and allocate vport Jeff Kirsher
2020-06-26 19:00   ` Jakub Kicinski
2020-06-29 18:48     ` Brady, Alan
2020-06-26  2:07 ` [net-next v3 10/15] iecm: Deinit vport Jeff Kirsher
2020-06-26  2:07 ` [net-next v3 11/15] iecm: Add splitq TX/RX Jeff Kirsher
2020-06-26 19:58   ` Jakub Kicinski
2020-06-27  1:26     ` Joe Perches
2020-06-29 18:57     ` Brady, Alan
2020-06-26  2:07 ` [net-next v3 12/15] iecm: Add singleq TX/RX Jeff Kirsher
2020-06-26  3:12   ` Joe Perches
2020-06-26 17:52     ` Brady, Alan
2020-06-26  2:07 ` [net-next v3 13/15] iecm: Add ethtool Jeff Kirsher
2020-06-26  3:29   ` Joe Perches
2020-06-26 17:57     ` Brady, Alan
2020-06-26 18:57   ` Jakub Kicinski
2020-06-29 18:48     ` Brady, Alan
2020-06-26 19:14   ` Jakub Kicinski
2020-06-29 18:53     ` Brady, Alan
2020-06-26 19:27   ` Jakub Kicinski
2020-06-29 21:00     ` Brady, Alan
2020-06-29 21:31       ` Jakub Kicinski
2020-06-29 22:07         ` Brady, Alan
2020-06-26 19:29   ` Jakub Kicinski
2020-07-10 20:16     ` Brady, Alan
2020-06-26  2:07 ` [net-next v3 14/15] iecm: Add iecm to the kernel build system Jeff Kirsher
2020-06-26  3:32   ` Joe Perches
2020-06-29 18:46     ` Brady, Alan
2020-06-26  2:07 ` [net-next v3 15/15] idpf: Introduce idpf driver Jeff Kirsher
2020-06-26  3:35   ` Joe Perches
2020-06-29 18:47     ` Brady, Alan
2020-06-26 18:52   ` Jakub Kicinski
2020-06-30 23:48     ` Kirsher, Jeffrey T
2020-07-01  0:59       ` Jakub Kicinski
2020-07-01  1:13         ` Kirsher, Jeffrey T
2020-06-26  3:37 ` [net-next v3 00/15][pull request] 100GbE Intel Wired LAN Driver Updates 2020-06-25 Joe Perches
2020-06-26 18:58 ` Jakub Kicinski

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=b2305a5aaefdd64630a6b99c7b46397ccb029fd9.camel@perches.com \
    --to=joe@perches.com \
    --cc=alan.brady@intel.com \
    --cc=alice.michael@intel.com \
    --cc=davem@davemloft.net \
    --cc=donald.c.skidmore@intel.com \
    --cc=jeffrey.t.kirsher@intel.com \
    --cc=jesse.brandeburg@intel.com \
    --cc=joshua.a.hay@intel.com \
    --cc=madhu.chittim@intel.com \
    --cc=netdev@vger.kernel.org \
    --cc=nhorman@redhat.com \
    --cc=pavan.kumar.linga@intel.com \
    --cc=phani.r.burra@intel.com \
    --cc=sassmann@redhat.com \
    --cc=sridhar.samudrala@intel.com \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.