From: Hannes Reinecke <hare@suse.de>
To: James Smart <jsmart2021@gmail.com>, linux-scsi@vger.kernel.org
Cc: maier@linux.ibm.com, dwagner@suse.de, bvanassche@acm.org,
	Ram Vegesna <ram.vegesna@broadcom.com>
Subject: Re: [PATCH v2 19/32] elx: efct: Hardware IO and SGL initialization
Date: Thu, 9 Jan 2020 10:22:03 +0100	[thread overview]
Message-ID: <283ff0c9-58f7-65ff-6bd4-2715554bccc3@suse.de> (raw)
In-Reply-To: <20191220223723.26563-20-jsmart2021@gmail.com>

On 12/20/19 11:37 PM, James Smart wrote:
> This patch continues the efct driver population.
> 
> This patch adds driver definitions for:
> routines to create IO interfaces (WQs, etc.), SGL initialization,
> and hardware feature configuration.
> 
> Signed-off-by: Ram Vegesna <ram.vegesna@broadcom.com>
> Signed-off-by: James Smart <jsmart2021@gmail.com>
> ---
>  drivers/scsi/elx/efct/efct_hw.c | 1480 ++++++++++++++++++++++++++++++++++++---
>  drivers/scsi/elx/efct/efct_hw.h |   46 ++
>  2 files changed, 1427 insertions(+), 99 deletions(-)
> 
> diff --git a/drivers/scsi/elx/efct/efct_hw.c b/drivers/scsi/elx/efct/efct_hw.c
> index 339e904b0276..beca8534813d 100644
> --- a/drivers/scsi/elx/efct/efct_hw.c
> +++ b/drivers/scsi/elx/efct/efct_hw.c
> @@ -240,6 +240,505 @@ efct_logfcfi(struct efct_hw *hw, u32 j, u32 i, u32 id)
>  		     j, hw->config.filter_def[j], i, id);
>  }
>  
> +static inline void
> +efct_hw_init_free_io(struct efct_hw_io *io)
> +{
> +	/*
> +	 * Set io->done to NULL, to avoid any callbacks, should
> +	 * a completion be received for one of these IOs
> +	 */
> +	io->done = NULL;
> +	io->abort_done = NULL;
> +	io->status_saved = false;
> +	io->abort_in_progress = false;
> +	io->rnode = NULL;
> +	io->type = 0xFFFF;
> +	io->wq = NULL;
> +	io->ul_io = NULL;
> +	io->tgt_wqe_timeout = 0;
> +}
> +
> +static void
> +efct_hw_io_restore_sgl(struct efct_hw *hw, struct efct_hw_io *io)
> +{
> +	/* Restore the default */
> +	io->sgl = &io->def_sgl;
> +	io->sgl_count = io->def_sgl_count;
> +
> +	/* Clear the overflow SGL */
> +	io->ovfl_sgl = NULL;
> +	io->ovfl_sgl_count = 0;
> +	io->ovfl_lsp = NULL;
> +}
> +
> +/* Initialize the pool of HW IO objects */
> +static enum efct_hw_rtn
> +efct_hw_setup_io(struct efct_hw *hw)
> +{
> +	u32	i = 0;
> +	struct efct_hw_io	*io = NULL;
> +	uintptr_t	xfer_virt = 0;
> +	uintptr_t	xfer_phys = 0;
> +	u32	index;
> +	bool new_alloc = true;
> +	struct efc_dma *dma;
> +	struct efct *efct = hw->os;
> +
> +	if (!hw->io) {
> +		hw->io = kmalloc_array(hw->config.n_io, sizeof(io),
> +				 GFP_KERNEL);
> +
> +		if (!hw->io)
> +			return EFCT_HW_RTN_NO_MEMORY;
> +
> +		memset(hw->io, 0, hw->config.n_io * sizeof(io));
> +
> +		for (i = 0; i < hw->config.n_io; i++) {
> +			hw->io[i] = kmalloc(sizeof(*io), GFP_KERNEL);
> +			if (!hw->io[i])
> +				goto error;
> +
> +			memset(hw->io[i], 0, sizeof(struct efct_hw_io));
> +		}
> +
> +		/* Create WQE buffs for IO */
> +		hw->wqe_buffs = kmalloc((hw->config.n_io *
> +					     hw->sli.wqe_size),
> +					     GFP_ATOMIC);
> +		if (!hw->wqe_buffs) {
> +			kfree(hw->io);
> +			return EFCT_HW_RTN_NO_MEMORY;
> +		}
> +		memset(hw->wqe_buffs, 0, (hw->config.n_io *
> +					hw->sli.wqe_size));
> +
> +	} else {
> +		/* re-use existing IOs, including SGLs */
> +		new_alloc = false;
> +	}
> +
> +	if (new_alloc) {
> +		dma = &hw->xfer_rdy;
> +		dma->size = sizeof(struct fcp_txrdy) * hw->config.n_io;
> +		dma->virt = dma_alloc_coherent(&efct->pcidev->dev,
> +					       dma->size, &dma->phys, GFP_DMA);
> +		if (!dma->virt)
> +			return EFCT_HW_RTN_NO_MEMORY;
> +	}
> +	xfer_virt = (uintptr_t)hw->xfer_rdy.virt;
> +	xfer_phys = hw->xfer_rdy.phys;
> +
> +	for (i = 0; i < hw->config.n_io; i++) {
> +		struct hw_wq_callback *wqcb;
> +
> +		io = hw->io[i];
> +
> +		/* initialize IO fields */
> +		io->hw = hw;
> +
> +		/* Assign a WQE buff */
> +		io->wqe.wqebuf = &hw->wqe_buffs[i * hw->sli.wqe_size];
> +
> +		/* Allocate the request tag for this IO */
> +		wqcb = efct_hw_reqtag_alloc(hw, efct_hw_wq_process_io, io);
> +		if (!wqcb) {
> +			efc_log_err(hw->os, "can't allocate request tag\n");
> +			return EFCT_HW_RTN_NO_RESOURCES;
> +		}
> +		io->reqtag = wqcb->instance_index;
> +
> +		/* Now for the fields that are initialized on each free */
> +		efct_hw_init_free_io(io);
> +
> +		/* The XB flag isn't cleared on IO free, so init to zero */
> +		io->xbusy = 0;
> +
> +		if (sli_resource_alloc(&hw->sli, SLI_RSRC_XRI,
> +				       &io->indicator, &index)) {
> +			efc_log_err(hw->os,
> +				     "sli_resource_alloc failed @ %d\n", i);
> +			return EFCT_HW_RTN_NO_MEMORY;
> +		}
> +		if (new_alloc) {
> +			dma = &io->def_sgl;
> +			dma->size = hw->config.n_sgl *
> +					sizeof(struct sli4_sge);
> +			dma->virt = dma_alloc_coherent(&efct->pcidev->dev,
> +						       dma->size, &dma->phys,
> +						       GFP_DMA);
> +			if (!dma->virt) {
> +				efc_log_err(hw->os, "dma_alloc fail %d\n", i);
> +				memset(&io->def_sgl, 0,
> +				       sizeof(struct efc_dma));
> +				return EFCT_HW_RTN_NO_MEMORY;
> +			}
> +		}
> +		io->def_sgl_count = hw->config.n_sgl;
> +		io->sgl = &io->def_sgl;
> +		io->sgl_count = io->def_sgl_count;
> +
> +		if (hw->xfer_rdy.size) {
> +			io->xfer_rdy.virt = (void *)xfer_virt;
> +			io->xfer_rdy.phys = xfer_phys;
> +			io->xfer_rdy.size = sizeof(struct fcp_txrdy);
> +
> +			xfer_virt += sizeof(struct fcp_txrdy);
> +			xfer_phys += sizeof(struct fcp_txrdy);
> +		}
> +	}
> +
> +	return EFCT_HW_RTN_SUCCESS;
> +error:
> +	for (i = 0; i < hw->config.n_io && hw->io[i]; i++) {
> +		kfree(hw->io[i]);
> +		hw->io[i] = NULL;
> +	}
> +
> +	kfree(hw->io);
> +	hw->io = NULL;
> +
> +	return EFCT_HW_RTN_NO_MEMORY;
> +}
> +
> +static enum efct_hw_rtn
> +efct_hw_init_io(struct efct_hw *hw)
> +{
> +	u32	i = 0, io_index = 0;
> +	bool prereg = false;
> +	struct efct_hw_io	*io = NULL;
> +	u8		cmd[SLI4_BMBX_SIZE];
> +	enum efct_hw_rtn rc = EFCT_HW_RTN_SUCCESS;
> +	u32	nremaining;
> +	u32	n = 0;
> +	u32	sgls_per_request = 256;
> +	struct efc_dma	**sgls = NULL;
> +	struct efc_dma	reqbuf;
> +	struct efct *efct = hw->os;
> +
> +	prereg = hw->sli.sgl_pre_registered;
> +
> +	memset(&reqbuf, 0, sizeof(struct efc_dma));
> +	if (prereg) {
> +		sgls = kmalloc_array(sgls_per_request, sizeof(*sgls),
> +				     GFP_ATOMIC);
> +		if (!sgls)
> +			return EFCT_HW_RTN_NO_MEMORY;
> +
> +		reqbuf.size = 32 + sgls_per_request * 16;
> +		reqbuf.virt = dma_alloc_coherent(&efct->pcidev->dev,
> +						 reqbuf.size, &reqbuf.phys,
> +						 GFP_DMA);
> +		if (!reqbuf.virt) {
> +			efc_log_err(hw->os, "dma_alloc reqbuf failed\n");
> +			kfree(sgls);
> +			return EFCT_HW_RTN_NO_MEMORY;
> +		}
> +	}
> +
> +	for (nremaining = hw->config.n_io; nremaining; nremaining -= n) {
> +		if (prereg) {
> +			/* Copy the addresses of the SGLs into the local
> +			 * sgls[] array; break out if the XRI values are
> +			 * not contiguous.
> +			 */
> +			u32 min = (sgls_per_request < nremaining)
> +					? sgls_per_request : nremaining;
> +			for (n = 0; n < min; n++) {
> +				/* Check that we have contiguous xri values */
> +				if (n > 0) {
> +					if (hw->io[io_index + n]->indicator !=
> +					    hw->io[io_index + n - 1]->indicator
> +					    + 1)
> +						break;
> +				}
> +				sgls[n] = hw->io[io_index + n]->sgl;
> +			}
> +
> +			if (!sli_cmd_post_sgl_pages(&hw->sli, cmd,
> +						   sizeof(cmd),
> +						hw->io[io_index]->indicator,
> +						n, sgls, NULL, &reqbuf)) {
> +				if (efct_hw_command(hw, cmd, EFCT_CMD_POLL,
> +						    NULL, NULL)) {
> +					rc = EFCT_HW_RTN_ERROR;
> +					efc_log_err(hw->os,
> +						     "SGL post failed\n");
> +					break;
> +				}
> +			}
> +		} else {
> +			n = nremaining;
> +		}
> +
> +		/* Add to tail if successful */
> +		for (i = 0; i < n; i++, io_index++) {
> +			io = hw->io[io_index];
> +			io->state = EFCT_HW_IO_STATE_FREE;
> +			INIT_LIST_HEAD(&io->list_entry);
> +			list_add_tail(&io->list_entry, &hw->io_free);
> +		}
> +	}
> +
> +	if (prereg) {
> +		dma_free_coherent(&efct->pcidev->dev,
> +				  reqbuf.size, reqbuf.virt, reqbuf.phys);
> +		memset(&reqbuf, 0, sizeof(struct efc_dma));
> +		kfree(sgls);
> +	}
> +
> +	return rc;
> +}
> +
> +static enum efct_hw_rtn
> +efct_hw_config_set_fdt_xfer_hint(struct efct_hw *hw, u32 fdt_xfer_hint)
> +{
> +	enum efct_hw_rtn rc = EFCT_HW_RTN_SUCCESS;
> +	u8 buf[SLI4_BMBX_SIZE];
> +	struct sli4_rqst_cmn_set_features_set_fdt_xfer_hint param;
> +
> +	memset(&param, 0, sizeof(param));
> +	param.fdt_xfer_hint = cpu_to_le32(fdt_xfer_hint);
> +	/* build the set_features command */
> +	sli_cmd_common_set_features(&hw->sli, buf, SLI4_BMBX_SIZE,
> +				    SLI4_SET_FEATURES_SET_FTD_XFER_HINT,
> +				    sizeof(param),
> +				    &param);
> +
> +	rc = efct_hw_command(hw, buf, EFCT_CMD_POLL, NULL, NULL);
> +	if (rc)
> +		efc_log_warn(hw->os, "set FDT hint %d failed: %d\n",
> +			      fdt_xfer_hint, rc);
> +	else
> +		efc_log_info(hw->os, "Set FDT transfer hint to %d\n",
> +			      le32_to_cpu(param.fdt_xfer_hint));
> +
> +	return rc;
> +}
> +
> +/**
> + * efct_hw_config_mrq() - Configure Multi-RQ
> + *
> + * @hw: Hardware context allocated by the caller.
> + * @mode: 1 to set MRQ filters and 0 to set FCFI index
> + * @fcf_index: valid in mode 0
> + *
> + * Returns 0 on success, or a non-zero value on failure.
> + */
> +static int
> +efct_hw_config_mrq(struct efct_hw *hw, u8 mode, u16 fcf_index)
> +{
> +	u8 buf[SLI4_BMBX_SIZE], mrq_bitmask = 0;
> +	struct hw_rq *rq;
> +	struct sli4_cmd_reg_fcfi_mrq *rsp = NULL;
> +	u32 i, j;
> +	struct sli4_cmd_rq_cfg rq_filter[SLI4_CMD_REG_FCFI_MRQ_NUM_RQ_CFG];
> +	int rc;
> +
> +	if (mode == SLI4_CMD_REG_FCFI_SET_FCFI_MODE)
> +		goto issue_cmd;
> +
> +	/* Set the filter match/mask values from hw's filter_def values */
> +	for (i = 0; i < SLI4_CMD_REG_FCFI_NUM_RQ_CFG; i++) {
> +		rq_filter[i].rq_id = cpu_to_le16(0xffff);
> +		rq_filter[i].r_ctl_mask  = (u8)hw->config.filter_def[i];
> +		rq_filter[i].r_ctl_match = (u8)(hw->config.filter_def[i] >> 8);
> +		rq_filter[i].type_mask   = (u8)(hw->config.filter_def[i] >> 16);
> +		rq_filter[i].type_match  = (u8)(hw->config.filter_def[i] >> 24);
> +	}
> +
> +	/* Accumulate counts for each filter type used, build rq_ids[] list */
> +	for (i = 0; i < hw->hw_rq_count; i++) {
> +		rq = hw->hw_rq[i];
> +		for (j = 0; j < SLI4_CMD_REG_FCFI_MRQ_NUM_RQ_CFG; j++) {
> +			if (!(rq->filter_mask & (1U << j)))
> +				continue;
> +
> +			if (rq_filter[j].rq_id != cpu_to_le16(0xffff)) {
> +				/*
> +				 * Already used. Bail out if it's
> +				 * not the RQ set case.
> +				 */
> +				if (!rq->is_mrq ||
> +				    rq_filter[j].rq_id !=
> +				    cpu_to_le16(rq->base_mrq_id)) {
> +					efc_log_err(hw->os, "Wrong q top.\n");
> +					return EFCT_HW_RTN_ERROR;
> +				}
> +				continue;
> +			}
> +
> +			if (!rq->is_mrq) {
> +				rq_filter[j].rq_id = cpu_to_le16(rq->hdr->id);
> +				continue;
> +			}
> +
> +			rq_filter[j].rq_id = cpu_to_le16(rq->base_mrq_id);
> +			mrq_bitmask |= (1U << j);
> +		}
> +	}
> +
> +issue_cmd:
> +	/* Invoke REG_FCFI_MRQ */
> +	rc = sli_cmd_reg_fcfi_mrq(&hw->sli,
> +				  buf,	/* buf */
> +				 SLI4_BMBX_SIZE, /* size */
> +				 mode, /* mode 1 */
> +				 fcf_index, /* fcf_index */
> +				 /* RQ selection policy*/
> +				 hw->config.rq_selection_policy,
> +				 mrq_bitmask, /* MRQ bitmask */
> +				 hw->hw_mrq_count, /* num_mrqs */
> +				 rq_filter);/* RQ filter */
> +	if (rc) {
> +		efc_log_err(hw->os,
> +			     "sli_cmd_reg_fcfi_mrq() failed: %d\n", rc);
> +		return EFCT_HW_RTN_ERROR;
> +	}
> +
> +	rc = efct_hw_command(hw, buf, EFCT_CMD_POLL, NULL, NULL);
> +
> +	rsp = (struct sli4_cmd_reg_fcfi_mrq *)buf;
> +
> +	if (rc != EFCT_HW_RTN_SUCCESS ||
> +	    le16_to_cpu(rsp->hdr.status)) {
> +		efc_log_err(hw->os,
> +			     "FCFI MRQ reg failed. cmd = %x status = %x\n",
> +			     rsp->hdr.command,
> +			     le16_to_cpu(rsp->hdr.status));
> +		return EFCT_HW_RTN_ERROR;
> +	}
> +
> +	if (mode == SLI4_CMD_REG_FCFI_SET_FCFI_MODE)
> +		hw->fcf_indicator = le16_to_cpu(rsp->fcfi);
> +	return 0;
> +}
> +
> +static enum efct_hw_rtn
> +efct_hw_config_watchdog_timer(struct efct_hw *hw);
> +
> +static void
> +efct_hw_watchdog_timer_cb(struct timer_list *t)
> +{
> +	struct efct_hw *hw = from_timer(hw, t, watchdog_timer);
> +
> +	efct_hw_config_watchdog_timer(hw);
> +}
> +
> +static void
> +efct_hw_cb_cfg_watchdog(struct efct_hw *hw, int status, u8 *mqe,
> +			void  *arg)
> +{
> +	u16 timeout = hw->watchdog_timeout;
> +
> +	if (status != 0) {
> +		efc_log_err(hw->os, "config watchdog timer failed, rc = %d\n",
> +			     status);
> +	} else {
> +		if (timeout != 0) {
> +			/*
> +			 * Re-arm the timer 500ms before the timeout
> +			 * expires to keep the heartbeat alive.
> +			 */
> +			timer_setup(&hw->watchdog_timer,
> +				    &efct_hw_watchdog_timer_cb, 0);
> +
> +			mod_timer(&hw->watchdog_timer,
> +				  jiffies +
> +				  msecs_to_jiffies(timeout * 1000 - 500));
> +		} else {
> +			del_timer(&hw->watchdog_timer);
> +		}
> +	}
> +
> +	kfree(mqe);
> +}
> +
> +/* Set configuration parameters for watchdog timer feature */
> +static enum efct_hw_rtn
> +efct_hw_config_watchdog_timer(struct efct_hw *hw)
> +{
> +	enum efct_hw_rtn rc = EFCT_HW_RTN_SUCCESS;
> +	u8 *buf = kmalloc(SLI4_BMBX_SIZE, GFP_ATOMIC);
> +
> +	if (!buf)
> +		return EFCT_HW_RTN_ERROR;
> +
> +	sli4_cmd_lowlevel_set_watchdog(&hw->sli, buf, SLI4_BMBX_SIZE,
> +				       hw->watchdog_timeout);
> +	rc = efct_hw_command(hw, buf, EFCT_CMD_NOWAIT, efct_hw_cb_cfg_watchdog,
> +			     NULL);
> +	if (rc) {
> +		kfree(buf);
> +		efc_log_err(hw->os, "config watchdog timer failed, rc = %d\n",
> +			     rc);
> +	}
> +	return rc;
> +}
> +
> +static enum efct_hw_rtn
> +efct_hw_set_dif_seed(struct efct_hw *hw)
> +{
> +	enum efct_hw_rtn rc = EFCT_HW_RTN_SUCCESS;
> +	u8 buf[SLI4_BMBX_SIZE];
> +	struct sli4_rqst_cmn_set_features_dif_seed seed_param;
> +
> +	memset(&seed_param, 0, sizeof(seed_param));
> +	seed_param.seed = cpu_to_le16(hw->config.dif_seed);
> +
> +	/* send set_features command */
> +	if (!sli_cmd_common_set_features(&hw->sli, buf, SLI4_BMBX_SIZE,
> +					SLI4_SET_FEATURES_DIF_SEED,
> +					4,
> +					(u32 *)&seed_param)) {
> +		rc = efct_hw_command(hw, buf, EFCT_CMD_POLL, NULL, NULL);
> +		if (rc)
> +			efc_log_err(hw->os,
> +				     "efct_hw_command returns %d\n", rc);
> +		else
> +			efc_log_debug(hw->os, "DIF seed set to 0x%x\n",
> +				       hw->config.dif_seed);
> +	} else {
> +		efc_log_err(hw->os,
> +			     "sli_cmd_common_set_features failed\n");
> +		rc = EFCT_HW_RTN_ERROR;
> +	}
> +	return rc;
> +}
> +
> +/* enable sli port health check */
> +static enum efct_hw_rtn
> +efct_hw_config_sli_port_health_check(struct efct_hw *hw, u8 query,
> +				     u8 enable)
> +{
> +	enum efct_hw_rtn rc = EFCT_HW_RTN_SUCCESS;
> +	u8 buf[SLI4_BMBX_SIZE];
> +	struct sli4_rqst_cmn_set_features_health_check param;
> +	u32	health_check_flag = 0;
> +
> +	memset(&param, 0, sizeof(param));
> +
> +	if (enable)
> +		health_check_flag |= SLI4_RQ_HEALTH_CHECK_ENABLE;
> +
> +	if (query)
> +		health_check_flag |= SLI4_RQ_HEALTH_CHECK_QUERY;
> +
> +	param.health_check_dword = cpu_to_le32(health_check_flag);
> +
> +	/* build the set_features command */
> +	sli_cmd_common_set_features(&hw->sli, buf, SLI4_BMBX_SIZE,
> +				    SLI4_SET_FEATURES_SLI_PORT_HEALTH_CHECK,
> +				    sizeof(param),
> +				    &param);
> +
> +	rc = efct_hw_command(hw, buf, EFCT_CMD_POLL, NULL, NULL);
> +	if (rc)
> +		efc_log_err(hw->os, "efct_hw_command returns %d\n", rc);
> +	else
> +		efc_log_test(hw->os, "SLI Port Health Check is enabled\n");
> +
> +	return rc;
> +}
> +
>  enum efct_hw_rtn
>  efct_hw_init(struct efct_hw *hw)
>  {
> @@ -712,104 +1211,6 @@ efct_hw_init(struct efct_hw *hw)
>  	return EFCT_HW_RTN_SUCCESS;
>  }
>  
> -/**
> - * efct_hw_config_mrq() - Configure Multi-RQ
> - *
> - * @hw: Hardware context allocated by the caller.
> - * @mode: 1 to set MRQ filters and 0 to set FCFI index
> - * @fcf_index: valid in mode 0
> - *
> - * Returns 0 on success, or a non-zero value on failure.
> - */
> -static int
> -efct_hw_config_mrq(struct efct_hw *hw, u8 mode, u16 fcf_index)
> -{
> -	u8 buf[SLI4_BMBX_SIZE], mrq_bitmask = 0;
> -	struct hw_rq *rq;
> -	struct sli4_cmd_reg_fcfi_mrq *rsp = NULL;
> -	u32 i, j;
> -	struct sli4_cmd_rq_cfg rq_filter[SLI4_CMD_REG_FCFI_MRQ_NUM_RQ_CFG];
> -	int rc;
> -
> -	if (mode == SLI4_CMD_REG_FCFI_SET_FCFI_MODE)
> -		goto issue_cmd;
> -
> -	/* Set the filter match/mask values from hw's filter_def values */
> -	for (i = 0; i < SLI4_CMD_REG_FCFI_NUM_RQ_CFG; i++) {
> -		rq_filter[i].rq_id = cpu_to_le16(0xffff);
> -		rq_filter[i].r_ctl_mask  = (u8)hw->config.filter_def[i];
> -		rq_filter[i].r_ctl_match = (u8)(hw->config.filter_def[i] >> 8);
> -		rq_filter[i].type_mask   = (u8)(hw->config.filter_def[i] >> 16);
> -		rq_filter[i].type_match  = (u8)(hw->config.filter_def[i] >> 24);
> -	}
> -
> -	/* Accumulate counts for each filter type used, build rq_ids[] list */
> -	for (i = 0; i < hw->hw_rq_count; i++) {
> -		rq = hw->hw_rq[i];
> -		for (j = 0; j < SLI4_CMD_REG_FCFI_MRQ_NUM_RQ_CFG; j++) {
> -			if (!(rq->filter_mask & (1U << j)))
> -				continue;
> -
> -			if (rq_filter[j].rq_id != cpu_to_le16(0xffff)) {
> -				/*
> -				 * Already used. Bailout ifts not RQset
> -				 * case
> -				 */
> -				if (!rq->is_mrq ||
> -				    rq_filter[j].rq_id !=
> -				    cpu_to_le16(rq->base_mrq_id)) {
> -					efc_log_err(hw->os, "Wrong q top.\n");
> -					return EFCT_HW_RTN_ERROR;
> -				}
> -				continue;
> -			}
> -
> -			if (!rq->is_mrq) {
> -				rq_filter[j].rq_id = cpu_to_le16(rq->hdr->id);
> -				continue;
> -			}
> -
> -			rq_filter[j].rq_id = cpu_to_le16(rq->base_mrq_id);
> -			mrq_bitmask |= (1U << j);
> -		}
> -	}
> -
> -issue_cmd:
> -	/* Invoke REG_FCFI_MRQ */
> -	rc = sli_cmd_reg_fcfi_mrq(&hw->sli,
> -				  buf,	/* buf */
> -				 SLI4_BMBX_SIZE, /* size */
> -				 mode, /* mode 1 */
> -				 fcf_index, /* fcf_index */
> -				 /* RQ selection policy*/
> -				 hw->config.rq_selection_policy,
> -				 mrq_bitmask, /* MRQ bitmask */
> -				 hw->hw_mrq_count, /* num_mrqs */
> -				 rq_filter);/* RQ filter */
> -	if (rc) {
> -		efc_log_err(hw->os,
> -			     "sli_cmd_reg_fcfi_mrq() failed: %d\n", rc);
> -		return EFCT_HW_RTN_ERROR;
> -	}
> -
> -	rc = efct_hw_command(hw, buf, EFCT_CMD_POLL, NULL, NULL);
> -
> -	rsp = (struct sli4_cmd_reg_fcfi_mrq *)buf;
> -
> -	if (rc != EFCT_HW_RTN_SUCCESS ||
> -	    le16_to_cpu(rsp->hdr.status)) {
> -		efc_log_err(hw->os,
> -			     "FCFI MRQ reg failed. cmd = %x status = %x\n",
> -			     rsp->hdr.command,
> -			     le16_to_cpu(rsp->hdr.status));
> -		return EFCT_HW_RTN_ERROR;
> -	}
> -
> -	if (mode == SLI4_CMD_REG_FCFI_SET_FCFI_MODE)
> -		hw->fcf_indicator = le16_to_cpu(rsp->fcfi);
> -	return 0;
> -}
> -
>  enum efct_hw_rtn
>  efct_hw_set(struct efct_hw *hw, enum efct_hw_property prop, u32 value)
>  {
> @@ -1221,6 +1622,10 @@ efct_get_wwn(struct efct_hw *hw, enum efct_hw_property prop)
>  	return value;
>  }
>  
> +/*
> + * Allocate an array of struct efc_hw_rq_buffer,
> + * along with the required DMA memory.
> + */
>  static struct efc_hw_rq_buffer *
>  efct_hw_rx_buffer_alloc(struct efct_hw *hw, u32 rqindex, u32 count,
>  			u32 size)
> @@ -1327,6 +1732,7 @@ efct_hw_rx_allocate(struct efct_hw *hw)
>  	return rc ? EFCT_HW_RTN_ERROR : EFCT_HW_RTN_SUCCESS;
>  }
>  
> +/* Post the RQ data buffers to the chip */
>  enum efct_hw_rtn
>  efct_hw_rx_post(struct efct_hw *hw)
>  {
> @@ -1414,7 +1820,7 @@ efct_hw_cmd_submit_pending(struct efct_hw *hw)
>  	return rc;
>  }
>  
> -/**
> +/*
>   * Send a mailbox command to the hardware, and either wait for a completion
>   * (EFCT_CMD_POLL) or get an optional asynchronous completion (EFCT_CMD_NOWAIT).
>   */

Pointless hunk.

> @@ -1575,3 +1981,879 @@ efct_hw_command_cancel(struct efct_hw *hw)
>  
>  	return 0;
>  }
> +
> +static inline struct efct_hw_io *
> +_efct_hw_io_alloc(struct efct_hw *hw)
> +{
> +	struct efct_hw_io	*io = NULL;
> +
> +	if (!list_empty(&hw->io_free)) {
> +		io = list_first_entry(&hw->io_free, struct efct_hw_io,
> +				      list_entry);
> +		list_del(&io->list_entry);
> +	}
> +	if (io) {
> +		INIT_LIST_HEAD(&io->list_entry);
> +		INIT_LIST_HEAD(&io->wqe_link);
> +		INIT_LIST_HEAD(&io->dnrx_link);
> +		list_add_tail(&io->list_entry, &hw->io_inuse);
> +		io->state = EFCT_HW_IO_STATE_INUSE;
> +		io->abort_reqtag = U32_MAX;
> +		kref_init(&io->ref);
> +		io->release = efct_hw_io_free_internal;
> +	} else {
> +		atomic_add_return(1, &hw->io_alloc_failed_count);
> +	}
> +
> +	return io;
> +}
> +
> +struct efct_hw_io *
> +efct_hw_io_alloc(struct efct_hw *hw)
> +{
> +	struct efct_hw_io	*io = NULL;
> +	unsigned long flags = 0;
> +
> +	spin_lock_irqsave(&hw->io_lock, flags);
> +	io = _efct_hw_io_alloc(hw);
> +	spin_unlock_irqrestore(&hw->io_lock, flags);
> +
> +	return io;
> +}
> +
> +/*
> + * When an IO is freed, move it to the correct list depending on
> + * the exchange busy flag and other workarounds.
> + */
> +static void
> +efct_hw_io_free_move_correct_list(struct efct_hw *hw,
> +				  struct efct_hw_io *io)
> +{
> +	if (io->xbusy) {
> +		/*
> +		 * add to wait_free list and wait for XRI_ABORTED CQEs to clean
> +		 * up
> +		 */
> +		INIT_LIST_HEAD(&io->list_entry);
> +		list_add_tail(&io->list_entry, &hw->io_wait_free);
> +		io->state = EFCT_HW_IO_STATE_WAIT_FREE;
> +	} else {
> +		/* IO not busy, add to free list */
> +		INIT_LIST_HEAD(&io->list_entry);
> +		list_add_tail(&io->list_entry, &hw->io_free);
> +		io->state = EFCT_HW_IO_STATE_FREE;
> +	}
> +}
> +
> +static inline void
> +efct_hw_io_free_common(struct efct_hw *hw, struct efct_hw_io *io)
> +{
> +	/* initialize IO fields */
> +	efct_hw_init_free_io(io);
> +
> +	/* Restore default SGL */
> +	efct_hw_io_restore_sgl(hw, io);
> +}
> +
> +/**
> + * Free a previously-allocated HW IO object. Called when
> + * IO refcount goes to zero (host-owned IOs only).
> + */
> +void
> +efct_hw_io_free_internal(struct kref *arg)
> +{
> +	unsigned long flags = 0;
> +	struct efct_hw_io *io =
> +			container_of(arg, struct efct_hw_io, ref);
> +	struct efct_hw *hw = io->hw;
> +
> +	/* perform common cleanup */
> +	efct_hw_io_free_common(hw, io);
> +
> +	spin_lock_irqsave(&hw->io_lock, flags);
> +	/* remove from in-use list */
> +	if (io->list_entry.next &&
> +	    !list_empty(&hw->io_inuse)) {
> +		list_del(&io->list_entry);
> +		efct_hw_io_free_move_correct_list(hw, io);
> +	}
> +	spin_unlock_irqrestore(&hw->io_lock, flags);
> +}
> +
> +int
> +efct_hw_io_free(struct efct_hw *hw, struct efct_hw_io *io)
> +{
> +	/* just put refcount */
> +	if (refcount_read(&io->ref.refcount) <= 0) {
> +		efc_log_err(hw->os,
> +			     "Bad parameter: refcount <= 0 xri=%x tag=%x\n",
> +			    io->indicator, io->reqtag);
> +		return -1;
> +	}
> +
> +	return kref_put(&io->ref, io->release);
> +}
> +

Why this check? Shouldn't kref_get_unless_zero() protect against this?
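
Rough, untested sketch of what I have in mind — efct_hw_io_get() is a
made-up helper name here, not an existing API:

static struct efct_hw_io *
efct_hw_io_get(struct efct_hw_io *io)
{
	/* Take a reference only while the IO is still live */
	if (!io || !kref_get_unless_zero(&io->ref))
		return NULL;

	return io;
}

With that in place efct_hw_io_free() becomes a plain
kref_put(&io->ref, io->release); an IO whose refcount has already
dropped to zero is never handed out, so the refcount_read() check
goes away.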

> +u8
> +efct_hw_io_inuse(struct efct_hw *hw, struct efct_hw_io *io)
> +{
> +	return (refcount_read(&io->ref.refcount) > 0);
> +}
> +
> +struct efct_hw_io *
> +efct_hw_io_lookup(struct efct_hw *hw, u32 xri)
> +{
> +	u32 ioindex;
> +
> +	ioindex = xri - hw->sli.extent[SLI_RSRC_XRI].base[0];
> +	return hw->io[ioindex];
> +}
> +
> +/**
> + * Issue any pending callbacks for an IO and remove it from the
> + * timer and pending lists.
> + */
> +static void
> +efct_hw_io_cancel_cleanup(struct efct_hw *hw, struct efct_hw_io *io)
> +{
> +	efct_hw_done_t done = io->done;
> +	efct_hw_done_t abort_done = io->abort_done;
> +	unsigned long flags = 0;
> +
> +	/* first check active_wqe list and remove if there */
> +	if (io->wqe_link.next)
> +		list_del(&io->wqe_link);
> +
> +	/* Remove from WQ pending list */
> +	if (io->wq && io->wq->pending_list.next)
> +		list_del(&io->list_entry);
> +
> +	if (io->done) {
> +		void *arg = io->arg;
> +
> +		io->done = NULL;
> +		spin_unlock_irqrestore(&hw->io_lock, flags);
> +		done(io, io->rnode, 0, SLI4_FC_WCQE_STATUS_SHUTDOWN, 0, arg);
> +		spin_lock_irqsave(&hw->io_lock, flags);
> +	}
> +
> +	if (io->abort_done) {
> +		void		*abort_arg = io->abort_arg;
> +
> +		io->abort_done = NULL;
> +		spin_unlock_irqrestore(&hw->io_lock, flags);
> +		abort_done(io, io->rnode, 0, SLI4_FC_WCQE_STATUS_SHUTDOWN, 0,
> +			   abort_arg);
> +		spin_lock_irqsave(&hw->io_lock, flags);
> +	}
> +}
> +
> +static int
> +efct_hw_io_cancel(struct efct_hw *hw)
> +{
> +	struct efct_hw_io *io = NULL;
> +	struct efct_hw_io *tmp_io = NULL;
> +	u32 iters = 100; /* One second limit */
> +	unsigned long flags = 0;
> +
> +	/*
> +	 * Manually clean up outstanding IO.
> +	 * Only walk through the list once: the backend will clean up any
> +	 * IOs when done/abort_done is called.
> +	 */
> +	spin_lock_irqsave(&hw->io_lock, flags);
> +	list_for_each_entry_safe(io, tmp_io, &hw->io_inuse, list_entry) {
> +		efct_hw_done_t  done = io->done;
> +		efct_hw_done_t  abort_done = io->abort_done;
> +
> +		efct_hw_io_cancel_cleanup(hw, io);
> +
> +		/*
> +		 * Since this is called in a reset/shutdown
> +		 * case, if there is no callback, then just
> +		 * free the IO.
> +		 *
> +		 * Note: A port-owned XRI cannot be on
> +		 *       the in-use list. We cannot call
> +		 *       efct_hw_io_free() because we already
> +		 *       hold the io_lock.
> +		 */
> +		if (!done && !abort_done) {
> +			efct_hw_io_free_common(hw, io);
> +			list_del(&io->list_entry);
> +			efct_hw_io_free_move_correct_list(hw, io);
> +		}
> +	}
> +
> +	spin_unlock_irqrestore(&hw->io_lock, flags);
> +
> +	/* Give time for the callbacks to complete */
> +	do {
> +		mdelay(10);
> +		iters--;
> +	} while (!list_empty(&hw->io_inuse) && iters);
> +

That is pretty lame.
Can't you use refcounts for 'hw' and just call 'kref_put(hw)'?
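
Rough, untested sketch of what I mean — it assumes struct efct_hw
grows a 'ref' kref and an 'io_drained' completion, neither of which
exists in this patch:

static void efct_hw_release(struct kref *kref)
{
	struct efct_hw *hw = container_of(kref, struct efct_hw, ref);

	/* The last in-flight IO is gone; wake up efct_hw_io_cancel() */
	complete(&hw->io_drained);
}

Each IO would then take kref_get(&hw->ref) in _efct_hw_io_alloc() and
drop it again on free, and efct_hw_io_cancel() would drop its own
initial reference and wait_for_completion(&hw->io_drained) instead of
polling hw->io_inuse with mdelay().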


Cheers,

Hannes
-- 
Dr. Hannes Reinecke		      Teamlead Storage & Networking
hare@suse.de			                  +49 911 74053 688
SUSE Software Solutions Germany GmbH, Maxfeldstr. 5, 90409 Nürnberg
HRB 36809 (AG Nürnberg), GF: Felix Imendörffer
