From: James Smart <jsmart2021@gmail.com>
To: linux-scsi@vger.kernel.org
Cc: maier@linux.ibm.com, dwagner@suse.de, bvanassche@acm.org,
	James Smart <jsmart2021@gmail.com>,
	Ram Vegesna <ram.vegesna@broadcom.com>
Subject: [PATCH v2 25/32] elx: efct: Hardware IO submission routines
Date: Fri, 20 Dec 2019 14:37:16 -0800
Message-ID: <20191220223723.26563-26-jsmart2021@gmail.com>
In-Reply-To: <20191220223723.26563-1-jsmart2021@gmail.com>

This patch continues the efct driver population.

This patch adds driver definitions for:
Routines that write IOs to the work queue, and routines that send SRRs
(single request/response sequences) and raw frames.

Signed-off-by: Ram Vegesna <ram.vegesna@broadcom.com>
Signed-off-by: James Smart <jsmart2021@gmail.com>
---
 drivers/scsi/elx/efct/efct_hw.c | 625 ++++++++++++++++++++++++++++++++++++++++
 drivers/scsi/elx/efct/efct_hw.h |  19 ++
 2 files changed, 644 insertions(+)
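
Note for reviewers: the submission flow added by this patch is roughly the
following, sketched in hypothetical caller terms (all names are from this
patch; error handling is abbreviated):

	io->wq = efct_hw_queue_next_wq(hw, io);	/* pick a work queue */
	io->wq->use_count++;
	efct_hw_add_io_timed_wqe(hw, io);	/* arm the WQE timeout */
	if (efct_hw_wq_write(io->wq, &io->wqe) < 0) {
		/* Failed to queue; undo the bookkeeping above. */
		io->xbusy = false;
		efct_hw_remove_io_timed_wqe(hw, io);
	}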

diff --git a/drivers/scsi/elx/efct/efct_hw.c b/drivers/scsi/elx/efct/efct_hw.c
index 43f1ff526694..440c4fa196bf 100644
--- a/drivers/scsi/elx/efct/efct_hw.c
+++ b/drivers/scsi/elx/efct/efct_hw.c
@@ -3192,6 +3192,68 @@ efct_hw_eq_process(struct efct_hw *hw, struct hw_eq *eq,
 	return 0;
 }
 
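+/* Write the WQE to the queue; the caller must hold wq->queue->lock. */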
+static int
+_efct_hw_wq_write(struct hw_wq *wq, struct efct_hw_wqe *wqe)
+{
+	/* Every so often, set the wqec bit to generate WQE-consumed completions */
+	if (wq->wqec_count)
+		wq->wqec_count--;
+
+	if (wq->wqec_count == 0) {
+		struct sli4_generic_wqe *genwqe = (void *)wqe->wqebuf;
+
+		genwqe->cmdtype_wqec_byte |= SLI4_GEN_WQE_WQEC;
+		wq->wqec_count = wq->wqec_set_count;
+	}
+
+	/* Decrement WQ free count */
+	wq->free_count--;
+
+	if (sli_wq_write(&wq->hw->sli, wq->queue, wqe->wqebuf) < 0)
+		return -1;
+
+	return 0;
+}
+
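+/*
+ * Return completed credits to the WQ free count and submit as many
+ * pending WQEs as the updated free count allows.
+ */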
+static void
+hw_wq_submit_pending(struct hw_wq *wq, u32 update_free_count)
+{
+	struct efct_hw_wqe *wqe;
+	unsigned long flags = 0;
+
+	spin_lock_irqsave(&wq->queue->lock, flags);
+
+	/* Update free count with value passed in */
+	wq->free_count += update_free_count;
+
+	while ((wq->free_count > 0) && (!list_empty(&wq->pending_list))) {
+		wqe = list_first_entry(&wq->pending_list,
+				       struct efct_hw_wqe, list_entry);
+		list_del(&wqe->list_entry);
+		_efct_hw_wq_write(wq, wqe);
+
+		if (wqe->abort_wqe_submit_needed) {
+			wqe->abort_wqe_submit_needed = false;
+			sli_abort_wqe(&wq->hw->sli, wqe->wqebuf,
+				      wq->hw->sli.wqe_size,
+				      SLI_ABORT_XRI, wqe->send_abts, wqe->id,
+				      0, wqe->abort_reqtag, SLI4_CQ_DEFAULT);
+			INIT_LIST_HEAD(&wqe->list_entry);
+			list_add_tail(&wqe->list_entry, &wq->pending_list);
+			wq->wq_pending_count++;
+		}
+	}
+
+	spin_unlock_irqrestore(&wq->queue->lock, flags);
+}
+
 void
 efct_hw_cq_process(struct efct_hw *hw, struct hw_cq *cq)
 {
@@ -3390,3 +3452,566 @@ efct_hw_flush(struct efct_hw *hw)
 
 	return 0;
 }
+
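+/*
+ * Write a WQE to the work queue, or park it on the WQ pending list when
+ * the queue is out of free entries; queued WQEs are submitted later as
+ * completions return credits (see hw_wq_submit_pending()).
+ */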
+int
+efct_hw_wq_write(struct hw_wq *wq, struct efct_hw_wqe *wqe)
+{
+	int rc = 0;
+	unsigned long flags = 0;
+
+	spin_lock_irqsave(&wq->queue->lock, flags);
+	if (!list_empty(&wq->pending_list)) {
+		INIT_LIST_HEAD(&wqe->list_entry);
+		list_add_tail(&wqe->list_entry, &wq->pending_list);
+		wq->wq_pending_count++;
+		while (wq->free_count > 0 &&
+		       !list_empty(&wq->pending_list)) {
+			wqe = list_first_entry(&wq->pending_list,
+					       struct efct_hw_wqe, list_entry);
+			list_del(&wqe->list_entry);
+			rc = _efct_hw_wq_write(wq, wqe);
+			if (rc < 0)
+				break;
+			if (wqe->abort_wqe_submit_needed) {
+				wqe->abort_wqe_submit_needed = false;
+				sli_abort_wqe(&wq->hw->sli,
+					      wqe->wqebuf,
+					      wq->hw->sli.wqe_size,
+					      SLI_ABORT_XRI,
+					      wqe->send_abts, wqe->id,
+					      0, wqe->abort_reqtag,
+					      SLI4_CQ_DEFAULT);
+
+				INIT_LIST_HEAD(&wqe->list_entry);
+				list_add_tail(&wqe->list_entry,
+					      &wq->pending_list);
+				wq->wq_pending_count++;
+			}
+		}
+	} else if (wq->free_count > 0) {
+		rc = _efct_hw_wq_write(wq, wqe);
+	} else {
+		INIT_LIST_HEAD(&wqe->list_entry);
+		list_add_tail(&wqe->list_entry, &wq->pending_list);
+		wq->wq_pending_count++;
+	}
+
+	spin_unlock_irqrestore(&wq->queue->lock, flags);
+
+	return rc;
+}
+
+/**
+ * efct_hw_srrs_send() - Send a single request/response sequence (SRRS).
+ *
+ * This routine supports communication sequences consisting of a single
+ * request and single response between two endpoints. Examples include:
+ *  - Sending an ELS request.
+ *  - Sending an ELS response - To send an ELS response, the caller must provide
+ * the OX_ID from the received request.
+ *  - Sending an FC Common Transport (FC-CT) request - To send an FC-CT
+ * request,
+ * the caller must provide the R_CTL, TYPE, and DF_CTL
+ * values to place in the FC frame header.
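+ *
+ * A minimal caller sketch for an ELS request; illustrative only, with the
+ * HW IO, DMA buffers, remote node and completion callback assumed to be
+ * set up elsewhere:
+ *
+ *	union efct_hw_io_param_u iparam = { };
+ *
+ *	iparam.els.timeout = 30;
+ *	if (efct_hw_srrs_send(hw, EFCT_HW_ELS_REQ, io, &els_req, req_len,
+ *			      &els_rsp, rnode, &iparam, els_done_cb, arg))
+ *		;	/* handle submission failure */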
+ */
+enum efct_hw_rtn
+efct_hw_srrs_send(struct efct_hw *hw, enum efct_hw_io_type type,
+		  struct efct_hw_io *io,
+		  struct efc_dma *send, u32 len,
+		  struct efc_dma *receive, struct efc_remote_node *rnode,
+		  union efct_hw_io_param_u *iparam,
+		  efct_hw_srrs_cb_t cb, void *arg)
+{
+	struct sli4_sge	*sge = NULL;
+	enum efct_hw_rtn	rc = EFCT_HW_RTN_SUCCESS;
+	u16	local_flags = 0;
+	u32 sge0_flags;
+	u32 sge1_flags;
+
+	if (!io || !rnode || !iparam) {
+		pr_err("bad param hw=%p io=%p s=%p r=%p rn=%p iparam=%p\n",
+			hw, io, send, receive, rnode, iparam);
+		return EFCT_HW_RTN_ERROR;
+	}
+
+	if (hw->state != EFCT_HW_STATE_ACTIVE) {
+		efc_log_test(hw->os,
+			      "cannot send SRRS, HW state=%d\n", hw->state);
+		return EFCT_HW_RTN_ERROR;
+	}
+
+	io->rnode = rnode;
+	io->type  = type;
+	io->done = cb;
+	io->arg  = arg;
+
+	sge = io->sgl->virt;
+
+	/* clear both SGE */
+	memset(io->sgl->virt, 0, 2 * sizeof(struct sli4_sge));
+
+	sge0_flags = le32_to_cpu(sge[0].dw2_flags);
+	sge1_flags = le32_to_cpu(sge[1].dw2_flags);
+	if (send) {
+		sge[0].buffer_address_high =
+			cpu_to_le32(upper_32_bits(send->phys));
+		sge[0].buffer_address_low  =
+			cpu_to_le32(lower_32_bits(send->phys));
+
+		sge0_flags |= (SLI4_SGE_TYPE_DATA << SLI4_SGE_TYPE_SHIFT);
+
+		sge[0].buffer_length = cpu_to_le32(len);
+	}
+
+	if (type == EFCT_HW_ELS_REQ || type == EFCT_HW_FC_CT) {
+		sge[1].buffer_address_high =
+			cpu_to_le32(upper_32_bits(receive->phys));
+		sge[1].buffer_address_low  =
+			cpu_to_le32(lower_32_bits(receive->phys));
+
+		sge1_flags |= (SLI4_SGE_TYPE_DATA << SLI4_SGE_TYPE_SHIFT);
+		sge1_flags |= SLI4_SGE_LAST;
+
+		sge[1].buffer_length = cpu_to_le32(receive->size);
+	} else {
+		sge0_flags |= SLI4_SGE_LAST;
+	}
+
+	sge[0].dw2_flags = cpu_to_le32(sge0_flags);
+	sge[1].dw2_flags = cpu_to_le32(sge1_flags);
+
+	switch (type) {
+	case EFCT_HW_ELS_REQ:
+		if (!send ||
+		    sli_els_request64_wqe(&hw->sli, io->wqe.wqebuf,
+					  hw->sli.wqe_size, io->sgl,
+					*((u8 *)send->virt),
+					len, receive->size,
+					iparam->els.timeout,
+					io->indicator, io->reqtag,
+					SLI4_CQ_DEFAULT, rnode->indicator,
+					rnode->sport->indicator,
+					rnode->node_group, rnode->attached,
+					rnode->fc_id, rnode->sport->fc_id)) {
+			efc_log_err(hw->os, "REQ WQE error\n");
+			rc = EFCT_HW_RTN_ERROR;
+		}
+		break;
+	case EFCT_HW_ELS_RSP:
+		if (!send ||
+		    sli_xmit_els_rsp64_wqe(&hw->sli, io->wqe.wqebuf,
+					   hw->sli.wqe_size, send, len,
+					io->indicator, io->reqtag,
+					SLI4_CQ_DEFAULT, iparam->els.ox_id,
+					rnode->indicator,
+					rnode->sport->indicator,
+					rnode->node_group, rnode->attached,
+					rnode->fc_id,
+					local_flags, U32_MAX)) {
+			efc_log_err(hw->os, "RSP WQE error\n");
+			rc = EFCT_HW_RTN_ERROR;
+		}
+		break;
+	case EFCT_HW_ELS_RSP_SID:
+		if (!send ||
+		    sli_xmit_els_rsp64_wqe(&hw->sli, io->wqe.wqebuf,
+					   hw->sli.wqe_size, send, len,
+					io->indicator, io->reqtag,
+					SLI4_CQ_DEFAULT,
+					iparam->els_sid.ox_id,
+					rnode->indicator,
+					rnode->sport->indicator,
+					rnode->node_group, rnode->attached,
+					rnode->fc_id,
+					local_flags, iparam->els_sid.s_id)) {
+			efc_log_err(hw->os, "RSP (SID) WQE error\n");
+			rc = EFCT_HW_RTN_ERROR;
+		}
+		break;
+	case EFCT_HW_FC_CT:
+		if (!send ||
+		    sli_gen_request64_wqe(&hw->sli, io->wqe.wqebuf,
+					  hw->sli.wqe_size, io->sgl,
+					len, receive->size,
+					iparam->fc_ct.timeout, io->indicator,
+					io->reqtag, SLI4_CQ_DEFAULT,
+					rnode->node_group, rnode->fc_id,
+					rnode->indicator,
+					iparam->fc_ct.r_ctl,
+					iparam->fc_ct.type,
+					iparam->fc_ct.df_ctl)) {
+			efc_log_err(hw->os, "GEN WQE error\n");
+			rc = EFCT_HW_RTN_ERROR;
+		}
+		break;
+	case EFCT_HW_FC_CT_RSP:
+		if (!send ||
+		    sli_xmit_sequence64_wqe(&hw->sli, io->wqe.wqebuf,
+					    hw->sli.wqe_size, io->sgl,
+					len, iparam->fc_ct_rsp.timeout,
+					iparam->fc_ct_rsp.ox_id,
+					io->indicator, io->reqtag,
+					rnode->node_group, rnode->fc_id,
+					rnode->indicator,
+					iparam->fc_ct_rsp.r_ctl,
+					iparam->fc_ct_rsp.type,
+					iparam->fc_ct_rsp.df_ctl)) {
+			efc_log_err(hw->os, "XMIT SEQ WQE error\n");
+			rc = EFCT_HW_RTN_ERROR;
+		}
+		break;
+	case EFCT_HW_BLS_ACC:
+	case EFCT_HW_BLS_RJT:
+	{
+		struct sli_bls_payload	bls;
+
+		if (type == EFCT_HW_BLS_ACC) {
+			bls.type = SLI4_SLI_BLS_ACC;
+			memcpy(&bls.u.acc, iparam->bls.payload,
+			       sizeof(bls.u.acc));
+		} else {
+			bls.type = SLI4_SLI_BLS_RJT;
+			memcpy(&bls.u.rjt, iparam->bls.payload,
+			       sizeof(bls.u.rjt));
+		}
+
+		bls.ox_id = cpu_to_le16(iparam->bls.ox_id);
+		bls.rx_id = cpu_to_le16(iparam->bls.rx_id);
+
+		if (sli_xmit_bls_rsp64_wqe(&hw->sli, io->wqe.wqebuf,
+					   hw->sli.wqe_size, &bls,
+					io->indicator, io->reqtag,
+					SLI4_CQ_DEFAULT,
+					rnode->attached, rnode->node_group,
+					rnode->indicator,
+					rnode->sport->indicator,
+					rnode->fc_id, rnode->sport->fc_id,
+					U32_MAX)) {
+			efc_log_err(hw->os, "XMIT_BLS_RSP64 WQE error\n");
+			rc = EFCT_HW_RTN_ERROR;
+		}
+		break;
+	}
+	case EFCT_HW_BLS_ACC_SID:
+	{
+		struct sli_bls_payload	bls;
+
+		bls.type = SLI4_SLI_BLS_ACC;
+		memcpy(&bls.u.acc, iparam->bls_sid.payload,
+		       sizeof(bls.u.acc));
+
+		bls.ox_id = cpu_to_le16(iparam->bls_sid.ox_id);
+		bls.rx_id = cpu_to_le16(iparam->bls_sid.rx_id);
+
+		if (sli_xmit_bls_rsp64_wqe(&hw->sli, io->wqe.wqebuf,
+					   hw->sli.wqe_size, &bls,
+					io->indicator, io->reqtag,
+					SLI4_CQ_DEFAULT,
+					rnode->attached, rnode->node_group,
+					rnode->indicator,
+					rnode->sport->indicator,
+					rnode->fc_id, rnode->sport->fc_id,
+					iparam->bls_sid.s_id)) {
+			efc_log_err(hw->os, "XMIT_BLS_RSP64 WQE SID error\n");
+			rc = EFCT_HW_RTN_ERROR;
+		}
+		break;
+	}
+	default:
+		efc_log_err(hw->os, "bad SRRS type %#x\n", type);
+		rc = EFCT_HW_RTN_ERROR;
+	}
+
+	if (rc == EFCT_HW_RTN_SUCCESS) {
+		if (!io->wq)
+			io->wq = efct_hw_queue_next_wq(hw, io);
+
+		io->xbusy = true;
+
+		/*
+		 * Add IO to active io wqe list before submitting, in case the
+		 * wcqe processing preempts this thread.
+		 */
+		io->wq->use_count++;
+		efct_hw_add_io_timed_wqe(hw, io);
+		rc = efct_hw_wq_write(io->wq, &io->wqe);
+		if (rc >= 0) {
+			/* non-negative return is success */
+			rc = 0;
+		} else {
+			/* failed to write wqe, remove from active wqe list */
+			efc_log_err(hw->os,
+				    "efct_hw_wq_write failed: %d\n", rc);
+			io->xbusy = false;
+			efct_hw_remove_io_timed_wqe(hw, io);
+		}
+	}
+
+	return rc;
+}
+
+/**
+ * efct_hw_io_send() - Send a read, write, or response IO.
+ *
+ * This routine supports sending a higher-level IO (for example, FCP) between
+ * two endpoints as a target or initiator. Examples include:
+ *  - Sending read data and good response (target).
+ *  - Sending a response (target with no data or after receiving write data).
+ * This routine assumes all IOs use the SGL associated with the HW IO. Prior to
+ * calling this routine, the data should be loaded using efct_hw_io_add_sge().
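+ *
+ * A minimal target-read caller sketch; illustrative only, with the HW IO,
+ * SGL and remote node assumed to be set up elsewhere:
+ *
+ *	union efct_hw_io_param_u iparam = { };
+ *
+ *	iparam.fcp_tgt.ox_id = ox_id;	/* from the received command */
+ *	iparam.fcp_tgt.offset = 0;
+ *	iparam.fcp_tgt.timeout = 30;
+ *	if (efct_hw_io_send(hw, EFCT_HW_IO_TARGET_READ, io, xfer_len,
+ *			    &iparam, rnode, target_io_cb, arg))
+ *		;	/* handle submission failure */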
+ */
+enum efct_hw_rtn
+efct_hw_io_send(struct efct_hw *hw, enum efct_hw_io_type type,
+		struct efct_hw_io *io,
+		u32 len, union efct_hw_io_param_u *iparam,
+		struct efc_remote_node *rnode, void *cb, void *arg)
+{
+	enum efct_hw_rtn	rc = EFCT_HW_RTN_SUCCESS;
+	u32	rpi;
+	bool send_wqe = true;
+
+	if (!io || !rnode || !iparam) {
+		pr_err("bad param hw=%p io=%p iparam=%p rnode=%p\n",
+			hw, io, iparam, rnode);
+		return EFCT_HW_RTN_ERROR;
+	}
+
+	if (hw->state != EFCT_HW_STATE_ACTIVE) {
+		efc_log_err(hw->os, "cannot send IO, HW state=%d\n",
+			     hw->state);
+		return EFCT_HW_RTN_ERROR;
+	}
+
+	rpi = rnode->indicator;
+
+	/*
+	 * Save state needed during later stages
+	 */
+	io->rnode = rnode;
+	io->type  = type;
+	io->done  = cb;
+	io->arg   = arg;
+
+	/*
+	 * Format the work queue entry used to send the IO
+	 */
+	switch (type) {
+	case EFCT_HW_IO_TARGET_WRITE: {
+		u16 flags = iparam->fcp_tgt.flags;
+		struct fcp_txrdy *xfer = io->xfer_rdy.virt;
+
+		/*
+		 * Fill in the XFER_RDY for IF_TYPE 0 devices
+		 */
+		xfer->ft_data_ro = cpu_to_be32(iparam->fcp_tgt.offset);
+		xfer->ft_burst_len = cpu_to_be32(len);
+
+		if (io->xbusy)
+			flags |= SLI4_IO_CONTINUATION;
+		else
+			flags &= ~SLI4_IO_CONTINUATION;
+
+		io->tgt_wqe_timeout = iparam->fcp_tgt.timeout;
+
+		if (sli_fcp_treceive64_wqe(&hw->sli,
+					   io->wqe.wqebuf,
+					   hw->sli.wqe_size,
+					   &io->def_sgl,
+					   io->first_data_sge,
+					   iparam->fcp_tgt.offset, len,
+					   io->indicator, io->reqtag,
+					   SLI4_CQ_DEFAULT,
+					   iparam->fcp_tgt.ox_id, rpi,
+					   rnode->node_group,
+					   rnode->fc_id, flags,
+					   iparam->fcp_tgt.dif_oper,
+					   iparam->fcp_tgt.blk_size,
+					   iparam->fcp_tgt.cs_ctl,
+					   iparam->fcp_tgt.app_id)) {
+			efc_log_err(hw->os, "TRECEIVE WQE error\n");
+			rc = EFCT_HW_RTN_ERROR;
+		}
+		break;
+	}
+	case EFCT_HW_IO_TARGET_READ: {
+		u16 flags = iparam->fcp_tgt.flags;
+
+		if (io->xbusy)
+			flags |= SLI4_IO_CONTINUATION;
+		else
+			flags &= ~SLI4_IO_CONTINUATION;
+
+		io->tgt_wqe_timeout = iparam->fcp_tgt.timeout;
+		if (sli_fcp_tsend64_wqe(&hw->sli, io->wqe.wqebuf,
+					hw->sli.wqe_size, &io->def_sgl,
+					io->first_data_sge,
+					iparam->fcp_tgt.offset, len,
+					io->indicator, io->reqtag,
+					SLI4_CQ_DEFAULT, iparam->fcp_tgt.ox_id,
+					rpi, rnode->node_group,
+					rnode->fc_id, flags,
+					iparam->fcp_tgt.dif_oper,
+					iparam->fcp_tgt.blk_size,
+					iparam->fcp_tgt.cs_ctl,
+					iparam->fcp_tgt.app_id)) {
+			efc_log_err(hw->os, "TSEND WQE error\n");
+			rc = EFCT_HW_RTN_ERROR;
+		}
+		break;
+	}
+	case EFCT_HW_IO_TARGET_RSP: {
+		u16 flags = iparam->fcp_tgt.flags;
+
+		if (io->xbusy)
+			flags |= SLI4_IO_CONTINUATION;
+		else
+			flags &= ~SLI4_IO_CONTINUATION;
+
+		io->tgt_wqe_timeout = iparam->fcp_tgt.timeout;
+		if (sli_fcp_trsp64_wqe(&hw->sli, io->wqe.wqebuf,
+				       hw->sli.wqe_size, &io->def_sgl,
+				       len, io->indicator, io->reqtag,
+				       SLI4_CQ_DEFAULT, iparam->fcp_tgt.ox_id,
+				       rpi, rnode->node_group, rnode->fc_id,
+				       flags, iparam->fcp_tgt.cs_ctl,
+				       0, iparam->fcp_tgt.app_id)) {
+			efc_log_err(hw->os, "TRSP WQE error\n");
+			rc = EFCT_HW_RTN_ERROR;
+		}
+
+		break;
+	}
+	default:
+		efc_log_err(hw->os, "unsupported IO type %#x\n", type);
+		rc = EFCT_HW_RTN_ERROR;
+	}
+
+	if (send_wqe && rc == EFCT_HW_RTN_SUCCESS) {
+		if (!io->wq)
+			io->wq = efct_hw_queue_next_wq(hw, io);
+
+		io->xbusy = true;
+
+		/*
+		 * Add IO to active io wqe list before submitting, in case the
+		 * wcqe processing preempts this thread.
+		 */
+		hw->tcmd_wq_submit[io->wq->instance]++;
+		io->wq->use_count++;
+		efct_hw_add_io_timed_wqe(hw, io);
+		rc = efct_hw_wq_write(io->wq, &io->wqe);
+		if (rc >= 0) {
+			/* non-negative return is success */
+			rc = 0;
+		} else {
+			/* failed to write wqe, remove from active wqe list */
+			efc_log_err(hw->os,
+				    "efct_hw_wq_write failed: %d\n", rc);
+			io->xbusy = false;
+			efct_hw_remove_io_timed_wqe(hw, io);
+		}
+	}
+
+	return rc;
+}
+
+/**
+ * efct_hw_send_frame() - Send a raw FC frame.
+ *
+ * Send a frame consisting of the given header and payload using a
+ * SEND_FRAME WQE.
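+ *
+ * A minimal caller sketch; illustrative only, with the frame header,
+ * payload DMA buffer and send-frame context assumed to be set up
+ * elsewhere (FC_SOF_N3/FC_EOF_T are the usual single-frame delimiters):
+ *
+ *	if (efct_hw_send_frame(hw, &hdr, FC_SOF_N3, FC_EOF_T, &payload,
+ *			       ctx, frame_done_cb, arg))
+ *		;	/* handle send failure */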
+ */
+enum efct_hw_rtn
+efct_hw_send_frame(struct efct_hw *hw, struct fc_frame_header *hdr,
+		   u8 sof, u8 eof, struct efc_dma *payload,
+		   struct efct_hw_send_frame_context *ctx,
+		   void (*callback)(void *arg, u8 *cqe, int status),
+		   void *arg)
+{
+	int rc;
+	struct efct_hw_wqe *wqe;
+	u32 xri;
+	struct hw_wq *wq;
+
+	wqe = &ctx->wqe;
+
+	/* populate the callback object */
+	ctx->hw = hw;
+
+	/* Fetch and populate request tag */
+	ctx->wqcb = efct_hw_reqtag_alloc(hw, callback, arg);
+	if (!ctx->wqcb) {
+		efc_log_err(hw->os, "can't allocate request tag\n");
+		return EFCT_HW_RTN_NO_RESOURCES;
+	}
+
+	/* Choose a work queue, first look for a class[1] wq, otherwise just
+	 * use wq[0]
+	 */
+	wq = efct_varray_iter_next(hw->wq_class_array[1]);
+	if (!wq)
+		wq = hw->hw_wq[0];
+
+	/* Set XRI and RX_ID in the header based on which WQ, and which
+	 * send_frame_io we are using
+	 */
+	xri = wq->send_frame_io->indicator;
+
+	/* Build the send frame WQE */
+	rc = sli_send_frame_wqe(&hw->sli, wqe->wqebuf,
+				hw->sli.wqe_size, sof, eof,
+				(u32 *)hdr, payload, payload->len,
+				EFCT_HW_SEND_FRAME_TIMEOUT, xri,
+				ctx->wqcb->instance_index);
+	if (rc) {
+		efc_log_err(hw->os, "sli_send_frame_wqe failed: %d\n",
+			     rc);
+		return EFCT_HW_RTN_ERROR;
+	}
+
+	/* Write to WQ */
+	rc = efct_hw_wq_write(wq, wqe);
+	if (rc) {
+		efc_log_err(hw->os, "efct_hw_wq_write failed: %d\n", rc);
+		return EFCT_HW_RTN_ERROR;
+	}
+
+	wq->use_count++;
+
+	return EFCT_HW_RTN_SUCCESS;
+}
+
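+/* Return the IO count for the given type: in-use, free, wait-free or total. */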
+u32
+efct_hw_io_get_count(struct efct_hw *hw,
+		     enum efct_hw_io_count_type io_count_type)
+{
+	struct efct_hw_io *io = NULL;
+	u32 count = 0;
+	unsigned long flags = 0;
+
+	spin_lock_irqsave(&hw->io_lock, flags);
+
+	switch (io_count_type) {
+	case EFCT_HW_IO_INUSE_COUNT:
+		list_for_each_entry(io, &hw->io_inuse, list_entry)
+			count++;
+		break;
+	case EFCT_HW_IO_FREE_COUNT:
+		list_for_each_entry(io, &hw->io_free, list_entry)
+			count++;
+		break;
+	case EFCT_HW_IO_WAIT_FREE_COUNT:
+		list_for_each_entry(io, &hw->io_wait_free, list_entry)
+			count++;
+		break;
+	case EFCT_HW_IO_N_TOTAL_IO_COUNT:
+		count = hw->config.n_io;
+		break;
+	}
+
+	spin_unlock_irqrestore(&hw->io_lock, flags);
+
+	return count;
+}
diff --git a/drivers/scsi/elx/efct/efct_hw.h b/drivers/scsi/elx/efct/efct_hw.h
index 55679e40cc49..1a019594c471 100644
--- a/drivers/scsi/elx/efct/efct_hw.h
+++ b/drivers/scsi/elx/efct/efct_hw.h
@@ -952,4 +952,23 @@ efct_hw_process(struct efct_hw *hw, u32 vector, u32 max_isr_time_msec);
 extern int
 efct_hw_queue_hash_find(struct efct_queue_hash *hash, u16 id);
 
+int efct_hw_wq_write(struct hw_wq *wq, struct efct_hw_wqe *wqe);
+enum efct_hw_rtn
+efct_hw_send_frame(struct efct_hw *hw, struct fc_frame_header *hdr,
+		   u8 sof, u8 eof, struct efc_dma *payload,
+		   struct efct_hw_send_frame_context *ctx,
+		   void (*callback)(void *arg, u8 *cqe, int status),
+		   void *arg);
+typedef int (*efct_hw_srrs_cb_t)(struct efct_hw_io *io,
+				struct efc_remote_node *rnode, u32 length,
+				int status, u32 ext_status, void *arg);
+extern enum efct_hw_rtn
+efct_hw_srrs_send(struct efct_hw *hw, enum efct_hw_io_type type,
+		  struct efct_hw_io *io,
+		  struct efc_dma *send, u32 len,
+		  struct efc_dma *receive, struct efc_remote_node *rnode,
+		  union efct_hw_io_param_u *iparam,
+		  efct_hw_srrs_cb_t cb,
+		  void *arg);
+
 #endif /* __EFCT_H__ */
-- 
2.13.7

