From: James Smart <jsmart2021@gmail.com>
To: linux-scsi@vger.kernel.org
Cc: dwagner@suse.de, maier@linux.ibm.com, bvanassche@acm.org,
	herbszt@gmx.de, natechancellor@gmail.com, rdunlap@infradead.org,
	hare@suse.de, James Smart <jsmart2021@gmail.com>,
	Ram Vegesna <ram.vegesna@broadcom.com>
Subject: [PATCH v3 26/31] elx: efct: link statistics and SFP data
Date: Sat, 11 Apr 2020 20:32:58 -0700
Message-ID: <20200412033303.29574-27-jsmart2021@gmail.com>
In-Reply-To: <20200412033303.29574-1-jsmart2021@gmail.com>

This patch continues the efct driver population.

This patch adds driver definitions for:
Routines to retrieve link stats and SFP transceiver data.

Signed-off-by: Ram Vegesna <ram.vegesna@broadcom.com>
Signed-off-by: James Smart <jsmart2021@gmail.com>
Reviewed-by: Hannes Reinecke <hare@suse.de>
---
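Note (illustrative only, not part of the patch): the sketch below shows how a
caller might consume the new asynchronous SFP query API added here. The
callback name, the completion-based wait and the page value 0xa0 are
assumptions for the example; the temperature, link-statistics and
host-statistics routines follow the same submit-then-callback pattern, with
the completion handler owning and freeing the mailbox and DMA buffers.
Assumes <linux/completion.h>.

/* Hypothetical completion callback matching the
 * void (*cb)(int, u32, u8 *, void *) signature taken by
 * efct_hw_get_sfp().
 */
static void example_sfp_done(int status, u32 bytes_written,
			     u8 *data, void *arg)
{
	struct completion *done = arg;

	/* 'data' points into a DMA buffer that efct_hw_cb_sfp() frees
	 * as soon as this callback returns, so copy it here if needed.
	 */
	if (!status)
		pr_info("SFP read: %u bytes of page data\n", bytes_written);

	complete(done);
}

/* Hypothetical caller: issue the mailbox command, then wait for the
 * completion callback. 0xa0 is an example transceiver page value.
 */
static void example_query_sfp(struct efct_hw *hw)
{
	DECLARE_COMPLETION_ONSTACK(done);

	if (efct_hw_get_sfp(hw, 0xa0, example_sfp_done, &done) ==
	    EFCT_HW_RTN_SUCCESS)
		wait_for_completion(&done);
}
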
 drivers/scsi/elx/efct/efct_hw.c | 468 ++++++++++++++++++++++++++++++++++++++++
 drivers/scsi/elx/efct/efct_hw.h |  39 ++++
 2 files changed, 507 insertions(+)

diff --git a/drivers/scsi/elx/efct/efct_hw.c b/drivers/scsi/elx/efct/efct_hw.c
index 26dd9bd1eeef..ca2fd237c7d6 100644
--- a/drivers/scsi/elx/efct/efct_hw.c
+++ b/drivers/scsi/elx/efct/efct_hw.c
@@ -8,6 +8,40 @@
 #include "efct_hw.h"
 #include "efct_unsol.h"
 
+struct efct_hw_sfp_cb_arg {
+	void (*cb)(int status, u32 bytes_written,
+		   u8 *data, void *arg);
+	void *arg;
+	struct efc_dma payload;
+};
+
+struct efct_hw_temp_cb_arg {
+	void (*cb)(int status, u32 curr_temp,
+		   u32 crit_temp_thrshld,
+		   u32 warn_temp_thrshld,
+		   u32 norm_temp_thrshld,
+		   u32 fan_off_thrshld,
+		   u32 fan_on_thrshld,
+		   void *arg);
+	void *arg;
+};
+
+struct efct_hw_link_stat_cb_arg {
+	void (*cb)(int status,
+		   u32 num_counters,
+		struct efct_hw_link_stat_counts *counters,
+		void *arg);
+	void *arg;
+};
+
+struct efct_hw_host_stat_cb_arg {
+	void (*cb)(int status,
+		   u32 num_counters,
+		struct efct_hw_host_stat_counts *counters,
+		void *arg);
+	void *arg;
+};
+
 static enum efct_hw_rtn
 efct_hw_link_event_init(struct efct_hw *hw)
 {
@@ -3035,3 +3069,437 @@ efct_hw_io_get_count(struct efct_hw *hw,
 
 	return count;
 }
+
+static int
+efct_hw_cb_sfp(struct efct_hw *hw, int status, u8 *mqe, void *arg)
+{
+	struct efct_hw_sfp_cb_arg *cb_arg = arg;
+	struct sli4_rsp_cmn_read_transceiver_data *mbox_rsp;
+	struct efct *efct = hw->os;
+	struct efc_dma *payload;
+	u32 bytes_written;
+
+	if (cb_arg) {
+		payload = &cb_arg->payload;
+		mbox_rsp = payload->virt;
+		bytes_written = le32_to_cpu(mbox_rsp->hdr.response_length);
+		if (cb_arg->cb) {
+			if (!status && mbox_rsp->hdr.status)
+				status = mbox_rsp->hdr.status;
+			cb_arg->cb(status, bytes_written, mbox_rsp->page_data,
+				   cb_arg->arg);
+		}
+
+		dma_free_coherent(&efct->pcidev->dev,
+				  cb_arg->payload.size, cb_arg->payload.virt,
+				  cb_arg->payload.phys);
+		memset(&cb_arg->payload, 0, sizeof(struct efc_dma));
+		kfree(cb_arg);
+	}
+
+	kfree(mqe);
+	return EFC_SUCCESS;
+}
+
+/* Function to retrieve the SFP information */
+enum efct_hw_rtn
+efct_hw_get_sfp(struct efct_hw *hw, u16 page,
+		void (*cb)(int, u32, u8 *, void *), void *arg)
+{
+	enum efct_hw_rtn rc = EFCT_HW_RTN_ERROR;
+	struct efct_hw_sfp_cb_arg *cb_arg;
+	u8 *mbxdata;
+	struct efct *efct = hw->os;
+	struct efc_dma *dma;
+
+	/* mbxdata holds the header of the command */
+	mbxdata = kmalloc(SLI4_BMBX_SIZE, GFP_KERNEL);
+	if (!mbxdata)
+		return EFCT_HW_RTN_NO_MEMORY;
+
+	memset(mbxdata, 0, SLI4_BMBX_SIZE);
+	/*
+	 * cb_arg holds the data that will be passed to the callback on
+	 * completion
+	 */
+	cb_arg = kmalloc(sizeof(*cb_arg), GFP_KERNEL);
+	if (!cb_arg) {
+		kfree(mbxdata);
+		return EFCT_HW_RTN_NO_MEMORY;
+	}
+	memset(cb_arg, 0, sizeof(struct efct_hw_sfp_cb_arg));
+
+	cb_arg->cb = cb;
+	cb_arg->arg = arg;
+
+	/* payload holds the non-embedded portion */
+	dma = &cb_arg->payload;
+	dma->size = sizeof(struct sli4_rsp_cmn_read_transceiver_data);
+	dma->virt = dma_alloc_coherent(&efct->pcidev->dev,
+				       dma->size, &dma->phys, GFP_DMA);
+	if (!dma->virt) {
+		kfree(cb_arg);
+		kfree(mbxdata);
+		return EFCT_HW_RTN_NO_MEMORY;
+	}
+
+	/* Send the HW command */
+	if (!sli_cmd_common_read_transceiver_data(&hw->sli, mbxdata,
+						 SLI4_BMBX_SIZE, page,
+						 &cb_arg->payload))
+		rc = efct_hw_command(hw, mbxdata, EFCT_CMD_NOWAIT,
+				     efct_hw_cb_sfp, cb_arg);
+
+	if (rc != EFCT_HW_RTN_SUCCESS) {
+		efc_log_test(hw->os,
+			     "READ_TRANSCEIVER_DATA failed with status %d\n",
+			     rc);
+		dma_free_coherent(&efct->pcidev->dev,
+				  cb_arg->payload.size, cb_arg->payload.virt,
+				  cb_arg->payload.phys);
+		memset(&cb_arg->payload, 0, sizeof(struct efc_dma));
+		kfree(cb_arg);
+		kfree(mbxdata);
+	}
+
+	return rc;
+}
+
+static int
+efct_hw_cb_temp(struct efct_hw *hw, int status, u8 *mqe, void *arg)
+{
+	struct sli4_cmd_dump4 *mbox_rsp = (struct sli4_cmd_dump4 *)mqe;
+	struct efct_hw_temp_cb_arg *cb_arg = arg;
+	u32 curr_temp = le32_to_cpu(mbox_rsp->resp_data[0]); /* word 5 */
+	u32 crit_temp_thrshld =
+			le32_to_cpu(mbox_rsp->resp_data[1]); /* word 6 */
+	u32 warn_temp_thrshld =
+			le32_to_cpu(mbox_rsp->resp_data[2]); /* word 7 */
+	u32 norm_temp_thrshld =
+			le32_to_cpu(mbox_rsp->resp_data[3]); /* word 8 */
+	u32 fan_off_thrshld =
+			le32_to_cpu(mbox_rsp->resp_data[4]);   /* word 9 */
+	u32 fan_on_thrshld =
+			le32_to_cpu(mbox_rsp->resp_data[5]);    /* word 10 */
+
+	if (cb_arg) {
+		if (cb_arg->cb) {
+			if (status == 0 && le16_to_cpu(mbox_rsp->hdr.status))
+				status = le16_to_cpu(mbox_rsp->hdr.status);
+			cb_arg->cb(status,
+				   curr_temp,
+				   crit_temp_thrshld,
+				   warn_temp_thrshld,
+				   norm_temp_thrshld,
+				   fan_off_thrshld,
+				   fan_on_thrshld,
+				   cb_arg->arg);
+		}
+
+		kfree(cb_arg);
+	}
+	kfree(mqe);
+
+	return EFC_SUCCESS;
+}
+
+/* Function to retrieve the temperature information */
+enum efct_hw_rtn
+efct_hw_get_temperature(struct efct_hw *hw,
+			void (*cb)(int status,
+				   u32 curr_temp,
+				u32 crit_temp_thrshld,
+				u32 warn_temp_thrshld,
+				u32 norm_temp_thrshld,
+				u32 fan_off_thrshld,
+				u32 fan_on_thrshld,
+				void *arg),
+			void *arg)
+{
+	enum efct_hw_rtn rc = EFCT_HW_RTN_ERROR;
+	struct efct_hw_temp_cb_arg *cb_arg;
+	u8 *mbxdata;
+
+	mbxdata = kmalloc(SLI4_BMBX_SIZE, GFP_KERNEL);
+	if (!mbxdata)
+		return EFCT_HW_RTN_NO_MEMORY;
+
+	memset(mbxdata, 0, SLI4_BMBX_SIZE);
+
+	cb_arg = kmalloc(sizeof(*cb_arg), GFP_KERNEL);
+	if (!cb_arg) {
+		kfree(mbxdata);
+		return EFCT_HW_RTN_NO_MEMORY;
+	}
+
+	cb_arg->cb = cb;
+	cb_arg->arg = arg;
+
+	/* Send the HW command */
+	if (!sli_cmd_dump_type4(&hw->sli, mbxdata, SLI4_BMBX_SIZE,
+			       SLI4_WKI_TAG_SAT_TEM))
+		rc = efct_hw_command(hw, mbxdata, EFCT_CMD_NOWAIT,
+				     efct_hw_cb_temp, cb_arg);
+
+	if (rc != EFCT_HW_RTN_SUCCESS) {
+		efc_log_test(hw->os, "DUMP_TYPE4 failed\n");
+		kfree(mbxdata);
+		kfree(cb_arg);
+	}
+
+	return rc;
+}
+
+static int
+efct_hw_cb_link_stat(struct efct_hw *hw, int status,
+		     u8 *mqe, void *arg)
+{
+	struct sli4_cmd_read_link_stats *mbox_rsp;
+	struct efct_hw_link_stat_cb_arg *cb_arg = arg;
+	struct efct_hw_link_stat_counts counts[EFCT_HW_LINK_STAT_MAX];
+	u32 num_counters;
+	u32 mbox_rsp_flags = 0;
+
+	mbox_rsp = (struct sli4_cmd_read_link_stats *)mqe;
+	mbox_rsp_flags = le32_to_cpu(mbox_rsp->dw1_flags);
+	num_counters = (mbox_rsp_flags & SLI4_READ_LNKSTAT_GEC) ? 20 : 13;
+	memset(counts, 0, sizeof(struct efct_hw_link_stat_counts) *
+				 EFCT_HW_LINK_STAT_MAX);
+
+	counts[EFCT_HW_LINK_STAT_LINK_FAILURE_COUNT].overflow =
+		(mbox_rsp_flags & SLI4_READ_LNKSTAT_W02OF);
+	counts[EFCT_HW_LINK_STAT_LOSS_OF_SYNC_COUNT].overflow =
+		(mbox_rsp_flags & SLI4_READ_LNKSTAT_W03OF);
+	counts[EFCT_HW_LINK_STAT_LOSS_OF_SIGNAL_COUNT].overflow =
+		(mbox_rsp_flags & SLI4_READ_LNKSTAT_W04OF);
+	counts[EFCT_HW_LINK_STAT_PRIMITIVE_SEQ_COUNT].overflow =
+		(mbox_rsp_flags & SLI4_READ_LNKSTAT_W05OF);
+	counts[EFCT_HW_LINK_STAT_INVALID_XMIT_WORD_COUNT].overflow =
+		(mbox_rsp_flags & SLI4_READ_LNKSTAT_W06OF);
+	counts[EFCT_HW_LINK_STAT_CRC_COUNT].overflow =
+		(mbox_rsp_flags & SLI4_READ_LNKSTAT_W07OF);
+	counts[EFCT_HW_LINK_STAT_PRIMITIVE_SEQ_TIMEOUT_COUNT].overflow =
+		(mbox_rsp_flags & SLI4_READ_LNKSTAT_W08OF);
+	counts[EFCT_HW_LINK_STAT_ELASTIC_BUFFER_OVERRUN_COUNT].overflow =
+		(mbox_rsp_flags & SLI4_READ_LNKSTAT_W09OF);
+	counts[EFCT_HW_LINK_STAT_ARB_TIMEOUT_COUNT].overflow =
+		(mbox_rsp_flags & SLI4_READ_LNKSTAT_W10OF);
+	counts[EFCT_HW_LINK_STAT_ADVERTISED_RCV_B2B_CREDIT].overflow =
+		(mbox_rsp_flags & SLI4_READ_LNKSTAT_W11OF);
+	counts[EFCT_HW_LINK_STAT_CURR_RCV_B2B_CREDIT].overflow =
+		(mbox_rsp_flags & SLI4_READ_LNKSTAT_W12OF);
+	counts[EFCT_HW_LINK_STAT_ADVERTISED_XMIT_B2B_CREDIT].overflow =
+		(mbox_rsp_flags & SLI4_READ_LNKSTAT_W13OF);
+	counts[EFCT_HW_LINK_STAT_CURR_XMIT_B2B_CREDIT].overflow =
+		(mbox_rsp_flags & SLI4_READ_LNKSTAT_W14OF);
+	counts[EFCT_HW_LINK_STAT_RCV_EOFA_COUNT].overflow =
+		(mbox_rsp_flags & SLI4_READ_LNKSTAT_W15OF);
+	counts[EFCT_HW_LINK_STAT_RCV_EOFDTI_COUNT].overflow =
+		(mbox_rsp_flags & SLI4_READ_LNKSTAT_W16OF);
+	counts[EFCT_HW_LINK_STAT_RCV_EOFNI_COUNT].overflow =
+		(mbox_rsp_flags & SLI4_READ_LNKSTAT_W17OF);
+	counts[EFCT_HW_LINK_STAT_RCV_SOFF_COUNT].overflow =
+		(mbox_rsp_flags & SLI4_READ_LNKSTAT_W18OF);
+	counts[EFCT_HW_LINK_STAT_RCV_DROPPED_NO_AER_COUNT].overflow =
+		(mbox_rsp_flags & SLI4_READ_LNKSTAT_W19OF);
+	counts[EFCT_HW_LINK_STAT_RCV_DROPPED_NO_RPI_COUNT].overflow =
+		(mbox_rsp_flags & SLI4_READ_LNKSTAT_W20OF);
+	counts[EFCT_HW_LINK_STAT_RCV_DROPPED_NO_XRI_COUNT].overflow =
+		(mbox_rsp_flags & SLI4_READ_LNKSTAT_W21OF);
+	counts[EFCT_HW_LINK_STAT_LINK_FAILURE_COUNT].counter =
+		 le32_to_cpu(mbox_rsp->linkfail_errcnt);
+	counts[EFCT_HW_LINK_STAT_LOSS_OF_SYNC_COUNT].counter =
+		 le32_to_cpu(mbox_rsp->losssync_errcnt);
+	counts[EFCT_HW_LINK_STAT_LOSS_OF_SIGNAL_COUNT].counter =
+		 le32_to_cpu(mbox_rsp->losssignal_errcnt);
+	counts[EFCT_HW_LINK_STAT_PRIMITIVE_SEQ_COUNT].counter =
+		 le32_to_cpu(mbox_rsp->primseq_errcnt);
+	counts[EFCT_HW_LINK_STAT_INVALID_XMIT_WORD_COUNT].counter =
+		 le32_to_cpu(mbox_rsp->inval_txword_errcnt);
+	counts[EFCT_HW_LINK_STAT_CRC_COUNT].counter =
+		le32_to_cpu(mbox_rsp->crc_errcnt);
+	counts[EFCT_HW_LINK_STAT_PRIMITIVE_SEQ_TIMEOUT_COUNT].counter =
+		le32_to_cpu(mbox_rsp->primseq_eventtimeout_cnt);
+	counts[EFCT_HW_LINK_STAT_ELASTIC_BUFFER_OVERRUN_COUNT].counter =
+		 le32_to_cpu(mbox_rsp->elastic_bufoverrun_errcnt);
+	counts[EFCT_HW_LINK_STAT_ARB_TIMEOUT_COUNT].counter =
+		 le32_to_cpu(mbox_rsp->arbit_fc_al_timeout_cnt);
+	counts[EFCT_HW_LINK_STAT_ADVERTISED_RCV_B2B_CREDIT].counter =
+		 le32_to_cpu(mbox_rsp->adv_rx_buftor_to_buf_credit);
+	counts[EFCT_HW_LINK_STAT_CURR_RCV_B2B_CREDIT].counter =
+		 le32_to_cpu(mbox_rsp->curr_rx_buf_to_buf_credit);
+	counts[EFCT_HW_LINK_STAT_ADVERTISED_XMIT_B2B_CREDIT].counter =
+		 le32_to_cpu(mbox_rsp->adv_tx_buf_to_buf_credit);
+	counts[EFCT_HW_LINK_STAT_CURR_XMIT_B2B_CREDIT].counter =
+		 le32_to_cpu(mbox_rsp->curr_tx_buf_to_buf_credit);
+	counts[EFCT_HW_LINK_STAT_RCV_EOFA_COUNT].counter =
+		 le32_to_cpu(mbox_rsp->rx_eofa_cnt);
+	counts[EFCT_HW_LINK_STAT_RCV_EOFDTI_COUNT].counter =
+		 le32_to_cpu(mbox_rsp->rx_eofdti_cnt);
+	counts[EFCT_HW_LINK_STAT_RCV_EOFNI_COUNT].counter =
+		 le32_to_cpu(mbox_rsp->rx_eofni_cnt);
+	counts[EFCT_HW_LINK_STAT_RCV_SOFF_COUNT].counter =
+		 le32_to_cpu(mbox_rsp->rx_soff_cnt);
+	counts[EFCT_HW_LINK_STAT_RCV_DROPPED_NO_AER_COUNT].counter =
+		 le32_to_cpu(mbox_rsp->rx_dropped_no_aer_cnt);
+	counts[EFCT_HW_LINK_STAT_RCV_DROPPED_NO_RPI_COUNT].counter =
+		 le32_to_cpu(mbox_rsp->rx_dropped_no_avail_rpi_rescnt);
+	counts[EFCT_HW_LINK_STAT_RCV_DROPPED_NO_XRI_COUNT].counter =
+		 le32_to_cpu(mbox_rsp->rx_dropped_no_avail_xri_rescnt);
+
+	if (cb_arg) {
+		if (cb_arg->cb) {
+			if (status == 0 && le16_to_cpu(mbox_rsp->hdr.status))
+				status = le16_to_cpu(mbox_rsp->hdr.status);
+			cb_arg->cb(status, num_counters, counts, cb_arg->arg);
+		}
+
+		kfree(cb_arg);
+	}
+	kfree(mqe);
+
+	return EFC_SUCCESS;
+}
+
+enum efct_hw_rtn
+efct_hw_get_link_stats(struct efct_hw *hw,
+		       u8 req_ext_counters,
+		       u8 clear_overflow_flags,
+		       u8 clear_all_counters,
+		       void (*cb)(int status,
+				  u32 num_counters,
+			struct efct_hw_link_stat_counts *counters,
+			void *arg),
+		       void *arg)
+{
+	enum efct_hw_rtn rc = EFCT_HW_RTN_ERROR;
+	struct efct_hw_link_stat_cb_arg *cb_arg;
+	u8 *mbxdata;
+
+	mbxdata = kmalloc(SLI4_BMBX_SIZE, GFP_ATOMIC);
+	if (!mbxdata)
+		return EFCT_HW_RTN_NO_MEMORY;
+
+	memset(mbxdata, 0, SLI4_BMBX_SIZE);
+
+	cb_arg = kmalloc(sizeof(*cb_arg), GFP_ATOMIC);
+	if (!cb_arg) {
+		kfree(mbxdata);
+		return EFCT_HW_RTN_NO_MEMORY;
+	}
+
+	cb_arg->cb = cb;
+	cb_arg->arg = arg;
+
+	/* Send the HW command */
+	if (!sli_cmd_read_link_stats(&hw->sli, mbxdata, SLI4_BMBX_SIZE,
+				    req_ext_counters,
+				    clear_overflow_flags,
+				    clear_all_counters))
+		rc = efct_hw_command(hw, mbxdata, EFCT_CMD_NOWAIT,
+				     efct_hw_cb_link_stat, cb_arg);
+
+	if (rc != EFCT_HW_RTN_SUCCESS) {
+		kfree(mbxdata);
+		kfree(cb_arg);
+	}
+
+	return rc;
+}
+
+static int
+efct_hw_cb_host_stat(struct efct_hw *hw, int status,
+		     u8 *mqe, void *arg)
+{
+	struct sli4_cmd_read_status *mbox_rsp =
+					(struct sli4_cmd_read_status *)mqe;
+	struct efct_hw_host_stat_cb_arg *cb_arg = arg;
+	struct efct_hw_host_stat_counts counts[EFCT_HW_HOST_STAT_MAX];
+	u32 num_counters = EFCT_HW_HOST_STAT_MAX;
+
+	memset(counts, 0, sizeof(struct efct_hw_host_stat_counts) *
+		   EFCT_HW_HOST_STAT_MAX);
+
+	counts[EFCT_HW_HOST_STAT_TX_KBYTE_COUNT].counter =
+		 le32_to_cpu(mbox_rsp->trans_kbyte_cnt);
+	counts[EFCT_HW_HOST_STAT_RX_KBYTE_COUNT].counter =
+		 le32_to_cpu(mbox_rsp->recv_kbyte_cnt);
+	counts[EFCT_HW_HOST_STAT_TX_FRAME_COUNT].counter =
+		 le32_to_cpu(mbox_rsp->trans_frame_cnt);
+	counts[EFCT_HW_HOST_STAT_RX_FRAME_COUNT].counter =
+		 le32_to_cpu(mbox_rsp->recv_frame_cnt);
+	counts[EFCT_HW_HOST_STAT_TX_SEQ_COUNT].counter =
+		 le32_to_cpu(mbox_rsp->trans_seq_cnt);
+	counts[EFCT_HW_HOST_STAT_RX_SEQ_COUNT].counter =
+		 le32_to_cpu(mbox_rsp->recv_seq_cnt);
+	counts[EFCT_HW_HOST_STAT_TOTAL_EXCH_ORIG].counter =
+		 le32_to_cpu(mbox_rsp->tot_exchanges_orig);
+	counts[EFCT_HW_HOST_STAT_TOTAL_EXCH_RESP].counter =
+		 le32_to_cpu(mbox_rsp->tot_exchanges_resp);
+	counts[EFCT_HW_HOSY_STAT_RX_P_BSY_COUNT].counter =
+		 le32_to_cpu(mbox_rsp->recv_p_bsy_cnt);
+	counts[EFCT_HW_HOST_STAT_RX_F_BSY_COUNT].counter =
+		 le32_to_cpu(mbox_rsp->recv_f_bsy_cnt);
+	counts[EFCT_HW_HOST_STAT_DROP_FRM_DUE_TO_NO_RQ_BUF_COUNT].counter =
+		 le32_to_cpu(mbox_rsp->no_rq_buf_dropped_frames_cnt);
+	counts[EFCT_HW_HOST_STAT_EMPTY_RQ_TIMEOUT_COUNT].counter =
+		 le32_to_cpu(mbox_rsp->empty_rq_timeout_cnt);
+	counts[EFCT_HW_HOST_STAT_DROP_FRM_DUE_TO_NO_XRI_COUNT].counter =
+		 le32_to_cpu(mbox_rsp->no_xri_dropped_frames_cnt);
+	counts[EFCT_HW_HOST_STAT_EMPTY_XRI_POOL_COUNT].counter =
+		 le32_to_cpu(mbox_rsp->empty_xri_pool_cnt);
+
+	if (cb_arg) {
+		if (cb_arg->cb) {
+			if (status == 0 && le16_to_cpu(mbox_rsp->hdr.status))
+				status = le16_to_cpu(mbox_rsp->hdr.status);
+			cb_arg->cb(status, num_counters, counts, cb_arg->arg);
+		}
+
+		kfree(cb_arg);
+	}
+	kfree(mqe);
+
+	return EFC_SUCCESS;
+}
+
+enum efct_hw_rtn
+efct_hw_get_host_stats(struct efct_hw *hw, u8 cc,
+		       void (*cb)(int status,
+				  u32 num_counters,
+				  struct efct_hw_host_stat_counts *counters,
+				  void *arg),
+		       void *arg)
+{
+	enum efct_hw_rtn rc = EFCT_HW_RTN_ERROR;
+	struct efct_hw_host_stat_cb_arg *cb_arg;
+	u8 *mbxdata;
+
+	mbxdata = kmalloc(SLI4_BMBX_SIZE, GFP_ATOMIC);
+	if (!mbxdata)
+		return EFCT_HW_RTN_NO_MEMORY;
+
+	memset(mbxdata, 0, SLI4_BMBX_SIZE);
+
+	cb_arg = kmalloc(sizeof(*cb_arg), GFP_ATOMIC);
+	if (!cb_arg) {
+		kfree(mbxdata);
+		return EFCT_HW_RTN_NO_MEMORY;
+	}
+
+	cb_arg->cb = cb;
+	cb_arg->arg = arg;
+
+	/* Send the HW command to get the host stats */
+	if (!sli_cmd_read_status(&hw->sli, mbxdata, SLI4_BMBX_SIZE, cc))
+		rc = efct_hw_command(hw, mbxdata, EFCT_CMD_NOWAIT,
+				     efct_hw_cb_host_stat, cb_arg);
+
+	if (rc != EFCT_HW_RTN_SUCCESS) {
+		efc_log_test(hw->os, "READ_HOST_STATS failed\n");
+		kfree(mbxdata);
+		kfree(cb_arg);
+	}
+
+	return rc;
+}
diff --git a/drivers/scsi/elx/efct/efct_hw.h b/drivers/scsi/elx/efct/efct_hw.h
index 36a832f32616..0b6838c7f924 100644
--- a/drivers/scsi/elx/efct/efct_hw.h
+++ b/drivers/scsi/elx/efct/efct_hw.h
@@ -733,4 +733,43 @@ efct_hw_srrs_send(struct efct_hw *hw, enum efct_hw_io_type type,
 		  efct_hw_srrs_cb_t cb,
 		  void *arg);
 
+/* Function for retrieving SFP data */
+extern enum efct_hw_rtn
+efct_hw_get_sfp(struct efct_hw *hw, u16 page,
+		void (*cb)(int, u32, u8 *, void *), void *arg);
+
+/* Function for retrieving temperature data */
+extern enum efct_hw_rtn
+efct_hw_get_temperature(struct efct_hw *hw,
+			void (*efct_hw_temp_cb_t)(int status,
+						  u32 curr_temp,
+				u32 crit_temp_thrshld,
+				u32 warn_temp_thrshld,
+				u32 norm_temp_thrshld,
+				u32 fan_off_thrshld,
+				u32 fan_on_thrshld,
+				void *arg),
+			void *arg);
+
+/* Function for retrieving link statistics */
+extern enum efct_hw_rtn
+efct_hw_get_link_stats(struct efct_hw *hw,
+		       u8 req_ext_counters,
+		u8 clear_overflow_flags,
+		u8 clear_all_counters,
+		void (*efct_hw_link_stat_cb_t)(int status,
+					       u32 num_counters,
+			struct efct_hw_link_stat_counts *counters,
+			void *arg),
+		void *arg);
+/* Function for retrieving host statistics */
+extern enum efct_hw_rtn
+efct_hw_get_host_stats(struct efct_hw *hw,
+		       u8 cc,
+		void (*efct_hw_host_stat_cb_t)(int status,
+					       u32 num_counters,
+			struct efct_hw_host_stat_counts *counters,
+			void *arg),
+		void *arg);
+
 #endif /* __EFCT_H__ */
-- 
2.16.4

