linux-kernel.vger.kernel.org archive mirror
* [PATCH 1/2] crypto: cavium/nitrox - Allocate asymmetric crypto command queues
@ 2019-08-08 12:17 Phani Kiran Hemadri
  2019-08-08 12:17 ` [PATCH 2/2] crypto: cavium/nitrox - Configure asymmetric queue manager Hardware unit Phani Kiran Hemadri
  2019-08-15 12:06 ` [PATCH 1/2] crypto: cavium/nitrox - Allocate asymmetric crypto command queues Herbert Xu
  0 siblings, 2 replies; 3+ messages in thread
From: Phani Kiran Hemadri @ 2019-08-08 12:17 UTC (permalink / raw)
  To: herbert; +Cc: davem, linux-crypto, linux-kernel, Phani Kiran Hemadri

This patch adds support to allocate CNN55XX device AQMQ command queues
required for submitting asymmetric crypto requests.
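
For reference, the actual submission path for asymmetric requests is not
part of this series. A minimal sketch of pushing one 32-byte
'aqmq_command_s' into an allocated AQMQ might look as follows; the
'base'/'write_idx' ring bookkeeping and the missing locking are
illustrative assumptions only, while 'instr_size' and 'dbell_csr_addr'
are the fields initialized in this patch:

static void aqmq_submit_sketch(struct nitrox_cmdq *cmdq,
			       const struct aqmq_command_s *cmd)
{
	struct nitrox_device *ndev = cmdq->ndev;

	/* copy the 32-byte command into the next host ring slot */
	memcpy(cmdq->base + cmdq->write_idx * cmdq->instr_size,
	       cmd, cmdq->instr_size);
	cmdq->write_idx = (cmdq->write_idx + 1) % ndev->qlen;

	/* make the command visible to the device before the doorbell */
	dma_wmb();

	/* AQMQ_DRBLX doorbell: one more command is available */
	writeq(1, cmdq->dbell_csr_addr);
}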

Signed-off-by: Phani Kiran Hemadri <phemadri@marvell.com>
Reviewed-by: Srikanth Jampala <jsrikanth@marvell.com>
---
 drivers/crypto/cavium/nitrox/nitrox_dev.h |  4 ++
 drivers/crypto/cavium/nitrox/nitrox_lib.c | 66 ++++++++++++++++++++++-
 drivers/crypto/cavium/nitrox/nitrox_req.h | 30 +++++++++++
 3 files changed, 99 insertions(+), 1 deletion(-)

diff --git a/drivers/crypto/cavium/nitrox/nitrox_dev.h b/drivers/crypto/cavium/nitrox/nitrox_dev.h
index 5ee98eca728c..2217a2736c8e 100644
--- a/drivers/crypto/cavium/nitrox/nitrox_dev.h
+++ b/drivers/crypto/cavium/nitrox/nitrox_dev.h
@@ -10,6 +10,8 @@
 #define VERSION_LEN 32
 /* Maximum queues in PF mode */
 #define MAX_PF_QUEUES	64
+/* Maximum device queues */
+#define MAX_DEV_QUEUES (MAX_PF_QUEUES)
 /* Maximum UCD Blocks */
 #define CNN55XX_MAX_UCD_BLOCKS	8
 
@@ -208,6 +210,7 @@ enum vf_mode {
  * @mode: Device mode PF/VF
  * @ctx_pool: DMA pool for crypto context
  * @pkt_inq: Packet input rings
+ * @aqmq: AQM command queues
  * @qvec: MSI-X queue vectors information
  * @iov: SR-IOV informatin
  * @num_vecs: number of MSI-X vectors
@@ -234,6 +237,7 @@ struct nitrox_device {
 
 	struct dma_pool *ctx_pool;
 	struct nitrox_cmdq *pkt_inq;
+	struct nitrox_cmdq *aqmq[MAX_DEV_QUEUES] ____cacheline_aligned_in_smp;
 
 	struct nitrox_q_vector *qvec;
 	struct nitrox_iov iov;
diff --git a/drivers/crypto/cavium/nitrox/nitrox_lib.c b/drivers/crypto/cavium/nitrox/nitrox_lib.c
index 4ace9bcd603a..5cbc64b851b9 100644
--- a/drivers/crypto/cavium/nitrox/nitrox_lib.c
+++ b/drivers/crypto/cavium/nitrox/nitrox_lib.c
@@ -19,6 +19,8 @@
 
 /* packet inuput ring alignments */
 #define PKTIN_Q_ALIGN_BYTES 16
+/* AQM Queue input alignments */
+#define AQM_Q_ALIGN_BYTES 32
 
 static int nitrox_cmdq_init(struct nitrox_cmdq *cmdq, int align_bytes)
 {
@@ -57,11 +59,15 @@ static void nitrox_cmdq_reset(struct nitrox_cmdq *cmdq)
 
 static void nitrox_cmdq_cleanup(struct nitrox_cmdq *cmdq)
 {
-	struct nitrox_device *ndev = cmdq->ndev;
+	struct nitrox_device *ndev;
+
+	if (!cmdq)
+		return;
 
 	if (!cmdq->unalign_base)
 		return;
 
+	ndev = cmdq->ndev;
 	cancel_work_sync(&cmdq->backlog_qflush);
 
 	dma_free_coherent(DEV(ndev), cmdq->qsize,
@@ -78,6 +84,57 @@ static void nitrox_cmdq_cleanup(struct nitrox_cmdq *cmdq)
 	cmdq->instr_size = 0;
 }
 
+static void nitrox_free_aqm_queues(struct nitrox_device *ndev)
+{
+	int i;
+
+	for (i = 0; i < ndev->nr_queues; i++) {
+		nitrox_cmdq_cleanup(ndev->aqmq[i]);
+		kzfree(ndev->aqmq[i]);
+		ndev->aqmq[i] = NULL;
+	}
+}
+
+static int nitrox_alloc_aqm_queues(struct nitrox_device *ndev)
+{
+	int i, err;
+
+	for (i = 0; i < ndev->nr_queues; i++) {
+		struct nitrox_cmdq *cmdq;
+		u64 offset;
+
+		cmdq = kzalloc_node(sizeof(*cmdq), GFP_KERNEL, ndev->node);
+		if (!cmdq) {
+			err = -ENOMEM;
+			goto aqmq_fail;
+		}
+
+		cmdq->ndev = ndev;
+		cmdq->qno = i;
+		cmdq->instr_size = sizeof(struct aqmq_command_s);
+
+		/* AQM Queue Doorbell Counter Register Address */
+		offset = AQMQ_DRBLX(i);
+		cmdq->dbell_csr_addr = NITROX_CSR_ADDR(ndev, offset);
+		/* AQM Queue Commands Completed Count Register Address */
+		offset = AQMQ_CMD_CNTX(i);
+		cmdq->compl_cnt_csr_addr = NITROX_CSR_ADDR(ndev, offset);
+
+		err = nitrox_cmdq_init(cmdq, AQM_Q_ALIGN_BYTES);
+		if (err) {
+			kzfree(cmdq);
+			goto aqmq_fail;
+		}
+		ndev->aqmq[i] = cmdq;
+	}
+
+	return 0;
+
+aqmq_fail:
+	nitrox_free_aqm_queues(ndev);
+	return err;
+}
+
 static void nitrox_free_pktin_queues(struct nitrox_device *ndev)
 {
 	int i;
@@ -222,6 +279,12 @@ int nitrox_common_sw_init(struct nitrox_device *ndev)
 	if (err)
 		destroy_crypto_dma_pool(ndev);
 
+	err = nitrox_alloc_aqm_queues(ndev);
+	if (err) {
+		nitrox_free_pktin_queues(ndev);
+		destroy_crypto_dma_pool(ndev);
+	}
+
 	return err;
 }
 
@@ -231,6 +294,7 @@ int nitrox_common_sw_init(struct nitrox_device *ndev)
  */
 void nitrox_common_sw_cleanup(struct nitrox_device *ndev)
 {
+	nitrox_free_aqm_queues(ndev);
 	nitrox_free_pktin_queues(ndev);
 	destroy_crypto_dma_pool(ndev);
 }
diff --git a/drivers/crypto/cavium/nitrox/nitrox_req.h b/drivers/crypto/cavium/nitrox/nitrox_req.h
index efdbd0fc3e3b..f69ba02c4d25 100644
--- a/drivers/crypto/cavium/nitrox/nitrox_req.h
+++ b/drivers/crypto/cavium/nitrox/nitrox_req.h
@@ -399,6 +399,36 @@ struct nps_pkt_instr {
 	u64 fdata[2];
 };
 
+/**
+ * struct aqmq_command_s - The 32 byte command for AE processing.
+ * @opcode: Request opcode
+ * @param1: Request control parameter 1
+ * @param2: Request control parameter 2
+ * @dlen: Input length
+ * @dptr: Input pointer points to buffer in remote host
+ * @rptr: Result pointer points to buffer in remote host
+ * @grp: AQM Group (0..7)
+ * @cptr: Context pointer
+ */
+struct aqmq_command_s {
+	__be16 opcode;
+	__be16 param1;
+	__be16 param2;
+	__be16 dlen;
+	__be64 dptr;
+	__be64 rptr;
+	union {
+		__be64 word3;
+#if defined(__BIG_ENDIAN_BITFIELD)
+		u64 grp : 3;
+		u64 cptr : 61;
+#else
+		u64 cptr : 61;
+		u64 grp : 3;
+#endif
+	};
+};
+
 /**
  * struct ctx_hdr - Book keeping data about the crypto context
  * @pool: Pool used to allocate crypto context
-- 
2.17.2



* [PATCH 2/2] crypto: cavium/nitrox - Configure asymmetric queue manager Hardware unit
  2019-08-08 12:17 [PATCH 1/2] crypto: cavium/nitrox - Allocate asymmetric crypto command queues Phani Kiran Hemadri
@ 2019-08-08 12:17 ` Phani Kiran Hemadri
  2019-08-15 12:06 ` [PATCH 1/2] crypto: cavium/nitrox - Allocate asymmetric crypto command queues Herbert Xu
  1 sibling, 0 replies; 3+ messages in thread
From: Phani Kiran Hemadri @ 2019-08-08 12:17 UTC (permalink / raw)
  To: herbert; +Cc: davem, linux-crypto, linux-kernel, Phani Kiran Hemadri

This patch configures and initializes CNN55XX device AQM hardware unit.
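
Completion handling for these rings is not shown in this patch. As a
rough illustration of how the AQMQ_CMP_CNTX register and the
'aqmq_cmp_cnt' layout added here are meant to be used, a polling sketch
could look like the following; the function itself is hypothetical, and
the clear-by-writing-back behavior mirrors what reset_aqm_ring() below
does:

static u32 aqmq_poll_completions_sketch(struct nitrox_device *ndev, int ring)
{
	union aqmq_cmp_cnt cmp_cnt;
	u64 offset = AQMQ_CMP_CNTX(ring);

	/* snapshot the commands-completed count for this ring */
	cmp_cnt.value = nitrox_read_csr(ndev, offset);

	/* writing the count back clears it, as in reset_aqm_ring() */
	if (cmp_cnt.commands_completed_count)
		nitrox_write_csr(ndev, offset, cmp_cnt.value);

	return cmp_cnt.commands_completed_count;
}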

Signed-off-by: Phani Kiran Hemadri <phemadri@marvell.com>
Reviewed-by: Srikanth Jampala <jsrikanth@marvell.com>
---
 drivers/crypto/cavium/nitrox/nitrox_csr.h   | 111 ++++++++++++++
 drivers/crypto/cavium/nitrox/nitrox_hal.c   | 158 ++++++++++++++++++--
 drivers/crypto/cavium/nitrox/nitrox_hal.h   |   6 +-
 drivers/crypto/cavium/nitrox/nitrox_main.c  |   4 +-
 drivers/crypto/cavium/nitrox/nitrox_sriov.c |   3 +
 5 files changed, 265 insertions(+), 17 deletions(-)

diff --git a/drivers/crypto/cavium/nitrox/nitrox_csr.h b/drivers/crypto/cavium/nitrox/nitrox_csr.h
index da1d73303780..1c8715ae0488 100644
--- a/drivers/crypto/cavium/nitrox/nitrox_csr.h
+++ b/drivers/crypto/cavium/nitrox/nitrox_csr.h
@@ -256,6 +256,117 @@ union aqm_grp_execmsk_hi {
 	};
 };
 
+/**
+ * struct aqmq_drbl - AQM Queue Doorbell Counter Registers
+ * @dbell_count: Doorbell Counter
+ */
+union aqmq_drbl {
+	u64 value;
+	struct {
+#if (defined(__BIG_ENDIAN_BITFIELD))
+		u64 raz_32_63 : 32;
+		u64 dbell_count : 32;
+#else
+		u64 dbell_count : 32;
+		u64 raz_32_63 : 32;
+#endif
+	};
+};
+
+/**
+ * struct aqmq_qsz - AQM Queue Host Queue Size Registers
+ * @host_queue_size: Size of the host ring, in number of
+ * 'aqmq_command_s' commands.
+ */
+union aqmq_qsz {
+	u64 value;
+	struct {
+#if (defined(__BIG_ENDIAN_BITFIELD))
+		u64 raz_32_63 : 32;
+		u64 host_queue_size : 32;
+#else
+		u64 host_queue_size : 32;
+		u64 raz_32_63 : 32;
+#endif
+	};
+};
+
+/**
+ * struct aqmq_cmp_thr - AQM Queue Commands Completed Threshold Registers
+ * @commands_completed_threshold: Count of 'aqmq_command_s' commands executed
+ * by AE engines for which completion interrupt is asserted.
+ */
+union aqmq_cmp_thr {
+	u64 value;
+	struct {
+#if (defined(__BIG_ENDIAN_BITFIELD))
+		u64 raz_32_63 : 32;
+		u64 commands_completed_threshold : 32;
+#else
+		u64 commands_completed_threshold : 32;
+		u64 raz_32_63 : 32;
+#endif
+	};
+};
+
+/**
+ * struct aqmq_cmp_cnt - AQM Queue Commands Completed Count Registers
+ * @resend: Bit to request completion interrupt Resend.
+ * @completion_status: Command completion status of the ring.
+ * @commands_completed_count: Count of 'aqmq_command_s' commands executed by
+ * AE engines.
+ */
+union aqmq_cmp_cnt {
+	u64 value;
+	struct {
+#if (defined(__BIG_ENDIAN_BITFIELD))
+		u64 raz_34_63 : 30;
+		u64 resend : 1;
+		u64 completion_status : 1;
+		u64 commands_completed_count : 32;
+#else
+		u64 commands_completed_count : 32;
+		u64 completion_status : 1;
+		u64 resend : 1;
+		u64 raz_34_63 : 30;
+#endif
+	};
+};
+
+/**
+ * struct aqmq_en - AQM Queue Enable Registers
+ * @queue_enable: 1 = AQMQ is enabled, 0 = AQMQ is disabled
+ */
+union aqmq_en {
+	u64 value;
+	struct {
+#if (defined(__BIG_ENDIAN_BITFIELD))
+		u64 raz_1_63 : 63;
+		u64 queue_enable : 1;
+#else
+		u64 queue_enable : 1;
+		u64 raz_1_63 : 63;
+#endif
+	};
+};
+
+/**
+ * struct aqmq_activity_stat - AQM Queue Activity Status Registers
+ * @queue_active: 1 = AQMQ is active, 0 = AQMQ is quiescent
+ */
+union aqmq_activity_stat {
+	u64 value;
+	struct {
+#if (defined(__BIG_ENDIAN_BITFIELD))
+		u64 raz_1_63 : 63;
+		u64 queue_active : 1;
+#else
+		u64 queue_active : 1;
+		u64 raz_1_63 : 63;
+#endif
+	};
+};
+
 /**
  * struct emu_fuse_map - EMU Fuse Map Registers
  * @ae_fuse: Fuse settings for AE 19..0
diff --git a/drivers/crypto/cavium/nitrox/nitrox_hal.c b/drivers/crypto/cavium/nitrox/nitrox_hal.c
index 3f0df60267a9..34a2f4f30a7e 100644
--- a/drivers/crypto/cavium/nitrox/nitrox_hal.c
+++ b/drivers/crypto/cavium/nitrox/nitrox_hal.c
@@ -241,12 +241,12 @@ void nitrox_config_pkt_solicit_ports(struct nitrox_device *ndev)
 }
 
 /**
- * enable_nps_interrupts - enable NPS interrutps
+ * enable_nps_core_interrupts - enable NPS core interrupts
  * @ndev: NITROX device.
  *
- * This includes NPS core, packet in and slc interrupts.
+ * This includes NPS core interrupts.
  */
-static void enable_nps_interrupts(struct nitrox_device *ndev)
+static void enable_nps_core_interrupts(struct nitrox_device *ndev)
 {
 	union nps_core_int_ena_w1s core_int;
 
@@ -258,18 +258,9 @@ static void enable_nps_interrupts(struct nitrox_device *ndev)
 	core_int.s.npco_dma_malform = 1;
 	core_int.s.host_nps_wr_err = 1;
 	nitrox_write_csr(ndev, NPS_CORE_INT_ENA_W1S, core_int.value);
-
-	/* NPS packet in ring interrupts */
-	nitrox_write_csr(ndev, NPS_PKT_IN_RERR_LO_ENA_W1S, (~0ULL));
-	nitrox_write_csr(ndev, NPS_PKT_IN_RERR_HI_ENA_W1S, (~0ULL));
-	nitrox_write_csr(ndev, NPS_PKT_IN_ERR_TYPE_ENA_W1S, (~0ULL));
-	/* NPS packet slc port interrupts */
-	nitrox_write_csr(ndev, NPS_PKT_SLC_RERR_HI_ENA_W1S, (~0ULL));
-	nitrox_write_csr(ndev, NPS_PKT_SLC_RERR_LO_ENA_W1S, (~0ULL));
-	nitrox_write_csr(ndev, NPS_PKT_SLC_ERR_TYPE_ENA_W1S, (~0uLL));
 }
 
-void nitrox_config_nps_unit(struct nitrox_device *ndev)
+void nitrox_config_nps_core_unit(struct nitrox_device *ndev)
 {
 	union nps_core_gbl_vfcfg core_gbl_vfcfg;
 
@@ -281,12 +272,149 @@ void nitrox_config_nps_unit(struct nitrox_device *ndev)
 	core_gbl_vfcfg.s.ilk_disable = 1;
 	core_gbl_vfcfg.s.cfg = __NDEV_MODE_PF;
 	nitrox_write_csr(ndev, NPS_CORE_GBL_VFCFG, core_gbl_vfcfg.value);
+
+	/* enable nps core interrupts */
+	enable_nps_core_interrupts(ndev);
+}
+
+/**
+ * enable_nps_pkt_interrupts - enable NPS packet interrupts
+ * @ndev: NITROX device.
+ *
+ * This includes NPS packet in and slc interrupts.
+ */
+static void enable_nps_pkt_interrupts(struct nitrox_device *ndev)
+{
+	/* NPS packet in ring interrupts */
+	nitrox_write_csr(ndev, NPS_PKT_IN_RERR_LO_ENA_W1S, (~0ULL));
+	nitrox_write_csr(ndev, NPS_PKT_IN_RERR_HI_ENA_W1S, (~0ULL));
+	nitrox_write_csr(ndev, NPS_PKT_IN_ERR_TYPE_ENA_W1S, (~0ULL));
+	/* NPS packet slc port interrupts */
+	nitrox_write_csr(ndev, NPS_PKT_SLC_RERR_HI_ENA_W1S, (~0ULL));
+	nitrox_write_csr(ndev, NPS_PKT_SLC_RERR_LO_ENA_W1S, (~0ULL));
+	nitrox_write_csr(ndev, NPS_PKT_SLC_ERR_TYPE_ENA_W1S, (~0uLL));
+}
+
+void nitrox_config_nps_pkt_unit(struct nitrox_device *ndev)
+{
 	/* config input and solicit ports */
 	nitrox_config_pkt_input_rings(ndev);
 	nitrox_config_pkt_solicit_ports(ndev);
 
-	/* enable interrupts */
-	enable_nps_interrupts(ndev);
+	/* enable nps packet interrupts */
+	enable_nps_pkt_interrupts(ndev);
+}
+
+static void reset_aqm_ring(struct nitrox_device *ndev, int ring)
+{
+	union aqmq_en aqmq_en_reg;
+	union aqmq_activity_stat activity_stat;
+	union aqmq_cmp_cnt cmp_cnt;
+	int max_retries = MAX_CSR_RETRIES;
+	u64 offset;
+
+	/* step 1: disable the queue */
+	offset = AQMQ_ENX(ring);
+	aqmq_en_reg.value = 0;
+	aqmq_en_reg.queue_enable = 0;
+	nitrox_write_csr(ndev, offset, aqmq_en_reg.value);
+
+	/* step 2: wait for AQMQ_ACTIVITY_STATX[QUEUE_ACTIVE] to clear */
+	usleep_range(100, 150);
+	offset = AQMQ_ACTIVITY_STATX(ring);
+	do {
+		activity_stat.value = nitrox_read_csr(ndev, offset);
+		if (!activity_stat.queue_active)
+			break;
+		udelay(50);
+	} while (max_retries--);
+
+	/* step 3: clear commands completed count */
+	offset = AQMQ_CMP_CNTX(ring);
+	cmp_cnt.value = nitrox_read_csr(ndev, offset);
+	nitrox_write_csr(ndev, offset, cmp_cnt.value);
+	usleep_range(50, 100);
+}
+
+void enable_aqm_ring(struct nitrox_device *ndev, int ring)
+{
+	union aqmq_en aqmq_en_reg;
+	u64 offset;
+
+	offset = AQMQ_ENX(ring);
+	aqmq_en_reg.value = 0;
+	aqmq_en_reg.queue_enable = 1;
+	nitrox_write_csr(ndev, offset, aqmq_en_reg.value);
+	usleep_range(50, 100);
+}
+
+void nitrox_config_aqm_rings(struct nitrox_device *ndev)
+{
+	int ring;
+
+	for (ring = 0; ring < ndev->nr_queues; ring++) {
+		struct nitrox_cmdq *cmdq = ndev->aqmq[ring];
+		union aqmq_drbl drbl;
+		union aqmq_qsz qsize;
+		union aqmq_cmp_thr cmp_thr;
+		u64 offset;
+
+		/* steps 1 - 3 */
+		reset_aqm_ring(ndev, ring);
+
+		/* step 4: clear doorbell count of ring */
+		offset = AQMQ_DRBLX(ring);
+		drbl.value = 0;
+		drbl.dbell_count = 0xFFFFFFFF;
+		nitrox_write_csr(ndev, offset, drbl.value);
+
+		/* step 5: configure host ring details */
+
+		/* set host address for next command of ring */
+		offset = AQMQ_NXT_CMDX(ring);
+		nitrox_write_csr(ndev, offset, 0ULL);
+
+		/* set host address of ring base */
+		offset = AQMQ_BADRX(ring);
+		nitrox_write_csr(ndev, offset, cmdq->dma);
+
+		/* set ring size */
+		offset = AQMQ_QSZX(ring);
+		qsize.value = 0;
+		qsize.host_queue_size = ndev->qlen;
+		nitrox_write_csr(ndev, offset, qsize.value);
+
+		/* set command completion threshold */
+		offset = AQMQ_CMP_THRX(ring);
+		cmp_thr.value = 0;
+		cmp_thr.commands_completed_threshold = 1;
+		nitrox_write_csr(ndev, offset, cmp_thr.value);
+
+		/* step 6: enable the queue */
+		enable_aqm_ring(ndev, ring);
+	}
+}
+
+static void enable_aqm_interrupts(struct nitrox_device *ndev)
+{
+	/* clear interrupt enable bits */
+	nitrox_write_csr(ndev, AQM_DBELL_OVF_LO_ENA_W1S, (~0ULL));
+	nitrox_write_csr(ndev, AQM_DBELL_OVF_HI_ENA_W1S, (~0ULL));
+	nitrox_write_csr(ndev, AQM_DMA_RD_ERR_LO_ENA_W1S, (~0ULL));
+	nitrox_write_csr(ndev, AQM_DMA_RD_ERR_HI_ENA_W1S, (~0ULL));
+	nitrox_write_csr(ndev, AQM_EXEC_NA_LO_ENA_W1S, (~0ULL));
+	nitrox_write_csr(ndev, AQM_EXEC_NA_HI_ENA_W1S, (~0ULL));
+	nitrox_write_csr(ndev, AQM_EXEC_ERR_LO_ENA_W1S, (~0ULL));
+	nitrox_write_csr(ndev, AQM_EXEC_ERR_HI_ENA_W1S, (~0ULL));
+}
+
+void nitrox_config_aqm_unit(struct nitrox_device *ndev)
+{
+	/* config aqm command queues */
+	nitrox_config_aqm_rings(ndev);
+
+	/* enable aqm interrupts */
+	enable_aqm_interrupts(ndev);
 }
 
 void nitrox_config_pom_unit(struct nitrox_device *ndev)
diff --git a/drivers/crypto/cavium/nitrox/nitrox_hal.h b/drivers/crypto/cavium/nitrox/nitrox_hal.h
index d6606418ba38..48b0af039099 100644
--- a/drivers/crypto/cavium/nitrox/nitrox_hal.h
+++ b/drivers/crypto/cavium/nitrox/nitrox_hal.h
@@ -4,10 +4,13 @@
 
 #include "nitrox_dev.h"
 
+void nitrox_config_aqm_rings(struct nitrox_device *ndev);
+void nitrox_config_aqm_unit(struct nitrox_device *ndev);
 void nitrox_config_emu_unit(struct nitrox_device *ndev);
 void nitrox_config_pkt_input_rings(struct nitrox_device *ndev);
 void nitrox_config_pkt_solicit_ports(struct nitrox_device *ndev);
-void nitrox_config_nps_unit(struct nitrox_device *ndev);
+void nitrox_config_nps_core_unit(struct nitrox_device *ndev);
+void nitrox_config_nps_pkt_unit(struct nitrox_device *ndev);
 void nitrox_config_pom_unit(struct nitrox_device *ndev);
 void nitrox_config_rand_unit(struct nitrox_device *ndev);
 void nitrox_config_efl_unit(struct nitrox_device *ndev);
@@ -15,6 +18,7 @@ void nitrox_config_bmi_unit(struct nitrox_device *ndev);
 void nitrox_config_bmo_unit(struct nitrox_device *ndev);
 void nitrox_config_lbc_unit(struct nitrox_device *ndev);
 void invalidate_lbc(struct nitrox_device *ndev);
+void enable_aqm_ring(struct nitrox_device *ndev, int qno);
 void enable_pkt_input_ring(struct nitrox_device *ndev, int ring);
 void enable_pkt_solicit_port(struct nitrox_device *ndev, int port);
 void config_nps_core_vfcfg_mode(struct nitrox_device *ndev, enum vf_mode mode);
diff --git a/drivers/crypto/cavium/nitrox/nitrox_main.c b/drivers/crypto/cavium/nitrox/nitrox_main.c
index 345d3ea10b1f..bc924980e10c 100644
--- a/drivers/crypto/cavium/nitrox/nitrox_main.c
+++ b/drivers/crypto/cavium/nitrox/nitrox_main.c
@@ -387,7 +387,9 @@ static int nitrox_pf_hw_init(struct nitrox_device *ndev)
 	/* get cores information */
 	nitrox_get_hwinfo(ndev);
 
-	nitrox_config_nps_unit(ndev);
+	nitrox_config_nps_core_unit(ndev);
+	nitrox_config_aqm_unit(ndev);
+	nitrox_config_nps_pkt_unit(ndev);
 	nitrox_config_pom_unit(ndev);
 	nitrox_config_efl_unit(ndev);
 	/* configure IO units */
diff --git a/drivers/crypto/cavium/nitrox/nitrox_sriov.c b/drivers/crypto/cavium/nitrox/nitrox_sriov.c
index bf439d8256ba..43287f8471d1 100644
--- a/drivers/crypto/cavium/nitrox/nitrox_sriov.c
+++ b/drivers/crypto/cavium/nitrox/nitrox_sriov.c
@@ -109,6 +109,9 @@ static int nitrox_pf_reinit(struct nitrox_device *ndev)
 		return err;
 	}
 
+	/* configure the AQM queues */
+	nitrox_config_aqm_rings(ndev);
+
 	/* configure the packet queues */
 	nitrox_config_pkt_input_rings(ndev);
 	nitrox_config_pkt_solicit_ports(ndev);
-- 
2.17.2



* Re: [PATCH 1/2] crypto: cavium/nitrox - Allocate asymmetric crypto command queues
  2019-08-08 12:17 [PATCH 1/2] crypto: cavium/nitrox - Allocate asymmetric crypto command queues Phani Kiran Hemadri
  2019-08-08 12:17 ` [PATCH 2/2] crypto: cavium/nitrox - Configure asymmetric queue manager Hardware unit Phani Kiran Hemadri
@ 2019-08-15 12:06 ` Herbert Xu
  1 sibling, 0 replies; 3+ messages in thread
From: Herbert Xu @ 2019-08-15 12:06 UTC (permalink / raw)
  To: Phani Kiran Hemadri; +Cc: davem, linux-crypto, linux-kernel

On Thu, Aug 08, 2019 at 12:17:37PM +0000, Phani Kiran Hemadri wrote:
> This patch adds support to allocate CNN55XX device AQMQ command queues
> required for submitting asymmetric crypto requests.
> 
> Signed-off-by: Phani Kiran Hemadri <phemadri@marvell.com>
> Reviewed-by: Srikanth Jampala <jsrikanth@marvell.com>
> ---
>  drivers/crypto/cavium/nitrox/nitrox_dev.h |  4 ++
>  drivers/crypto/cavium/nitrox/nitrox_lib.c | 66 ++++++++++++++++++++++-
>  drivers/crypto/cavium/nitrox/nitrox_req.h | 30 +++++++++++
>  3 files changed, 99 insertions(+), 1 deletion(-)

All applied.  Thanks.
-- 
Email: Herbert Xu <herbert@gondor.apana.org.au>
Home Page: http://gondor.apana.org.au/~herbert/
PGP Key: http://gondor.apana.org.au/~herbert/pubkey.txt

