From: Kenneth Lee <nek.in.cn@gmail.com>
To: Jonathan Corbet <corbet@lwn.net>,
	Herbert Xu <herbert@gondor.apana.org.au>,
	"David S . Miller" <davem@davemloft.net>,
	Joerg Roedel <joro@8bytes.org>,
	Alex Williamson <alex.williamson@redhat.com>,
	Kenneth Lee <liguozhu@hisilicon.com>,
	Hao Fang <fanghao11@huawei.com>,
	Zhou Wang <wangzhou1@hisilicon.com>,
	Zaibo Xu <xuzaibo@huawei.com>,
	Philippe Ombredanne <pombredanne@nexb.com>,
	Greg Kroah-Hartman <gregkh@linuxfoundation.org>,
	Thomas Gleixner <tglx@linutronix.de>,
	linux-doc@vger.kernel.org, linux-kernel@vger.kernel.org,
	linux-crypto@vger.kernel.org, iommu@lists.linux-foundation.org,
	kvm@vger.kernel.org, linux-accelerators@lists.ozlabs.org,
	Lu Baolu <baolu.lu@linux.intel.com>,
	Sanjay Kumar <sanjay.k.kumar@intel.com>
Cc: linuxarm@huawei.com
Subject: [PATCH 4/7] crypto: add hisilicon Queue Manager driver
Date: Mon,  3 Sep 2018 08:52:01 +0800
Message-ID: <20180903005204.26041-5-nek.in.cn@gmail.com>
In-Reply-To: <20180903005204.26041-1-nek.in.cn@gmail.com>

From: Kenneth Lee <liguozhu@hisilicon.com>

Hisilicon QM is a general-purpose IP block used by several Hisilicon
accelerators. It provides a PCIe interface through which the CPU and the
accelerator share a group of queues.

This commit adds a library that accelerator drivers use to access the QM
hardware.
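
For reference, an accelerator driver is expected to drive this library
roughly as follows (a minimal sketch only: error handling is trimmed, and
HW_SQE_SIZE, ALG_TYPE and the "hisi_xxx" device name are placeholders for
device-specific values):

	/* PCI probe path */
	qm->pdev = pdev;
	qm->sqe_size = HW_SQE_SIZE;	/* device specific */
	qm->qp_base = 0;
	qm->qp_num = 128;		/* device specific */
	qm->ver = 1;

	ret = hisi_qm_init("hisi_xxx", qm);
	if (ret)
		return ret;

	/* PF only: put the QM into the init state before configuring it */
	ret = hisi_qm_mem_start(qm);
	if (ret)
		goto err_with_init;

	ret = hisi_qm_start(qm);
	if (ret)
		goto err_with_init;

	/* per-session setup and submission */
	qp = hisi_qm_create_qp(qm, ALG_TYPE);
	ret = hisi_qm_start_qp(qp, 0 /* pasid */);
	ret = hisi_qp_send(qp, sqe);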

Signed-off-by: Kenneth Lee <liguozhu@hisilicon.com>
Signed-off-by: Zhou Wang <wangzhou1@hisilicon.com>
Signed-off-by: Hao Fang <fanghao11@huawei.com>
---
 drivers/crypto/Makefile           |   2 +-
 drivers/crypto/hisilicon/Kconfig  |   8 +
 drivers/crypto/hisilicon/Makefile |   1 +
 drivers/crypto/hisilicon/qm.c     | 820 ++++++++++++++++++++++++++++++
 drivers/crypto/hisilicon/qm.h     | 110 ++++
 5 files changed, 940 insertions(+), 1 deletion(-)
 create mode 100644 drivers/crypto/hisilicon/qm.c
 create mode 100644 drivers/crypto/hisilicon/qm.h

diff --git a/drivers/crypto/Makefile b/drivers/crypto/Makefile
index c23396f32c8a..f3a7abe42424 100644
--- a/drivers/crypto/Makefile
+++ b/drivers/crypto/Makefile
@@ -46,4 +46,4 @@ obj-$(CONFIG_CRYPTO_DEV_VMX) += vmx/
 obj-$(CONFIG_CRYPTO_DEV_BCM_SPU) += bcm/
 obj-$(CONFIG_CRYPTO_DEV_SAFEXCEL) += inside-secure/
 obj-$(CONFIG_CRYPTO_DEV_ARTPEC6) += axis/
-obj-y += hisilicon/
+obj-$(CONFIG_CRYPTO_DEV_HISILICON) += hisilicon/
diff --git a/drivers/crypto/hisilicon/Kconfig b/drivers/crypto/hisilicon/Kconfig
index 8ca9c503bcb0..02a6eef84101 100644
--- a/drivers/crypto/hisilicon/Kconfig
+++ b/drivers/crypto/hisilicon/Kconfig
@@ -1,4 +1,8 @@
 # SPDX-License-Identifier: GPL-2.0
+config CRYPTO_DEV_HISILICON
+	tristate "Support for HISILICON CRYPTO ACCELERATOR"
+	help
+	  Enable this to use Hisilicon Hardware Accelerators
 
 config CRYPTO_DEV_HISI_SEC
 	tristate "Support for Hisilicon SEC crypto block cipher accelerator"
@@ -12,3 +16,7 @@ config CRYPTO_DEV_HISI_SEC
 
 	  To compile this as a module, choose M here: the module
 	  will be called hisi_sec.
+
+config CRYPTO_DEV_HISI_QM
+	tristate
+	depends on ARM64 && PCI
diff --git a/drivers/crypto/hisilicon/Makefile b/drivers/crypto/hisilicon/Makefile
index 463f46ace182..05e9052e0f52 100644
--- a/drivers/crypto/hisilicon/Makefile
+++ b/drivers/crypto/hisilicon/Makefile
@@ -1,2 +1,3 @@
 # SPDX-License-Identifier: GPL-2.0
 obj-$(CONFIG_CRYPTO_DEV_HISI_SEC) += sec/
+obj-$(CONFIG_CRYPTO_DEV_HISI_QM) += qm.o
diff --git a/drivers/crypto/hisilicon/qm.c b/drivers/crypto/hisilicon/qm.c
new file mode 100644
index 000000000000..ea618b4d0929
--- /dev/null
+++ b/drivers/crypto/hisilicon/qm.c
@@ -0,0 +1,820 @@
+// SPDX-License-Identifier: GPL-2.0+
+#include <asm/page.h>
+#include <linux/bitmap.h>
+#include <linux/dma-mapping.h>
+#include <linux/io.h>
+#include <linux/irqreturn.h>
+#include <linux/log2.h>
+#include "qm.h"
+
+#define QM_DEF_Q_NUM			128
+
+/* eq/aeq irq enable */
+#define QM_VF_AEQ_INT_SOURCE		0x0
+#define QM_VF_AEQ_INT_MASK		0x4
+#define QM_VF_EQ_INT_SOURCE		0x8
+#define QM_VF_EQ_INT_MASK		0xc
+
+/* mailbox */
+#define MAILBOX_CMD_SQC			0x0
+#define MAILBOX_CMD_CQC			0x1
+#define MAILBOX_CMD_EQC			0x2
+#define MAILBOX_CMD_SQC_BT		0x4
+#define MAILBOX_CMD_CQC_BT		0x5
+
+#define MAILBOX_CMD_SEND_BASE		0x300
+#define MAILBOX_EVENT_SHIFT		8
+#define MAILBOX_STATUS_SHIFT		9
+#define MAILBOX_BUSY_SHIFT		13
+#define MAILBOX_OP_SHIFT		14
+#define MAILBOX_QUEUE_SHIFT		16
+
+/* sqc shift */
+#define SQ_HEAD_SHIFT			0
+#define SQ_TAIL_SHIFT			16
+#define SQ_HOP_NUM_SHIFT		0
+#define SQ_PAGE_SIZE_SHIFT		4
+#define SQ_BUF_SIZE_SHIFT		8
+#define SQ_SQE_SIZE_SHIFT		12
+#define SQ_HEAD_IDX_SIG_SHIFT		0
+#define SQ_TAIL_IDX_SIG_SHIFT		0
+#define SQ_CQN_SHIFT			0
+#define SQ_PRIORITY_SHIFT		0
+#define SQ_ORDERS_SHIFT			4
+#define SQ_TYPE_SHIFT			8
+
+#define SQ_TYPE_MASK			0xf
+
+/* cqc shift */
+#define CQ_HEAD_SHIFT			0
+#define CQ_TAIL_SHIFT			16
+#define CQ_HOP_NUM_SHIFT		0
+#define CQ_PAGE_SIZE_SHIFT		4
+#define CQ_BUF_SIZE_SHIFT		8
+#define CQ_SQE_SIZE_SHIFT		12
+#define CQ_PASID			0
+#define CQ_HEAD_IDX_SIG_SHIFT		0
+#define CQ_TAIL_IDX_SIG_SHIFT		0
+#define CQ_CQN_SHIFT			0
+#define CQ_PRIORITY_SHIFT		16
+#define CQ_ORDERS_SHIFT			0
+#define CQ_TYPE_SHIFT			0
+#define CQ_PHASE_SHIFT			0
+#define CQ_FLAG_SHIFT			1
+
+#define CQC_HEAD_INDEX(cqc)		((cqc)->cq_head)
+#define CQC_PHASE(cqc)			(((cqc)->dw6) & 0x1)
+#define CQC_CQ_ADDRESS(cqc)		(((u64)((cqc)->cq_base_h) << 32) | \
+					 ((cqc)->cq_base_l))
+#define CQC_PHASE_BIT			0x1
+
+/* eqc shift */
+#define MB_EQC_EQE_SHIFT		12
+#define MB_EQC_PHASE_SHIFT		16
+
+#define EQC_HEAD_INDEX(eqc)		((eqc)->eq_head)
+#define EQC_TAIL_INDEX(eqc)		((eqc)->eq_tail)
+#define EQC_PHASE(eqc)			((((eqc)->dw6) >> 16) & 0x1)
+
+#define EQC_PHASE_BIT			0x00010000
+
+/* cqe shift */
+#define CQE_PHASE(cqe)			((cqe)->w7 & 0x1)
+#define CQE_SQ_NUM(cqe)			((cqe)->sq_num)
+#define CQE_SQ_HEAD_INDEX(cqe)		((cqe)->sq_head)
+
+/* eqe shift */
+#define EQE_PHASE(eqe)			(((eqe)->dw0 >> 16) & 0x1)
+#define EQE_CQN(eqe)			(((eqe)->dw0) & 0xffff)
+
+#define QM_EQE_CQN_MASK			0xffff
+
+/* doorbell */
+#define DOORBELL_CMD_SQ			0
+#define DOORBELL_CMD_CQ			1
+#define DOORBELL_CMD_EQ			2
+#define DOORBELL_CMD_AEQ		3
+
+#define DOORBELL_CMD_SEND_BASE		0x340
+
+#define QM_MEM_START_INIT		0x100040
+#define QM_MEM_INIT_DONE		0x100044
+#define QM_VFT_CFG_RDY			0x10006c
+#define QM_VFT_CFG_OP_WR		0x100058
+#define QM_VFT_CFG_TYPE			0x10005c
+#define QM_SQC_VFT			0x0
+#define QM_CQC_VFT			0x1
+#define QM_VFT_CFG_ADDRESS		0x100060
+#define QM_VFT_CFG_OP_ENABLE		0x100054
+
+#define QM_VFT_CFG_DATA_L		0x100064
+#define QM_VFT_CFG_DATA_H		0x100068
+#define QM_SQC_VFT_BUF_SIZE		(7ULL << 8)
+#define QM_SQC_VFT_SQC_SIZE		(5ULL << 12)
+#define QM_SQC_VFT_INDEX_NUMBER		(1ULL << 16)
+#define QM_SQC_VFT_BT_INDEX_SHIFT	22
+#define QM_SQC_VFT_START_SQN_SHIFT	28
+#define QM_SQC_VFT_VALID		(1ULL << 44)
+#define QM_CQC_VFT_BUF_SIZE		(7ULL << 8)
+#define QM_CQC_VFT_SQC_SIZE		(5ULL << 12)
+#define QM_CQC_VFT_INDEX_NUMBER		(1ULL << 16)
+#define QM_CQC_VFT_BT_INDEX_SHIFT	22
+#define QM_CQC_VFT_VALID		(1ULL << 28)
+
+struct cqe {
+	__le32 rsvd0;
+	__le16 cmd_id;
+	__le16 rsvd1;
+	__le16 sq_head;
+	__le16 sq_num;
+	__le16 rsvd2;
+	__le16 w7;
+};
+
+struct eqe {
+	__le32 dw0;
+};
+
+struct sqc {
+	__le16 head;
+	__le16 tail;
+	__le32 base_l;
+	__le32 base_h;
+	__le32 dw3;
+	__le16 qes;
+	__le16 rsvd0;
+	__le16 pasid;
+	__le16 w11;
+	__le16 cq_num;
+	__le16 w13;
+	__le32 rsvd1;
+};
+
+struct cqc {
+	__le16 head;
+	__le16 tail;
+	__le32 base_l;
+	__le32 base_h;
+	__le32 dw3;
+	__le16 qes;
+	__le16 rsvd0;
+	__le16 pasid;
+	__le16 w11;
+	__le32 dw6;
+	__le32 rsvd1;
+};
+
+#define INIT_QC(qc, base) do { \
+	(qc)->head = 0; \
+	(qc)->tail = 0; \
+	(qc)->base_l = lower_32_bits(base); \
+	(qc)->base_h = upper_32_bits(base); \
+	(qc)->pasid = 0; \
+	(qc)->w11 = 0; \
+	(qc)->rsvd1 = 0; \
+	(qc)->qes = QM_Q_DEPTH - 1; \
+} while (0)
+
+struct eqc {
+	__le16 head;
+	__le16 tail;
+	__le32 base_l;
+	__le32 base_h;
+	__le32 dw3;
+	__le32 rsvd[2];
+	__le32 dw6;
+};
+
+struct mailbox {
+	__le16 w0;
+	__le16 queue_num;
+	__le32 base_l;
+	__le32 base_h;
+	__le32 rsvd;
+};
+
+struct doorbell {
+	__le16 queue_num;
+	__le16 cmd;
+	__le16 index;
+	__le16 priority;
+};
+
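+/*
+ * QM_DMA_BUF() pastes its second argument as both the member name and the
+ * struct tag, e.g. QM_SQC(p) expands to ((struct sqc *)(p)->sqc.addr).
+ */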
+#define QM_DMA_BUF(p, buf) ((struct buf *)(p)->buf.addr)
+#define QM_SQC(p) QM_DMA_BUF(p, sqc)
+#define QM_CQC(p) QM_DMA_BUF(p, cqc)
+#define QM_EQC(p) QM_DMA_BUF(p, eqc)
+#define QM_EQE(p) QM_DMA_BUF(p, eqe)
+
+#define QP_SQE_DMA(qp) ((qp)->scqe.dma)
+#define QP_CQE(qp) ((struct cqe *)((qp)->scqe.addr + \
+				   (qp)->qm->sqe_size * QM_Q_DEPTH))
+#define QP_CQE_DMA(qp) ((qp)->scqe.dma + (qp)->qm->sqe_size * QM_Q_DEPTH)
+
+static inline void qm_writel(struct qm_info *qm, u32 val, u32 offset)
+{
+	writel(val, qm->io_base + offset);
+}
+
+struct qm_info;
+
+struct hisi_acc_qm_hw_ops {
+	int (*vft_config)(struct qm_info *qm, u16 base, u32 number);
+};
+
+static inline int hacc_qm_mb_is_busy(struct qm_info *qm)
+{
+	u32 val;
+
+	return readl_relaxed_poll_timeout(QM_ADDR(qm, MAILBOX_CMD_SEND_BASE),
+		val, !((val >> MAILBOX_BUSY_SHIFT) & 0x1), 10, 1000);
+}
+
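+/*
+ * The 16-byte mailbox message must hit the hardware in one write, so use an
+ * ldp/stp pair (hence the ARM64 dependency) followed by a dsb that orders
+ * the store before any later access to the device.
+ */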
+static inline void qm_mb_write(struct qm_info *qm, void *src)
+{
+	void __iomem *fun_base = QM_ADDR(qm, MAILBOX_CMD_SEND_BASE);
+	unsigned long tmp0 = 0, tmp1 = 0;
+
+	asm volatile("ldp %0, %1, %3\n"
+		     "stp %0, %1, %2\n"
+		     "dsb sy\n"
+		     : "=&r" (tmp0),
+		       "=&r" (tmp1),
+		       "+Q" (*((char *)fun_base))
+		     : "Q" (*((char *)src))
+		     : "memory");
+}
+
+static int qm_mb(struct qm_info *qm, u8 cmd, dma_addr_t dma_addr, u16 queue,
+		 bool op, bool event)
+{
+	struct mailbox mailbox;
+	int i = 0;
+	int ret = 0;
+
+	memset(&mailbox, 0, sizeof(struct mailbox));
+
+	mailbox.w0 = cmd |
+		     (event ? 0x1 << MAILBOX_EVENT_SHIFT : 0) |
+		     (op ? 0x1 << MAILBOX_OP_SHIFT : 0) |
+		     (0x1 << MAILBOX_BUSY_SHIFT);
+	mailbox.queue_num = queue;
+	mailbox.base_l = lower_32_bits(dma_addr);
+	mailbox.base_h = upper_32_bits(dma_addr);
+	mailbox.rsvd = 0;
+
+	mutex_lock(&qm->mailbox_lock);
+
+	while (hacc_qm_mb_is_busy(qm) && i < 10)
+		i++;
+	if (i >= 10) {
+		ret = -EBUSY;
+		dev_err(&qm->pdev->dev, "QM mail box is busy!");
+		goto busy_unlock;
+	}
+	qm_mb_write(qm, &mailbox);
+	i = 0;
+	while (hacc_qm_mb_is_busy(qm) && i < 10)
+		i++;
+	if (i >= 10) {
+		ret = -EBUSY;
+		dev_err(&qm->pdev->dev, "QM mailbox is still busy!\n");
+		goto busy_unlock;
+	}
+
+busy_unlock:
+	mutex_unlock(&qm->mailbox_lock);
+
+	return ret;
+}
+
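+/*
+ * Ring a doorbell: the 64-bit payload packs the queue number, the doorbell
+ * command, the ring index and the priority, then goes out in a single
+ * writeq.
+ */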
+static void qm_db(struct qm_info *qm, u16 qn, u8 cmd, u16 index, u8 priority)
+{
+	u64 doorbell = 0;
+
+	doorbell = (u64)qn | ((u64)cmd << 16);
+	doorbell |= ((u64)index | ((u64)priority << 16)) << 32;
+
+	writeq(doorbell, QM_ADDR(qm, DOORBELL_CMD_SEND_BASE));
+}
+
+/* Read the EQ interrupt source; any nonzero value means an event is pending */
+static u32 qm_get_irq_source(struct qm_info *qm)
+{
+	return readl(QM_ADDR(qm, QM_VF_EQ_INT_SOURCE));
+}
+
+static inline struct hisi_qp *to_hisi_qp(struct qm_info *qm, struct eqe *eqe)
+{
+	u16 cqn = eqe->dw0 & QM_EQE_CQN_MASK;
+	struct hisi_qp *qp;
+
+	read_lock(&qm->qps_lock);
+	qp = qm->qp_array[cqn];
+	read_unlock(&qm->qps_lock);
+
+	return qp;
+}
+
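+/*
+ * Advance the cq head; on wrap-around flip the CQC phase bit, which is how
+ * qm_poll_qp() tells freshly written cqes apart from stale ones.
+ */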
+static inline void qm_cq_head_update(struct hisi_qp *qp)
+{
+	if (qp->qp_status.cq_head == QM_Q_DEPTH - 1) {
+		QM_CQC(qp)->dw6 = QM_CQC(qp)->dw6 ^ CQC_PHASE_BIT;
+		qp->qp_status.cq_head = 0;
+	} else {
+		qp->qp_status.cq_head++;
+	}
+}
+
+static inline void qm_poll_qp(struct hisi_qp *qp, struct qm_info *qm)
+{
+	struct cqe *cqe;
+
+	cqe = QP_CQE(qp) + qp->qp_status.cq_head;
+
+	if (qp->req_cb) {
+		while (CQE_PHASE(cqe) == CQC_PHASE(QM_CQC(qp))) {
+			dma_rmb();
+			qp->req_cb(qp, QP_SQE_ADDR(qp) +
+				   qm->sqe_size *
+				   CQE_SQ_HEAD_INDEX(cqe));
+			qm_cq_head_update(qp);
+			cqe = QP_CQE(qp) + qp->qp_status.cq_head;
+		}
+	} else if (qp->event_cb) {
+		qp->event_cb(qp);
+		qm_cq_head_update(qp);
+		cqe = QP_CQE(qp) + qp->qp_status.cq_head;
+	}
+
+	qm_db(qm, qp->queue_id, DOORBELL_CMD_CQ, qp->qp_status.cq_head, 0);
+
+	/* set c_flag */
+	qm_db(qm, qp->queue_id, DOORBELL_CMD_CQ, qp->qp_status.cq_head, 1);
+}
+
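+/*
+ * Threaded part of the QM irq: walk the event queue while the eqe phase
+ * matches the eqc phase; each eqe carries the cqn of a completed queue, so
+ * poll that qp, then ring the EQ doorbell to return the eqe to hardware.
+ */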
+static irqreturn_t qm_irq_thread(int irq, void *data)
+{
+	struct qm_info *qm = data;
+	struct eqe *eqe = QM_EQE(qm) + qm->eq_head;
+	struct eqc *eqc = QM_EQC(qm);
+	struct hisi_qp *qp;
+
+	while (EQE_PHASE(eqe) == EQC_PHASE(eqc)) {
+		qp = to_hisi_qp(qm, eqe);
+		if (qp)
+			qm_poll_qp(qp, qm);
+
+		if (qm->eq_head == QM_Q_DEPTH - 1) {
+			eqc->dw6 = eqc->dw6 ^ EQC_PHASE_BIT;
+			eqe = QM_EQE(qm);
+			qm->eq_head = 0;
+		} else {
+			eqe++;
+			qm->eq_head++;
+		}
+
+		qm_db(qm, 0, DOORBELL_CMD_EQ, qm->eq_head, 0);
+	}
+
+	return IRQ_HANDLED;
+}
+
+static void qm_init_qp_status(struct hisi_qp *qp)
+{
+	struct hisi_acc_qp_status *qp_status = &qp->qp_status;
+
+	qp_status->sq_tail = 0;
+	qp_status->sq_head = 0;
+	qp_status->cq_head = 0;
+	qp_status->cqc_phase = 1;
+	qp_status->is_sq_full = 0;
+}
+
+/* check if bit in regs is 1 */
+static inline int qm_acc_check(struct qm_info *qm, u32 offset, u32 bit)
+{
+	u32 val;
+
+	return readl_relaxed_poll_timeout(QM_ADDR(qm, offset), val,
+					  val & BIT(bit), 10, 1000);
+}
+
+static inline int qm_init_q_buffer(struct device *dev, size_t size,
+				   struct qm_dma_buffer *db)
+{
+	db->size = size;
+	db->addr = dma_zalloc_coherent(dev, size, &db->dma, GFP_KERNEL);
+	if (!db->addr)
+		return -ENOMEM;
+
+	return 0;
+}
+
+static inline void qm_uninit_q_buffer(struct device *dev,
+				      struct qm_dma_buffer *db)
+{
+	dma_free_coherent(dev, db->size, db->addr, db->dma);
+}
+
+static inline int qm_init_bt(struct qm_info *qm, struct device *dev,
+			     size_t size, struct qm_dma_buffer *db, int mb_cmd)
+{
+	int ret;
+
+	ret = qm_init_q_buffer(dev, size, db);
+	if (ret)
+		return ret;
+
+	ret = qm_mb(qm, mb_cmd, db->dma, 0, 0, 0);
+	if (ret) {
+		qm_uninit_q_buffer(dev, db);
+		return ret;
+	}
+
+	return 0;
+}
+
+/* the config should be conducted after hisi_acc_init_qm_mem() */
+static int qm_vft_common_config(struct qm_info *qm, u16 base, u32 number)
+{
+	u64 tmp;
+	int ret;
+
+	ret = qm_acc_check(qm, QM_VFT_CFG_RDY, 0);
+	if (ret)
+		return ret;
+	qm_writel(qm, 0x0, QM_VFT_CFG_OP_WR);
+	qm_writel(qm, QM_SQC_VFT, QM_VFT_CFG_TYPE);
+	qm_writel(qm, qm->pdev->devfn, QM_VFT_CFG_ADDRESS);
+
+	tmp = QM_SQC_VFT_BUF_SIZE			|
+	      QM_SQC_VFT_SQC_SIZE			|
+	      QM_SQC_VFT_INDEX_NUMBER			|
+	      QM_SQC_VFT_VALID				|
+	      (u64)base << QM_SQC_VFT_START_SQN_SHIFT;
+
+	qm_writel(qm, tmp & 0xffffffff, QM_VFT_CFG_DATA_L);
+	qm_writel(qm, tmp >> 32, QM_VFT_CFG_DATA_H);
+
+	qm_writel(qm, 0x0, QM_VFT_CFG_RDY);
+	qm_writel(qm, 0x1, QM_VFT_CFG_OP_ENABLE);
+	ret = qm_acc_check(qm, QM_VFT_CFG_RDY, 0);
+	if (ret)
+		return ret;
+	tmp = 0;
+
+	qm_writel(qm, 0x0, QM_VFT_CFG_OP_WR);
+	qm_writel(qm, QM_CQC_VFT, QM_VFT_CFG_TYPE);
+	qm_writel(qm, qm->pdev->devfn, QM_VFT_CFG_ADDRESS);
+
+	tmp = QM_CQC_VFT_BUF_SIZE			|
+	      QM_CQC_VFT_SQC_SIZE			|
+	      QM_CQC_VFT_INDEX_NUMBER			|
+	      QM_CQC_VFT_VALID;
+
+	qm_writel(qm, tmp & 0xffffffff, QM_VFT_CFG_DATA_L);
+	qm_writel(qm, tmp >> 32, QM_VFT_CFG_DATA_H);
+
+	qm_writel(qm, 0x0, QM_VFT_CFG_RDY);
+	qm_writel(qm, 0x1, QM_VFT_CFG_OP_ENABLE);
+	ret = qm_acc_check(qm, QM_VFT_CFG_RDY, 0);
+	if (ret)
+		return ret;
+	return 0;
+}
+
+static struct hisi_acc_qm_hw_ops qm_hw_ops_v1 = {
+	.vft_config = qm_vft_common_config,
+};
+
+struct hisi_qp *hisi_qm_create_qp(struct qm_info *qm, u8 alg_type)
+{
+	struct hisi_qp *qp;
+	int qp_index;
+	int ret;
+
+	write_lock(&qm->qps_lock);
+	qp_index = find_first_zero_bit(qm->qp_bitmap, qm->qp_num);
+	if (qp_index >= qm->qp_num) {
+		write_unlock(&qm->qps_lock);
+		return ERR_PTR(-EBUSY);
+	}
+	set_bit(qp_index, qm->qp_bitmap);
+
+	qp = kzalloc(sizeof(*qp), GFP_KERNEL);
+	if (!qp) {
+		ret = -ENOMEM;
+		write_unlock(&qm->qps_lock);
+		goto err_with_bitset;
+	}
+
+	qp->queue_id = qp_index;
+	qp->qm = qm;
+	qp->alg_type = alg_type;
+	qm_init_qp_status(qp);
+
+	write_unlock(&qm->qps_lock);
+	return qp;
+
+err_with_bitset:
+	clear_bit(qp_index, qm->qp_bitmap);
+
+	return ERR_PTR(ret);
+}
+EXPORT_SYMBOL_GPL(hisi_qm_create_qp);
+
+int hisi_qm_start_qp(struct hisi_qp *qp, unsigned long arg)
+{
+	struct qm_info *qm = qp->qm;
+	struct device *dev = &qm->pdev->dev;
+	struct sqc *sqc;
+	struct cqc *cqc;
+	int qp_index = qp->queue_id;
+	int pasid = arg;
+	int ret;
+
+	/* set sq and cq context */
+	qp->sqc.addr = QM_SQC(qm) + qp_index;
+	qp->sqc.dma = qm->sqc.dma + qp_index * sizeof(struct sqc);
+	sqc = QM_SQC(qp);
+
+	qp->cqc.addr = QM_CQC(qm) + qp_index;
+	qp->cqc.dma = qm->cqc.dma + qp_index * sizeof(struct cqc);
+	cqc = QM_CQC(qp);
+
+	/* allocate sq and cq */
+	ret = qm_init_q_buffer(dev,
+		qm->sqe_size * QM_Q_DEPTH + sizeof(struct cqe) * QM_Q_DEPTH,
+		&qp->scqe);
+	if (ret)
+		return ret;
+
+	INIT_QC(sqc, qp->scqe.dma);
+	sqc->pasid = pasid;
+	sqc->dw3 = (0 << SQ_HOP_NUM_SHIFT)      |
+		   (0 << SQ_PAGE_SIZE_SHIFT)    |
+		   (0 << SQ_BUF_SIZE_SHIFT)     |
+		   (ilog2(qm->sqe_size) << SQ_SQE_SIZE_SHIFT);
+	sqc->cq_num = qp_index;
+	sqc->w13 = 0 << SQ_PRIORITY_SHIFT	|
+		   1 << SQ_ORDERS_SHIFT		|
+		   (qp->alg_type & SQ_TYPE_MASK) << SQ_TYPE_SHIFT;
+
+	ret = qm_mb(qm, MAILBOX_CMD_SQC, qp->sqc.dma, qp_index, 0, 0);
+	if (ret)
+		return ret;
+
+	INIT_QC(cqc, qp->scqe.dma + qm->sqe_size * QM_Q_DEPTH);
+	cqc->dw3 = (0 << CQ_HOP_NUM_SHIFT)	|
+		   (0 << CQ_PAGE_SIZE_SHIFT)	|
+		   (0 << CQ_BUF_SIZE_SHIFT)	|
+		   (4 << CQ_SQE_SIZE_SHIFT);
+	cqc->dw6 = 1 << CQ_PHASE_SHIFT | 1 << CQ_FLAG_SHIFT;
+
+	ret = qm_mb(qm, MAILBOX_CMD_CQC, qp->cqc.dma, qp_index, 0, 0);
+	if (ret)
+		return ret;
+
+	write_lock(&qm->qps_lock);
+	qm->qp_array[qp_index] = qp;
+	init_completion(&qp->completion);
+	write_unlock(&qm->qps_lock);
+
+	return qp_index;
+}
+EXPORT_SYMBOL_GPL(hisi_qm_start_qp);
+
+void hisi_qm_release_qp(struct hisi_qp *qp)
+{
+	struct qm_info *qm = qp->qm;
+	struct device *dev = &qm->pdev->dev;
+
+	write_lock(&qm->qps_lock);
+	qm->qp_array[qp->queue_id] = NULL;
+	bitmap_clear(qm->qp_bitmap, qp->queue_id, 1);
+	write_unlock(&qm->qps_lock);
+
+	qm_uninit_q_buffer(dev, &qp->scqe);
+	kfree(qp);
+}
+EXPORT_SYMBOL_GPL(hisi_qm_release_qp);
+
+static void *qm_get_avail_sqe(struct hisi_qp *qp)
+{
+	struct hisi_acc_qp_status *qp_status = &qp->qp_status;
+	void *sq_base = QP_SQE_ADDR(qp);
+	u16 sq_tail = qp_status->sq_tail;
+
+	if (qp_status->is_sq_full == 1)
+		return NULL;
+
+	return sq_base + sq_tail * qp->qm->sqe_size;
+}
+
+int hisi_qp_send(struct hisi_qp *qp, void *msg)
+{
+	struct hisi_acc_qp_status *qp_status = &qp->qp_status;
+	u16 sq_tail = qp_status->sq_tail;
+	u16 sq_tail_next = (sq_tail + 1) % QM_Q_DEPTH;
+	unsigned long timeout = msecs_to_jiffies(100);
+	void *sqe = qm_get_avail_sqe(qp);
+
+	if (!sqe)
+		return -ENOSPC;
+
+	memcpy(sqe, msg, qp->qm->sqe_size);
+
+	qm_db(qp->qm, qp->queue_id, DOORBELL_CMD_SQ, sq_tail_next, 0);
+
+	qp_status->sq_tail = sq_tail_next;
+
+	if (qp_status->sq_tail == qp_status->sq_head)
+		qp_status->is_sq_full = 1;
+
+	/* wait until the job is finished; report a timeout if it is not */
+	if (!wait_for_completion_timeout(&qp->completion, timeout))
+		return -ETIME;
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(hisi_qp_send);
+
+int hisi_qm_init(const char *dev_name, struct qm_info *qm)
+{
+	struct pci_dev *pdev = qm->pdev;
+	int ret;
+
+	ret = pci_enable_device_mem(pdev);
+	if (ret < 0) {
+		dev_err(&pdev->dev, "Can't enable device mem!\n");
+		return ret;
+	}
+
+	ret = pci_request_mem_regions(pdev, dev_name);
+	if (ret < 0) {
+		dev_err(&pdev->dev, "Can't request mem regions!\n");
+		goto err_with_pcidev;
+	}
+
+	qm->dev_name = dev_name;
+	qm->phys_base = pci_resource_start(pdev, 2);
+	qm->size = pci_resource_len(qm->pdev, 2);
+	qm->io_base = devm_ioremap(&pdev->dev, qm->phys_base, qm->size);
+	if (!qm->io_base) {
+		ret = -EIO;
+		goto err_with_mem_regions;
+	}
+
+	ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
+	if (ret < 0) {
+		dev_err(&pdev->dev, "Failed to set 64-bit DMA mask!\n");
+		goto err_with_mem_regions;
+	}
+
+	pci_set_master(pdev);
+
+	ret = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_MSI);
+	if (ret < 0) {
+		dev_err(&pdev->dev, "Enable MSI vectors fail!\n");
+		goto err_with_mem_regions;
+	}
+
+	qm->eq_head = 0;
+	mutex_init(&qm->mailbox_lock);
+	rwlock_init(&qm->qps_lock);
+
+	if (qm->ver)
+		qm->ops = &qm_hw_ops_v1;
+
+	return 0;
+
+err_with_mem_regions:
+	pci_release_mem_regions(pdev);
+err_with_pcidev:
+	pci_disable_device(pdev);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(hisi_qm_init);
+
+void hisi_qm_uninit(struct qm_info *qm)
+{
+	struct pci_dev *pdev = qm->pdev;
+
+	pci_free_irq_vectors(pdev);
+	pci_release_mem_regions(pdev);
+	pci_disable_device(pdev);
+}
+EXPORT_SYMBOL_GPL(hisi_qm_uninit);
+
+static irqreturn_t qm_irq(int irq, void *data)
+{
+	struct qm_info *qm = data;
+	u32 int_source;
+
+	int_source = qm_get_irq_source(qm);
+	if (int_source)
+		return IRQ_WAKE_THREAD;
+
+	dev_err(&qm->pdev->dev, "invalid int source %d\n", int_source);
+
+	return IRQ_HANDLED;
+}
+
+int hisi_qm_start(struct qm_info *qm)
+{
+	struct pci_dev *pdev = qm->pdev;
+	struct device *dev = &pdev->dev;
+	int ret;
+
+	if (qm->pdev->is_physfn)
+		qm->ops->vft_config(qm, qm->qp_base, qm->qp_num);
+
+	ret = qm_init_q_buffer(dev, sizeof(struct eqc), &qm->eqc);
+	if (ret)
+		goto err_out;
+
+	ret = qm_init_q_buffer(dev, sizeof(struct eqe) * QM_Q_DEPTH, &qm->eqe);
+	if (ret)
+		goto err_with_eqc;
+
+	QM_EQC(qm)->base_l = lower_32_bits(qm->eqe.dma);
+	QM_EQC(qm)->base_h = upper_32_bits(qm->eqe.dma);
+	QM_EQC(qm)->dw3 = 2 << MB_EQC_EQE_SHIFT;
+	QM_EQC(qm)->dw6 = (QM_Q_DEPTH - 1) | (1 << MB_EQC_PHASE_SHIFT);
+	ret = qm_mb(qm, MAILBOX_CMD_EQC, qm->eqc.dma, 0, 0, 0);
+	if (ret)
+		goto err_with_eqe;
+
+	qm->qp_bitmap = kcalloc(BITS_TO_LONGS(qm->qp_num), sizeof(long),
+				GFP_KERNEL);
+	if (!qm->qp_bitmap) {
+		ret = -ENOMEM;
+		goto err_with_eqe;
+	}
+
+	qm->qp_array = kcalloc(qm->qp_num, sizeof(struct hisi_qp *),
+			       GFP_KERNEL);
+	if (!qm->qp_array) {
+		ret = -ENOMEM;
+		goto err_with_bitmap;
+	}
+
+	/* Init sqc_bt */
+	ret = qm_init_bt(qm, dev, sizeof(struct sqc) * qm->qp_num, &qm->sqc,
+			 MAILBOX_CMD_SQC_BT);
+	if (ret)
+		goto err_with_qp_array;
+
+	/* Init cqc_bt */
+	ret = qm_init_bt(qm, dev, sizeof(struct cqc) * qm->qp_num, &qm->cqc,
+			 MAILBOX_CMD_CQC_BT);
+	if (ret)
+		goto err_with_sqc;
+
+	ret = request_threaded_irq(pci_irq_vector(pdev, 0), qm_irq,
+				   qm_irq_thread, IRQF_SHARED, qm->dev_name,
+				   qm);
+	if (ret)
+		goto err_with_cqc;
+
+	writel(0x0, QM_ADDR(qm, QM_VF_EQ_INT_MASK));
+
+	return 0;
+
+err_with_cqc:
+	qm_uninit_q_buffer(dev, &qm->cqc);
+err_with_sqc:
+	qm_uninit_q_buffer(dev, &qm->sqc);
+err_with_qp_array:
+	kfree(qm->qp_array);
+err_with_bitmap:
+	kfree(qm->qp_bitmap);
+err_with_eqe:
+	qm_uninit_q_buffer(dev, &qm->eqe);
+err_with_eqc:
+	qm_uninit_q_buffer(dev, &qm->eqc);
+err_out:
+	return ret;
+}
+EXPORT_SYMBOL_GPL(hisi_qm_start);
+
+void hisi_qm_stop(struct qm_info *qm)
+{
+	struct pci_dev *pdev = qm->pdev;
+	struct device *dev = &pdev->dev;
+
+	free_irq(pci_irq_vector(pdev, 0), qm);
+	qm_uninit_q_buffer(dev, &qm->cqc);
+	kfree(qm->qp_array);
+	kfree(qm->qp_bitmap);
+	qm_uninit_q_buffer(dev, &qm->eqe);
+	qm_uninit_q_buffer(dev, &qm->eqc);
+}
+EXPORT_SYMBOL_GPL(hisi_qm_stop);
+
+/* Put the QM into the init state so the accelerator config becomes available */
+int hisi_qm_mem_start(struct qm_info *qm)
+{
+	u32 val;
+
+	qm_writel(qm, 0x1, QM_MEM_START_INIT);
+	return readl_relaxed_poll_timeout(QM_ADDR(qm, QM_MEM_INIT_DONE), val,
+					  val & BIT(0), 10, 1000);
+}
+EXPORT_SYMBOL_GPL(hisi_qm_mem_start);
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Zhou Wang <wangzhou1@hisilicon.com>");
+MODULE_DESCRIPTION("HiSilicon Accelerator queue manager driver");
diff --git a/drivers/crypto/hisilicon/qm.h b/drivers/crypto/hisilicon/qm.h
new file mode 100644
index 000000000000..0e81182ac6a8
--- /dev/null
+++ b/drivers/crypto/hisilicon/qm.h
@@ -0,0 +1,110 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+#ifndef HISI_ACC_QM_H
+#define HISI_ACC_QM_H
+
+#include <linux/dmapool.h>
+#include <linux/iopoll.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/slab.h>
+
+#define QM_CQE_SIZE			16
+/* default queue depth for sq/cq/eq */
+#define QM_Q_DEPTH			1024
+
+/* qm user domain */
+#define QM_ARUSER_M_CFG_1		0x100088
+#define QM_ARUSER_M_CFG_ENABLE		0x100090
+#define QM_AWUSER_M_CFG_1		0x100098
+#define QM_AWUSER_M_CFG_ENABLE		0x1000a0
+#define QM_WUSER_M_CFG_ENABLE		0x1000a8
+
+/* qm cache */
+#define QM_CACHE_CTL			0x100050
+#define QM_AXI_M_CFG			0x1000ac
+#define QM_AXI_M_CFG_ENABLE		0x1000b0
+#define QM_PEH_AXUSER_CFG		0x1000cc
+#define QM_PEH_AXUSER_CFG_ENABLE	0x1000d0
+
+#define QP_SQE_ADDR(qp) ((qp)->scqe.addr)
+
+struct qm_dma_buffer {
+	int size;
+	void *addr;
+	dma_addr_t dma;
+};
+
+struct qm_info {
+	int ver;
+	const char *dev_name;
+	struct pci_dev *pdev;
+
+	resource_size_t phys_base;
+	resource_size_t size;
+	void __iomem *io_base;
+
+	u32 sqe_size;
+	u32 qp_base;
+	u32 qp_num;
+
+	struct qm_dma_buffer sqc, cqc, eqc, eqe;
+
+	u32 eq_head;
+
+	rwlock_t qps_lock;
+	unsigned long *qp_bitmap;
+	struct hisi_qp **qp_array;
+
+	struct mutex mailbox_lock;
+
+	struct hisi_acc_qm_hw_ops *ops;
+};
+#define QM_ADDR(qm, off) ((qm)->io_base + (off))
+
+struct hisi_acc_qp_status {
+	u16 sq_tail;
+	u16 sq_head;
+	u16 cq_head;
+	bool cqc_phase;
+	int is_sq_full;
+};
+
+struct hisi_qp;
+
+struct hisi_qp_ops {
+	int (*fill_sqe)(void *sqe, void *q_parm, void *d_parm);
+};
+
+struct hisi_qp {
+	/* sq number in this function */
+	u32 queue_id;
+	u8 alg_type;
+	u8 req_type;
+
+	struct qm_dma_buffer sqc, cqc;
+	struct qm_dma_buffer scqe;
+
+	struct hisi_acc_qp_status qp_status;
+
+	struct qm_info *qm;
+
+	/* for crypto sync API */
+	struct completion completion;
+
+	struct hisi_qp_ops *hw_ops;
+	void *qp_ctx;
+	void (*event_cb)(struct hisi_qp *qp);
+	void (*req_cb)(struct hisi_qp *qp, void *data);
+};
+
+extern int hisi_qm_init(const char *dev_name, struct qm_info *qm);
+extern void hisi_qm_uninit(struct qm_info *qm);
+extern int hisi_qm_start(struct qm_info *qm);
+extern void hisi_qm_stop(struct qm_info *qm);
+extern int hisi_qm_mem_start(struct qm_info *qm);
+extern struct hisi_qp *hisi_qm_create_qp(struct qm_info *qm, u8 alg_type);
+extern int hisi_qm_start_qp(struct hisi_qp *qp, unsigned long arg);
+extern void hisi_qm_release_qp(struct hisi_qp *qp);
+extern int hisi_qp_send(struct hisi_qp *qp, void *msg);
+#endif
-- 
2.17.1
