From: <jerinj@marvell.com>
To: <dev@dpdk.org>, Jerin Jacob <jerinj@marvell.com>,
	Nithin Dabilpuram <ndabilpuram@marvell.com>,
	Vamsi Attunuru <vattunuru@marvell.com>
Cc: Krzysztof Kanas <kkanas@marvell.com>
Subject: [dpdk-dev] [PATCH v3 11/27] common/octeontx2: add PF to VF mailbox IRQ and msg handlers
Date: Mon, 17 Jun 2019 21:25:21 +0530
Message-ID: <20190617155537.36144-12-jerinj@marvell.com>
In-Reply-To: <20190617155537.36144-1-jerinj@marvell.com>

From: Nithin Dabilpuram <ndabilpuram@marvell.com>

The PF has the additional responsibility of acting as a server for VF
messages: it forwards them to the AF and, once the AF has processed
them, forwards the responses back to the VFs.
otx2_vf_pf_mbox_irq() processes the VF mailbox requests and
af_pf_wait_msg() waits until a response is received back from the AF.
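
The interrupt handler itself only latches which VFs raised the mailbox
interrupt and defers the actual message processing to an EAL alarm
callback. The stand-alone sketch below (not part of this patch;
demo_irq(), demo_work() and the 1000 us delay are made-up names and
values, used only for illustration) shows that latch-and-defer pattern
with the public rte_alarm API:

#include <stdint.h>
#include <rte_alarm.h>

struct demo_state {
	uint64_t pending;  /* one bit per VF that raised an interrupt */
	int timer_set;     /* avoid scheduling the alarm twice */
};

static void
demo_work(void *param)
{
	struct demo_state *st = param;
	int vf;

	/* Slow path: runs from the alarm thread, not from the IRQ handler */
	for (vf = 0; vf < 64; vf++) {
		if (st->pending & (1ULL << vf)) {
			/* process this VF's mailbox request here */
			st->pending &= ~(1ULL << vf);
		}
	}
	st->timer_set = 0;
}

static void
demo_irq(void *param)
{
	struct demo_state *st = param;

	/* Fast path: in the real handler the pending bits are read from
	 * RVU_PF_VFPF_MBOX_INTX and cleared by writing them back.
	 */
	st->pending |= 1ULL << 0;

	if (!st->timer_set) {
		st->timer_set = 1;
		/* rte_eal_alarm_set() takes the delay in microseconds */
		rte_eal_alarm_set(1000, demo_work, st);
	}
}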

Signed-off-by: Nithin Dabilpuram <ndabilpuram@marvell.com>
Signed-off-by: Krzysztof Kanas <kkanas@marvell.com>
---
 drivers/common/octeontx2/otx2_dev.c | 240 +++++++++++++++++++++++++++-
 1 file changed, 239 insertions(+), 1 deletion(-)

diff --git a/drivers/common/octeontx2/otx2_dev.c b/drivers/common/octeontx2/otx2_dev.c
index 090cfc8f1..efb28a9d2 100644
--- a/drivers/common/octeontx2/otx2_dev.c
+++ b/drivers/common/octeontx2/otx2_dev.c
@@ -7,6 +7,7 @@
 #include <sys/mman.h>
 #include <unistd.h>
 
+#include <rte_alarm.h>
 #include <rte_common.h>
 #include <rte_eal.h>
 #include <rte_memcpy.h>
@@ -50,6 +51,200 @@ mbox_mem_unmap(void *va, size_t size)
 		munmap(va, size);
 }
 
+static int
+af_pf_wait_msg(struct otx2_dev *dev, uint16_t vf, int num_msg)
+{
+	uint32_t timeout = 0, sleep = 1; struct otx2_mbox *mbox = dev->mbox;
+	struct otx2_mbox_dev *mdev = &mbox->dev[0];
+	volatile uint64_t int_status;
+	struct mbox_hdr *req_hdr;
+	struct mbox_msghdr *msg;
+	struct mbox_msghdr *rsp;
+	uint64_t offset;
+	size_t size;
+	int i;
+
+	/* We need to disable PF interrupts. We are in timer interrupt */
+	otx2_write64(~0ull, dev->bar2 + RVU_PF_INT_ENA_W1C);
+
+	/* Send message */
+	otx2_mbox_msg_send(mbox, 0);
+
+	do {
+		rte_delay_ms(sleep);
+		timeout++;
+		if (timeout >= MBOX_RSP_TIMEOUT) {
+			otx2_err("Routed messages %d timeout: %dms",
+				 num_msg, MBOX_RSP_TIMEOUT);
+			break;
+		}
+		int_status = otx2_read64(dev->bar2 + RVU_PF_INT);
+	} while ((int_status & 0x1) != 0x1);
+
+	/* Clear */
+	otx2_write64(~0ull, dev->bar2 + RVU_PF_INT);
+
+	/* Enable interrupts */
+	otx2_write64(~0ull, dev->bar2 + RVU_PF_INT_ENA_W1S);
+
+	rte_spinlock_lock(&mdev->mbox_lock);
+
+	req_hdr = (struct mbox_hdr *)((uintptr_t)mdev->mbase + mbox->rx_start);
+	if (req_hdr->num_msgs != num_msg)
+		otx2_err("Routed messages: %d received: %d", num_msg,
+			 req_hdr->num_msgs);
+
+	/* Get messages from mbox */
+	offset = mbox->rx_start +
+			RTE_ALIGN(sizeof(struct mbox_hdr), MBOX_MSG_ALIGN);
+	for (i = 0; i < req_hdr->num_msgs; i++) {
+		msg = (struct mbox_msghdr *)((uintptr_t)mdev->mbase + offset);
+		size = mbox->rx_start + msg->next_msgoff - offset;
+
+		/* Reserve PF/VF mbox message */
+		size = RTE_ALIGN(size, MBOX_MSG_ALIGN);
+		rsp = otx2_mbox_alloc_msg(&dev->mbox_vfpf, vf, size);
+		otx2_mbox_rsp_init(msg->id, rsp);
+
+		/* Copy message from AF<->PF mbox to PF<->VF mbox */
+		otx2_mbox_memcpy((uint8_t *)rsp + sizeof(struct mbox_msghdr),
+				 (uint8_t *)msg + sizeof(struct mbox_msghdr),
+				 size - sizeof(struct mbox_msghdr));
+
+		/* Set status and sender pf_func data */
+		rsp->rc = msg->rc;
+		rsp->pcifunc = msg->pcifunc;
+
+		offset = mbox->rx_start + msg->next_msgoff;
+	}
+	rte_spinlock_unlock(&mdev->mbox_lock);
+
+	return req_hdr->num_msgs;
+}
+
+static int
+vf_pf_process_msgs(struct otx2_dev *dev, uint16_t vf)
+{
+	int offset, routed = 0; struct otx2_mbox *mbox = &dev->mbox_vfpf;
+	struct otx2_mbox_dev *mdev = &mbox->dev[vf];
+	struct mbox_hdr *req_hdr;
+	struct mbox_msghdr *msg;
+	size_t size;
+	uint16_t i;
+
+	req_hdr = (struct mbox_hdr *)((uintptr_t)mdev->mbase + mbox->rx_start);
+	if (!req_hdr->num_msgs)
+		return 0;
+
+	offset = mbox->rx_start + RTE_ALIGN(sizeof(*req_hdr), MBOX_MSG_ALIGN);
+
+	for (i = 0; i < req_hdr->num_msgs; i++) {
+
+		msg = (struct mbox_msghdr *)((uintptr_t)mdev->mbase + offset);
+		size = mbox->rx_start + msg->next_msgoff - offset;
+
+		/* RVU_PF_FUNC_S */
+		msg->pcifunc = otx2_pfvf_func(dev->pf, vf);
+
+		if (msg->id == MBOX_MSG_READY) {
+			struct ready_msg_rsp *rsp;
+			uint16_t max_bits = sizeof(dev->active_vfs[0]) * 8;
+
+			/* Handle READY message in PF */
+			dev->active_vfs[vf / max_bits] |=
+						BIT_ULL(vf % max_bits);
+			rsp = (struct ready_msg_rsp *)
+			       otx2_mbox_alloc_msg(mbox, vf, sizeof(*rsp));
+			otx2_mbox_rsp_init(msg->id, rsp);
+
+			/* PF/VF function ID */
+			rsp->hdr.pcifunc = msg->pcifunc;
+			rsp->hdr.rc = 0;
+		} else {
+			struct mbox_msghdr *af_req;
+			/* Reserve AF/PF mbox message */
+			size = RTE_ALIGN(size, MBOX_MSG_ALIGN);
+			af_req = otx2_mbox_alloc_msg(dev->mbox, 0, size);
+			otx2_mbox_req_init(msg->id, af_req);
+
+			/* Copy message from VF<->PF mbox to PF<->AF mbox */
+			otx2_mbox_memcpy((uint8_t *)af_req +
+				   sizeof(struct mbox_msghdr),
+				   (uint8_t *)msg + sizeof(struct mbox_msghdr),
+				   size - sizeof(struct mbox_msghdr));
+			af_req->pcifunc = msg->pcifunc;
+			routed++;
+		}
+		offset = mbox->rx_start + msg->next_msgoff;
+	}
+
+	if (routed > 0) {
+		otx2_base_dbg("pf:%d routed %d messages from vf:%d to AF",
+			      dev->pf, routed, vf);
+		af_pf_wait_msg(dev, vf, routed);
+		otx2_mbox_reset(dev->mbox, 0);
+	}
+
+	/* Send mbox responses to VF */
+	if (mdev->num_msgs) {
+		otx2_base_dbg("pf:%d reply %d messages to vf:%d",
+			      dev->pf, mdev->num_msgs, vf);
+		otx2_mbox_msg_send(mbox, vf);
+	}
+
+	return i;
+}
+
+static void
+otx2_vf_pf_mbox_handle_msg(void *param)
+{
+	uint16_t vf, max_vf, max_bits;
+	struct otx2_dev *dev = param;
+
+	max_bits = sizeof(dev->intr.bits[0]) * sizeof(uint64_t);
+	max_vf = max_bits * MAX_VFPF_DWORD_BITS;
+
+	for (vf = 0; vf < max_vf; vf++) {
+		if (dev->intr.bits[vf/max_bits] & BIT_ULL(vf%max_bits)) {
+			otx2_base_dbg("Process vf:%d request (pf:%d, vf:%d)",
+				       vf, dev->pf, dev->vf);
+			vf_pf_process_msgs(dev, vf);
+			dev->intr.bits[vf/max_bits] &= ~(BIT_ULL(vf%max_bits));
+		}
+	}
+	dev->timer_set = 0;
+}
+
+static void
+otx2_vf_pf_mbox_irq(void *param)
+{
+	struct otx2_dev *dev = param;
+	bool alarm_set = false;
+	uint64_t intr;
+	int vfpf;
+
+	for (vfpf = 0; vfpf < MAX_VFPF_DWORD_BITS; ++vfpf) {
+		intr = otx2_read64(dev->bar2 + RVU_PF_VFPF_MBOX_INTX(vfpf));
+		if (!intr)
+			continue;
+
+		otx2_base_dbg("vfpf: %d intr: 0x%" PRIx64 " (pf:%d, vf:%d)",
+			      vfpf, intr, dev->pf, dev->vf);
+
+		/* Save and clear intr bits */
+		dev->intr.bits[vfpf] |= intr;
+		otx2_write64(intr, dev->bar2 + RVU_PF_VFPF_MBOX_INTX(vfpf));
+		alarm_set = true;
+	}
+
+	if (!dev->timer_set && alarm_set) {
+		dev->timer_set = 1;
+		/* Start timer to handle messages */
+		rte_eal_alarm_set(VF_PF_MBOX_TIMER_MS,
+				  otx2_vf_pf_mbox_handle_msg, dev);
+	}
+}
+
 static void
 otx2_process_msgs(struct otx2_dev *dev, struct otx2_mbox *mbox)
 {
@@ -118,12 +313,33 @@ static int
 mbox_register_irq(struct rte_pci_device *pci_dev, struct otx2_dev *dev)
 {
 	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
-	int rc;
+	int i, rc;
+
+	/* HW clear irq */
+	for (i = 0; i < MAX_VFPF_DWORD_BITS; ++i)
+		otx2_write64(~0ull, dev->bar2 +
+			     RVU_PF_VFPF_MBOX_INT_ENA_W1CX(i));
 
 	otx2_write64(~0ull, dev->bar2 + RVU_PF_INT_ENA_W1C);
 
 	dev->timer_set = 0;
 
+	/* MBOX interrupt for VF(0...63) <-> PF */
+	rc = otx2_register_irq(intr_handle, otx2_vf_pf_mbox_irq, dev,
+			       RVU_PF_INT_VEC_VFPF_MBOX0);
+
+	if (rc) {
+		otx2_err("Fail to register PF(VF0-63) mbox irq");
+		return rc;
+	}
+	/* MBOX interrupt for VF(64...128) <-> PF */
+	rc = otx2_register_irq(intr_handle, otx2_vf_pf_mbox_irq, dev,
+			       RVU_PF_INT_VEC_VFPF_MBOX1);
+
+	if (rc) {
+		otx2_err("Fail to register PF(VF64-128) mbox irq");
+		return rc;
+	}
 	/* MBOX interrupt AF <-> PF */
 	rc = otx2_register_irq(intr_handle, otx2_af_pf_mbox_irq,
 			       dev, RVU_PF_INT_VEC_AFPF_MBOX);
@@ -132,6 +348,11 @@ mbox_register_irq(struct rte_pci_device *pci_dev, struct otx2_dev *dev)
 		return rc;
 	}
 
+	/* HW enable intr */
+	for (i = 0; i < MAX_VFPF_DWORD_BITS; ++i)
+		otx2_write64(~0ull, dev->bar2 +
+			RVU_PF_VFPF_MBOX_INT_ENA_W1SX(i));
+
 	otx2_write64(~0ull, dev->bar2 + RVU_PF_INT);
 	otx2_write64(~0ull, dev->bar2 + RVU_PF_INT_ENA_W1S);
 
@@ -142,11 +363,28 @@ static void
 mbox_unregister_irq(struct rte_pci_device *pci_dev, struct otx2_dev *dev)
 {
 	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
+	int i;
+
+	/* HW clear irq */
+	for (i = 0; i < MAX_VFPF_DWORD_BITS; ++i)
+		otx2_write64(~0ull, dev->bar2 +
+			     RVU_PF_VFPF_MBOX_INT_ENA_W1CX(i));
 
 	otx2_write64(~0ull, dev->bar2 + RVU_PF_INT_ENA_W1C);
 
 	dev->timer_set = 0;
 
+	rte_eal_alarm_cancel(otx2_vf_pf_mbox_handle_msg, dev);
+
+	/* Unregister the interrupt handler for each vectors */
+	/* MBOX interrupt for VF(0...63) <-> PF */
+	otx2_unregister_irq(intr_handle, otx2_vf_pf_mbox_irq, dev,
+			    RVU_PF_INT_VEC_VFPF_MBOX0);
+
+	/* MBOX interrupt for VF(64...128) <-> PF */
+	otx2_unregister_irq(intr_handle, otx2_vf_pf_mbox_irq, dev,
+			    RVU_PF_INT_VEC_VFPF_MBOX1);
+
 	/* MBOX interrupt AF <-> PF */
 	otx2_unregister_irq(intr_handle, otx2_af_pf_mbox_irq, dev,
 			    RVU_PF_INT_VEC_AFPF_MBOX);
-- 
2.21.0



Thread overview: 122+ messages
2019-05-23  8:13 [dpdk-dev] [PATCH v1 00/27] OCTEON TX2 common and mempool driver jerinj
2019-05-23  8:13 ` [dpdk-dev] [PATCH v1 01/27] common/octeontx2: add build infrastructure and HW definition jerinj
2019-05-23  8:13 ` [dpdk-dev] [PATCH v1 02/27] common/octeontx2: add IO handling APIs jerinj
2019-05-23  8:13 ` [dpdk-dev] [PATCH v1 03/27] common/octeontx2: add mbox request and response definition jerinj
2019-05-23  8:13 ` [dpdk-dev] [PATCH v1 04/27] common/octeontx2: add mailbox base support infra jerinj
2019-05-23  8:13 ` [dpdk-dev] [PATCH v1 05/27] common/octeontx2: add runtime log infra jerinj
2019-05-23  8:13 ` [dpdk-dev] [PATCH v1 06/27] common/octeontx2: add mailbox send and receive support jerinj
2019-05-23  8:13 ` [dpdk-dev] [PATCH v1 07/27] common/octeontx2: introduce common device class jerinj
2019-05-23  8:13 ` [dpdk-dev] [PATCH v1 08/27] common/octeontx2: introduce irq handling functions jerinj
2019-05-23  8:13 ` [dpdk-dev] [PATCH v1 09/27] common/octeontx2: handle intra device operations jerinj
2019-05-23  8:13 ` [dpdk-dev] [PATCH v1 10/27] common/octeontx2: add AF to PF mailbox IRQ and msg handlers jerinj
2019-05-23  8:13 ` [dpdk-dev] [PATCH v1 11/27] common/octeontx2: add PF to VF " jerinj
2019-05-23  8:13 ` [dpdk-dev] [PATCH v1 12/27] common/octeontx2: add VF mailbox IRQ and msg handler jerinj
2019-05-23  8:13 ` [dpdk-dev] [PATCH v1 13/27] common/octeontx2: add uplink message support jerinj
2019-05-23  8:13 ` [dpdk-dev] [PATCH v1 14/27] common/octeontx2: add FLR IRQ handler jerinj
2019-05-23  8:13 ` [dpdk-dev] [PATCH v1 15/27] doc: add Marvell OCTEON TX2 platform guide jerinj
2019-05-23  8:13 ` [dpdk-dev] [PATCH v1 16/27] mempool/octeontx2: add build infra and device probe jerinj
2019-05-23  8:13 ` [dpdk-dev] [PATCH v1 17/27] drivers: add init and fini on octeontx2 NPA object jerinj
2019-05-23  8:13 ` [dpdk-dev] [PATCH v1 18/27] mempool/octeontx2: add NPA HW operations jerinj
2019-05-23  8:13 ` [dpdk-dev] [PATCH v1 19/27] mempool/octeontx2: add NPA IRQ handler jerinj
2019-05-23  8:13 ` [dpdk-dev] [PATCH v1 20/27] mempool/octeontx2: add context dump support jerinj
2019-05-23  8:13 ` [dpdk-dev] [PATCH v1 21/27] mempool/octeontx2: add mempool alloc op jerinj
2019-05-23  8:13 ` [dpdk-dev] [PATCH v1 22/27] mempool/octeontx2: add mempool free op jerinj
2019-05-23  8:13 ` [dpdk-dev] [PATCH v1 23/27] mempool/octeontx2: add remaining slow path ops jerinj
2019-05-23  8:13 ` [dpdk-dev] [PATCH v1 24/27] mempool/octeontx2: add fast path mempool ops jerinj
2019-05-23  8:13 ` [dpdk-dev] [PATCH v1 25/27] mempool/octeontx2: add optimized dequeue operation for arm64 jerinj
2019-05-24 13:32   ` Aaron Conole
2019-05-27  9:20     ` Jerin Jacob Kollanukkaran
2019-05-23  8:13 ` [dpdk-dev] [PATCH v1 26/27] mempool/octeontx2: add devargs for max pool selection jerinj
2019-05-23  8:13 ` [dpdk-dev] [PATCH v1 27/27] doc: add Marvell OCTEON TX2 mempool documentation jerinj
2019-06-01  1:48 ` [dpdk-dev] [PATCH v2 00/27] OCTEON TX2 common and mempool driver jerinj
2019-06-01  1:48   ` [dpdk-dev] [PATCH v2 01/27] common/octeontx2: add build infrastructure and HW definition jerinj
2019-06-01  1:48   ` [dpdk-dev] [PATCH v2 02/27] common/octeontx2: add IO handling APIs jerinj
2019-06-01  1:48   ` [dpdk-dev] [PATCH v2 03/27] common/octeontx2: add mbox request and response definition jerinj
2019-06-01  1:48   ` [dpdk-dev] [PATCH v2 04/27] common/octeontx2: add mailbox base support infra jerinj
2019-06-01  1:48   ` [dpdk-dev] [PATCH v2 05/27] common/octeontx2: add runtime log infra jerinj
2019-06-01  1:48   ` [dpdk-dev] [PATCH v2 06/27] common/octeontx2: add mailbox send and receive support jerinj
2019-06-01  1:48   ` [dpdk-dev] [PATCH v2 07/27] common/octeontx2: introduce common device class jerinj
2019-06-01  1:48   ` [dpdk-dev] [PATCH v2 08/27] common/octeontx2: introduce irq handling functions jerinj
2019-06-01  1:48   ` [dpdk-dev] [PATCH v2 09/27] common/octeontx2: handle intra device operations jerinj
2019-06-01  1:48   ` [dpdk-dev] [PATCH v2 10/27] common/octeontx2: add AF to PF mailbox IRQ and msg handlers jerinj
2019-06-01  1:48   ` [dpdk-dev] [PATCH v2 11/27] common/octeontx2: add PF to VF " jerinj
2019-06-01  1:48   ` [dpdk-dev] [PATCH v2 12/27] common/octeontx2: add VF mailbox IRQ and msg handler jerinj
2019-06-01  1:48   ` [dpdk-dev] [PATCH v2 13/27] common/octeontx2: add uplink message support jerinj
2019-06-01  1:48   ` [dpdk-dev] [PATCH v2 14/27] common/octeontx2: add FLR IRQ handler jerinj
2019-06-01  1:48   ` [dpdk-dev] [PATCH v2 15/27] doc: add Marvell OCTEON TX2 platform guide jerinj
2019-06-01  1:48   ` [dpdk-dev] [PATCH v2 16/27] mempool/octeontx2: add build infra and device probe jerinj
2019-06-01  1:48   ` [dpdk-dev] [PATCH v2 17/27] drivers: add init and fini on octeontx2 NPA object jerinj
2019-06-01  1:48   ` [dpdk-dev] [PATCH v2 18/27] mempool/octeontx2: add NPA HW operations jerinj
2019-06-01  1:48   ` [dpdk-dev] [PATCH v2 19/27] mempool/octeontx2: add NPA IRQ handler jerinj
2019-06-01  1:48   ` [dpdk-dev] [PATCH v2 20/27] mempool/octeontx2: add context dump support jerinj
2019-06-01  1:48   ` [dpdk-dev] [PATCH v2 21/27] mempool/octeontx2: add mempool alloc op jerinj
2019-06-01  1:49   ` [dpdk-dev] [PATCH v2 22/27] mempool/octeontx2: add mempool free op jerinj
2019-06-01  1:49   ` [dpdk-dev] [PATCH v2 23/27] mempool/octeontx2: add remaining slow path ops jerinj
2019-06-01  1:49   ` [dpdk-dev] [PATCH v2 24/27] mempool/octeontx2: add fast path mempool ops jerinj
2019-06-01  1:49   ` [dpdk-dev] [PATCH v2 25/27] mempool/octeontx2: add optimized dequeue operation for arm64 jerinj
2019-06-01  1:49   ` [dpdk-dev] [PATCH v2 26/27] mempool/octeontx2: add devargs for max pool selection jerinj
2019-06-01  1:49   ` [dpdk-dev] [PATCH v2 27/27] doc: add Marvell OCTEON TX2 mempool documentation jerinj
2019-06-17 15:55   ` [dpdk-dev] [PATCH v3 00/27] OCTEON TX2 common and mempool driver jerinj
2019-06-17 15:55     ` [dpdk-dev] [PATCH v3 01/27] common/octeontx2: add build infrastructure and HW definition jerinj
2019-06-17 15:55     ` [dpdk-dev] [PATCH v3 02/27] common/octeontx2: add IO handling APIs jerinj
2019-06-17 15:55     ` [dpdk-dev] [PATCH v3 03/27] common/octeontx2: add mbox request and response definition jerinj
2019-06-17 15:55     ` [dpdk-dev] [PATCH v3 04/27] common/octeontx2: add mailbox base support infra jerinj
2019-06-17 15:55     ` [dpdk-dev] [PATCH v3 05/27] common/octeontx2: add runtime log infra jerinj
2019-06-17 15:55     ` [dpdk-dev] [PATCH v3 06/27] common/octeontx2: add mailbox send and receive support jerinj
2019-06-17 15:55     ` [dpdk-dev] [PATCH v3 07/27] common/octeontx2: introduce common device class jerinj
2019-06-17 15:55     ` [dpdk-dev] [PATCH v3 08/27] common/octeontx2: introduce irq handling functions jerinj
2019-06-17 15:55     ` [dpdk-dev] [PATCH v3 09/27] common/octeontx2: handle intra device operations jerinj
2019-06-17 15:55     ` [dpdk-dev] [PATCH v3 10/27] common/octeontx2: add AF to PF mailbox IRQ and msg handlers jerinj
2019-06-17 15:55     ` jerinj [this message]
2019-06-17 15:55     ` [dpdk-dev] [PATCH v3 12/27] common/octeontx2: add VF mailbox IRQ and msg handler jerinj
2019-06-17 15:55     ` [dpdk-dev] [PATCH v3 13/27] common/octeontx2: add uplink message support jerinj
2019-06-17 15:55     ` [dpdk-dev] [PATCH v3 14/27] common/octeontx2: add FLR IRQ handler jerinj
2019-06-17 15:55     ` [dpdk-dev] [PATCH v3 15/27] doc: add Marvell OCTEON TX2 platform guide jerinj
2019-06-17 15:55     ` [dpdk-dev] [PATCH v3 16/27] mempool/octeontx2: add build infra and device probe jerinj
2019-06-17 15:55     ` [dpdk-dev] [PATCH v3 17/27] drivers: add init and fini on octeontx2 NPA object jerinj
2019-06-17 15:55     ` [dpdk-dev] [PATCH v3 18/27] mempool/octeontx2: add NPA HW operations jerinj
2019-06-17 15:55     ` [dpdk-dev] [PATCH v3 19/27] mempool/octeontx2: add NPA IRQ handler jerinj
2019-06-17 15:55     ` [dpdk-dev] [PATCH v3 20/27] mempool/octeontx2: add context dump support jerinj
2019-06-17 15:55     ` [dpdk-dev] [PATCH v3 21/27] mempool/octeontx2: add mempool alloc op jerinj
2019-06-17 15:55     ` [dpdk-dev] [PATCH v3 22/27] mempool/octeontx2: add mempool free op jerinj
2019-06-17 15:55     ` [dpdk-dev] [PATCH v3 23/27] mempool/octeontx2: add remaining slow path ops jerinj
2019-06-17 15:55     ` [dpdk-dev] [PATCH v3 24/27] mempool/octeontx2: add fast path mempool ops jerinj
2019-06-17 15:55     ` [dpdk-dev] [PATCH v3 25/27] mempool/octeontx2: add optimized dequeue operation for arm64 jerinj
2019-06-17 21:25       ` Aaron Conole
2019-06-18  7:39         ` [dpdk-dev] [EXT] " Pavan Nikhilesh Bhagavatula
2019-06-21 19:26           ` Aaron Conole
2019-06-17 15:55     ` [dpdk-dev] [PATCH v3 26/27] mempool/octeontx2: add devargs for max pool selection jerinj
2019-06-17 15:55     ` [dpdk-dev] [PATCH v3 27/27] doc: add Marvell OCTEON TX2 mempool documentation jerinj
2019-06-20  8:39     ` [dpdk-dev] [PATCH v3 00/27] OCTEON TX2 common and mempool driver Jerin Jacob Kollanukkaran
2019-06-22 13:23     ` [dpdk-dev] [PATCH v4 " jerinj
2019-06-22 13:23       ` [dpdk-dev] [PATCH v4 01/27] common/octeontx2: add build infrastructure and HW definition jerinj
2019-06-22 13:23       ` [dpdk-dev] [PATCH v4 02/27] common/octeontx2: add IO handling APIs jerinj
2019-06-22 13:23       ` [dpdk-dev] [PATCH v4 03/27] common/octeontx2: add mbox request and response definition jerinj
2019-06-22 13:23       ` [dpdk-dev] [PATCH v4 04/27] common/octeontx2: add mailbox base support infra jerinj
2019-06-22 13:23       ` [dpdk-dev] [PATCH v4 05/27] common/octeontx2: add runtime log infra jerinj
2019-06-22 13:23       ` [dpdk-dev] [PATCH v4 06/27] common/octeontx2: add mailbox send and receive support jerinj
2019-06-22 13:23       ` [dpdk-dev] [PATCH v4 07/27] common/octeontx2: introduce common device class jerinj
2019-06-22 13:23       ` [dpdk-dev] [PATCH v4 08/27] common/octeontx2: introduce irq handling functions jerinj
2019-06-22 13:23       ` [dpdk-dev] [PATCH v4 09/27] common/octeontx2: handle intra device operations jerinj
2019-06-22 13:24       ` [dpdk-dev] [PATCH v4 10/27] common/octeontx2: add AF to PF mailbox IRQ and msg handlers jerinj
2019-06-22 13:24       ` [dpdk-dev] [PATCH v4 11/27] common/octeontx2: add PF to VF " jerinj
2019-06-22 13:24       ` [dpdk-dev] [PATCH v4 12/27] common/octeontx2: add VF mailbox IRQ and msg handler jerinj
2019-06-22 13:24       ` [dpdk-dev] [PATCH v4 13/27] common/octeontx2: add uplink message support jerinj
2019-06-22 13:24       ` [dpdk-dev] [PATCH v4 14/27] common/octeontx2: add FLR IRQ handler jerinj
2019-06-22 13:24       ` [dpdk-dev] [PATCH v4 15/27] doc: add Marvell OCTEON TX2 platform guide jerinj
2019-06-22 13:24       ` [dpdk-dev] [PATCH v4 16/27] mempool/octeontx2: add build infra and device probe jerinj
2019-06-22 13:24       ` [dpdk-dev] [PATCH v4 17/27] drivers: add init and fini on octeontx2 NPA object jerinj
2019-06-22 13:24       ` [dpdk-dev] [PATCH v4 18/27] mempool/octeontx2: add NPA HW operations jerinj
2019-06-22 13:24       ` [dpdk-dev] [PATCH v4 19/27] mempool/octeontx2: add NPA IRQ handler jerinj
2019-06-22 13:24       ` [dpdk-dev] [PATCH v4 20/27] mempool/octeontx2: add context dump support jerinj
2019-06-22 13:24       ` [dpdk-dev] [PATCH v4 21/27] mempool/octeontx2: add mempool alloc op jerinj
2019-06-22 13:24       ` [dpdk-dev] [PATCH v4 22/27] mempool/octeontx2: add mempool free op jerinj
2019-06-22 13:24       ` [dpdk-dev] [PATCH v4 23/27] mempool/octeontx2: add remaining slow path ops jerinj
2019-06-22 13:24       ` [dpdk-dev] [PATCH v4 24/27] mempool/octeontx2: add fast path mempool ops jerinj
2019-06-22 13:24       ` [dpdk-dev] [PATCH v4 25/27] mempool/octeontx2: add optimized dequeue operation for arm64 jerinj
2019-06-22 13:24       ` [dpdk-dev] [PATCH v4 26/27] mempool/octeontx2: add devargs for max pool selection jerinj
2019-06-22 13:24       ` [dpdk-dev] [PATCH v4 27/27] doc: add Marvell OCTEON TX2 mempool documentation jerinj
2019-06-25 21:25         ` Thomas Monjalon
2019-06-25 21:39       ` [dpdk-dev] [PATCH v4 00/27] OCTEON TX2 common and mempool driver Thomas Monjalon
2019-06-26 23:10         ` Stephen Hemminger
2019-06-26 13:14       ` Ferruh Yigit
