From: Jakub Kicinski <jakub.kicinski@netronome.com>
To: netdev@vger.kernel.org
Cc: oss-drivers@netronome.com, Jakub Kicinski <jakub.kicinski@netronome.com>
Subject: [PATCH net-next 09/16] nfp: add control vNIC datapath
Date: Mon,  5 Jun 2017 17:01:50 -0700
Message-ID: <20170606000157.17556-10-jakub.kicinski@netronome.com>
In-Reply-To: <20170606000157.17556-1-jakub.kicinski@netronome.com>

Since control vNICs don't have a netdev, they can't use NAPI or the
queuing infrastructure the stack provides.  Add a simple tasklet-based
datapath for receiving and sending control messages, with TX queuing
on an skb list.

Signed-off-by: Jakub Kicinski <jakub.kicinski@netronome.com>
---
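A note for reviewers on the TX scheme: control messages which don't fit
on the ring are parked on the per-vector skb list and retried from the
tasklet, and requeued (old) messages go back to the head of the list so
ordering is preserved.  A minimal sketch of the pattern, where
ctrl_ring_full() and ctrl_fill_one_desc() are hypothetical stand-ins
for nfp_net_tx_full() and the descriptor setup in nfp_ctrl_tx_one():

#include <linux/interrupt.h>
#include <linux/skbuff.h>
#include <linux/spinlock.h>

/* Hypothetical stand-ins for this sketch; the real equivalents are
 * nfp_net_tx_full() and the descriptor setup in nfp_ctrl_tx_one().
 */
static bool ctrl_ring_full(void);
static void ctrl_fill_one_desc(struct sk_buff *skb);

struct ctrl_vec {
	struct tasklet_struct tasklet;
	struct sk_buff_head queue;	/* ctrl msgs awaiting ring space */
	spinlock_t lock;		/* protects the ring and the queue */
};

/* App submits a message: place it on the ring, or park it for later. */
static bool ctrl_tx(struct ctrl_vec *v, struct sk_buff *skb)
{
	bool queued = false;

	spin_lock_bh(&v->lock);
	if (ctrl_ring_full()) {
		__skb_queue_tail(&v->queue, skb);	/* new msg: tail */
		queued = true;
	} else {
		ctrl_fill_one_desc(skb);
	}
	spin_unlock_bh(&v->lock);

	return queued;
}

/* Tasklet body, scheduled from the IRQ handler: drain the backlog. */
static void ctrl_poll(unsigned long arg)
{
	struct ctrl_vec *v = (struct ctrl_vec *)arg;
	struct sk_buff *skb;

	spin_lock_bh(&v->lock);
	while ((skb = __skb_dequeue(&v->queue))) {
		if (ctrl_ring_full()) {
			/* old msg goes back to the head - keeps ordering */
			__skb_queue_head(&v->queue, skb);
			break;
		}
		ctrl_fill_one_desc(skb);
	}
	spin_unlock_bh(&v->lock);
}
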
 drivers/net/ethernet/netronome/nfp/nfp_app.h       |  11 +
 drivers/net/ethernet/netronome/nfp/nfp_net.h       |  17 +-
 .../net/ethernet/netronome/nfp/nfp_net_common.c    | 323 ++++++++++++++++++++-
 drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h  |   3 +
 4 files changed, 345 insertions(+), 9 deletions(-)
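
One subtlety: for apps with ctrl_has_meta set, TX prepends 8 bytes of
metadata.  skb_push() grows the skb head downwards, so the port-id
value is pushed first and the type word second, leaving the layout
[type][port id] in front of the payload.  A self-contained sketch of
just that prepend (ctrl_push_meta() is a hypothetical helper, not a
driver function):

#include <linux/errno.h>
#include <linux/skbuff.h>
#include <asm/unaligned.h>

#define NFP_NET_META_PORTID	5	/* meta field type */
#define NFP_META_PORT_ID_CTRL	~0U	/* "port id" marking a ctrl msg */

/* Prepend the 8-byte control metadata.  Pushed in reverse order, so
 * the final layout is [be32 type=5][be32 port=~0] ahead of the data.
 */
static int ctrl_push_meta(struct sk_buff *skb)
{
	if (skb_headroom(skb) < 8)
		return -ENOSPC;

	put_unaligned_be32(NFP_META_PORT_ID_CTRL, skb_push(skb, 4));
	put_unaligned_be32(NFP_NET_META_PORTID, skb_push(skb, 4));

	return 0;
}

On the RX side, nfp_ctrl_meta_ok() accepts exactly this layout (or no
metadata at all for apps without ctrl_has_meta) before a control
message is accepted.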

diff --git a/drivers/net/ethernet/netronome/nfp/nfp_app.h b/drivers/net/ethernet/netronome/nfp/nfp_app.h
index 13efdefffa1a..f6091ad0a9a9 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_app.h
+++ b/drivers/net/ethernet/netronome/nfp/nfp_app.h
@@ -38,6 +38,7 @@ struct bpf_prog;
 struct net_device;
 struct pci_dev;
 struct tc_to_netdev;
+struct sk_buff;
 struct nfp_app;
 struct nfp_cpp;
 struct nfp_pf;
@@ -55,6 +56,7 @@ extern const struct nfp_app_type app_bpf;
  * struct nfp_app_type - application definition
  * @id:		application ID
  * @name:	application name
+ * @ctrl_has_meta:  control messages have prepend of type:5/port:CTRL
  *
  * Callbacks
  * @init:	perform basic app checks
@@ -69,6 +71,8 @@ struct nfp_app_type {
 	enum nfp_app_id id;
 	const char *name;
 
+	bool ctrl_has_meta;
+
 	int (*init)(struct nfp_app *app);
 
 	const char *(*extra_cap)(struct nfp_app *app, struct nfp_net *nn);
@@ -99,6 +103,8 @@ struct nfp_app {
 	const struct nfp_app_type *type;
 };
 
+bool nfp_ctrl_tx(struct nfp_net *nn, struct sk_buff *skb);
+
 static inline int nfp_app_init(struct nfp_app *app)
 {
 	if (!app->type->init)
@@ -125,6 +131,11 @@ static inline const char *nfp_app_name(struct nfp_app *app)
 	return app->type->name;
 }
 
+static inline bool nfp_app_ctrl_has_meta(struct nfp_app *app)
+{
+	return app->type->ctrl_has_meta;
+}
+
 static inline const char *nfp_app_extra_cap(struct nfp_app *app,
 					    struct nfp_net *nn)
 {
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net.h b/drivers/net/ethernet/netronome/nfp/nfp_net.h
index 6b21c4d0ccfa..eb849d26f4dd 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net.h
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net.h
@@ -405,7 +405,14 @@ struct nfp_net_rx_ring {
  */
 struct nfp_net_r_vector {
 	struct nfp_net *nfp_net;
-	struct napi_struct napi;
+	union {
+		struct napi_struct napi;
+		struct {
+			struct tasklet_struct tasklet;
+			struct sk_buff_head queue;
+			struct spinlock lock;
+		};
+	};
 
 	struct nfp_net_tx_ring *tx_ring;
 	struct nfp_net_rx_ring *rx_ring;
@@ -816,6 +823,11 @@ static inline bool nfp_net_running(struct nfp_net *nn)
 	return nn->dp.ctrl & NFP_NET_CFG_CTRL_ENABLE;
 }
 
+static inline const char *nfp_net_name(struct nfp_net *nn)
+{
+	return nn->dp.netdev ? nn->dp.netdev->name : "ctrl";
+}
+
 /* Globals */
 extern const char nfp_driver_version[];
 
@@ -838,6 +850,9 @@ void nfp_net_free(struct nfp_net *nn);
 int nfp_net_init(struct nfp_net *nn);
 void nfp_net_clean(struct nfp_net *nn);
 
+int nfp_ctrl_open(struct nfp_net *nn);
+void nfp_ctrl_close(struct nfp_net *nn);
+
 void nfp_net_set_ethtool_ops(struct net_device *netdev);
 void nfp_net_info(struct nfp_net *nn);
 int nfp_net_reconfig(struct nfp_net *nn, u32 update);
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
index c47705861a81..59f1764242a0 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
@@ -392,6 +392,15 @@ static irqreturn_t nfp_net_irq_rxtx(int irq, void *data)
 	return IRQ_HANDLED;
 }
 
+static irqreturn_t nfp_ctrl_irq_rxtx(int irq, void *data)
+{
+	struct nfp_net_r_vector *r_vec = data;
+
+	tasklet_schedule(&r_vec->tasklet);
+
+	return IRQ_HANDLED;
+}
+
 /**
  * nfp_net_read_link_status() - Reread link status from control BAR
  * @nn:       NFP Network structure
@@ -523,7 +532,7 @@ nfp_net_aux_irq_request(struct nfp_net *nn, u32 ctrl_offset,
 
 	entry = &nn->irq_entries[vector_idx];
 
-	snprintf(name, name_sz, format, netdev_name(nn->dp.netdev));
+	snprintf(name, name_sz, format, nfp_net_name(nn));
 	err = request_irq(entry->vector, handler, 0, name, nn);
 	if (err) {
 		nn_err(nn, "Failed to request IRQ %d (err=%d).\n",
@@ -943,6 +952,9 @@ static void nfp_net_tx_complete(struct nfp_net_tx_ring *tx_ring)
 	r_vec->tx_pkts += done_pkts;
 	u64_stats_update_end(&r_vec->tx_sync);
 
+	if (!dp->netdev)
+		return;
+
 	nd_q = netdev_get_tx_queue(dp->netdev, tx_ring->idx);
 	netdev_tx_completed_queue(nd_q, done_pkts, done_bytes);
 	if (nfp_net_tx_ring_should_wake(tx_ring)) {
@@ -1052,7 +1064,7 @@ nfp_net_tx_ring_reset(struct nfp_net_dp *dp, struct nfp_net_tx_ring *tx_ring)
 	tx_ring->qcp_rd_p = 0;
 	tx_ring->wr_ptr_add = 0;
 
-	if (tx_ring->is_xdp)
+	if (tx_ring->is_xdp || !dp->netdev)
 		return;
 
 	nd_q = netdev_get_tx_queue(dp->netdev, tx_ring->idx);
@@ -1742,6 +1754,231 @@ static int nfp_net_poll(struct napi_struct *napi, int budget)
 	return pkts_polled;
 }
 
+/* Control device data path
+ */
+
+static bool
+nfp_ctrl_tx_one(struct nfp_net *nn, struct nfp_net_r_vector *r_vec,
+		struct sk_buff *skb, bool old)
+{
+	unsigned int real_len = skb->len, meta_len = 0;
+	struct nfp_net_tx_ring *tx_ring;
+	struct nfp_net_tx_buf *txbuf;
+	struct nfp_net_tx_desc *txd;
+	struct nfp_net_dp *dp;
+	dma_addr_t dma_addr;
+	int wr_idx;
+
+	dp = &r_vec->nfp_net->dp;
+	tx_ring = r_vec->tx_ring;
+
+	if (WARN_ON_ONCE(skb_shinfo(skb)->nr_frags)) {
+		nn_dp_warn(dp, "Driver's CTRL TX does not implement gather\n");
+		goto err_free;
+	}
+
+	if (unlikely(nfp_net_tx_full(tx_ring, 1))) {
+		u64_stats_update_begin(&r_vec->tx_sync);
+		r_vec->tx_busy++;
+		u64_stats_update_end(&r_vec->tx_sync);
+		if (!old)
+			__skb_queue_tail(&r_vec->queue, skb);
+		else
+			__skb_queue_head(&r_vec->queue, skb);
+		return true;
+	}
+
+	if (nfp_app_ctrl_has_meta(nn->app)) {
+		if (unlikely(skb_headroom(skb) < 8)) {
+			nn_dp_warn(dp, "CTRL TX on skb without headroom\n");
+			goto err_free;
+		}
+		meta_len = 8;
+		put_unaligned_be32(NFP_META_PORT_ID_CTRL, skb_push(skb, 4));
+		put_unaligned_be32(NFP_NET_META_PORTID, skb_push(skb, 4));
+	}
+
+	/* Start with the head skbuf */
+	dma_addr = dma_map_single(dp->dev, skb->data, skb_headlen(skb),
+				  DMA_TO_DEVICE);
+	if (dma_mapping_error(dp->dev, dma_addr))
+		goto err_dma_warn;
+
+	wr_idx = D_IDX(tx_ring, tx_ring->wr_p);
+
+	/* Stash the soft descriptor of the head then initialize it */
+	txbuf = &tx_ring->txbufs[wr_idx];
+	txbuf->skb = skb;
+	txbuf->dma_addr = dma_addr;
+	txbuf->fidx = -1;
+	txbuf->pkt_cnt = 1;
+	txbuf->real_len = real_len;
+
+	/* Build TX descriptor */
+	txd = &tx_ring->txds[wr_idx];
+	txd->offset_eop = meta_len | PCIE_DESC_TX_EOP;
+	txd->dma_len = cpu_to_le16(skb_headlen(skb));
+	nfp_desc_set_dma_addr(txd, dma_addr);
+	txd->data_len = cpu_to_le16(skb->len);
+
+	txd->flags = 0;
+	txd->mss = 0;
+	txd->lso_hdrlen = 0;
+
+	tx_ring->wr_p++;
+	tx_ring->wr_ptr_add++;
+	nfp_net_tx_xmit_more_flush(tx_ring);
+
+	return false;
+
+err_dma_warn:
+	nn_dp_warn(dp, "Failed to DMA map TX CTRL buffer\n");
+err_free:
+	u64_stats_update_begin(&r_vec->tx_sync);
+	r_vec->tx_errors++;
+	u64_stats_update_end(&r_vec->tx_sync);
+	dev_kfree_skb_any(skb);
+	return false;
+}
+
+bool nfp_ctrl_tx(struct nfp_net *nn, struct sk_buff *skb)
+{
+	struct nfp_net_r_vector *r_vec = &nn->r_vecs[0];
+	bool ret;
+
+	spin_lock_bh(&r_vec->lock);
+	ret = nfp_ctrl_tx_one(nn, r_vec, skb, false);
+	spin_unlock_bh(&r_vec->lock);
+
+	return ret;
+}
+
+static void __nfp_ctrl_tx_queued(struct nfp_net_r_vector *r_vec)
+{
+	struct sk_buff *skb;
+
+	while ((skb = __skb_dequeue(&r_vec->queue)))
+		if (nfp_ctrl_tx_one(r_vec->nfp_net, r_vec, skb, true))
+			return;
+}
+
+static bool
+nfp_ctrl_meta_ok(struct nfp_net *nn, void *data, unsigned int meta_len)
+{
+	u32 meta_type, meta_tag;
+
+	if (!nfp_app_ctrl_has_meta(nn->app))
+		return !meta_len;
+
+	if (meta_len != 8)
+		return false;
+
+	meta_type = get_unaligned_be32(data);
+	meta_tag = get_unaligned_be32(data + 4);
+
+	return (meta_type == NFP_NET_META_PORTID &&
+		meta_tag == NFP_META_PORT_ID_CTRL);
+}
+
+static bool
+nfp_ctrl_rx_one(struct nfp_net *nn, struct nfp_net_dp *dp,
+		struct nfp_net_r_vector *r_vec, struct nfp_net_rx_ring *rx_ring)
+{
+	unsigned int meta_len, data_len, meta_off, pkt_len, pkt_off;
+	struct nfp_net_rx_buf *rxbuf;
+	struct nfp_net_rx_desc *rxd;
+	dma_addr_t new_dma_addr;
+	struct sk_buff *skb;
+	void *new_frag;
+	int idx;
+
+	idx = D_IDX(rx_ring, rx_ring->rd_p);
+
+	rxd = &rx_ring->rxds[idx];
+	if (!(rxd->rxd.meta_len_dd & PCIE_DESC_RX_DD))
+		return false;
+
+	/* Memory barrier to ensure that we won't do other reads
+	 * before the DD bit.
+	 */
+	dma_rmb();
+
+	rx_ring->rd_p++;
+
+	rxbuf =	&rx_ring->rxbufs[idx];
+	meta_len = rxd->rxd.meta_len_dd & PCIE_DESC_RX_META_LEN_MASK;
+	data_len = le16_to_cpu(rxd->rxd.data_len);
+	pkt_len = data_len - meta_len;
+
+	pkt_off = NFP_NET_RX_BUF_HEADROOM + dp->rx_dma_off;
+	if (dp->rx_offset == NFP_NET_CFG_RX_OFFSET_DYNAMIC)
+		pkt_off += meta_len;
+	else
+		pkt_off += dp->rx_offset;
+	meta_off = pkt_off - meta_len;
+
+	/* Stats update */
+	u64_stats_update_begin(&r_vec->rx_sync);
+	r_vec->rx_pkts++;
+	r_vec->rx_bytes += pkt_len;
+	u64_stats_update_end(&r_vec->rx_sync);
+
+	nfp_net_dma_sync_cpu_rx(dp, rxbuf->dma_addr + meta_off,	data_len);
+
+	if (unlikely(!nfp_ctrl_meta_ok(nn, rxbuf->frag + meta_off, meta_len))) {
+		nn_dp_warn(dp, "incorrect metadata for ctrl packet (%d)\n",
+			   meta_len);
+		nfp_net_rx_drop(dp, r_vec, rx_ring, rxbuf, NULL);
+		return true;
+	}
+
+	skb = build_skb(rxbuf->frag, dp->fl_bufsz);
+	if (unlikely(!skb)) {
+		nfp_net_rx_drop(dp, r_vec, rx_ring, rxbuf, NULL);
+		return true;
+	}
+	new_frag = nfp_net_napi_alloc_one(dp, &new_dma_addr);
+	if (unlikely(!new_frag)) {
+		nfp_net_rx_drop(dp, r_vec, rx_ring, rxbuf, skb);
+		return true;
+	}
+
+	nfp_net_dma_unmap_rx(dp, rxbuf->dma_addr);
+
+	nfp_net_rx_give_one(dp, rx_ring, new_frag, new_dma_addr);
+
+	skb_reserve(skb, pkt_off);
+	skb_put(skb, pkt_len);
+
+	dev_kfree_skb_any(skb);
+
+	return true;
+}
+
+static void nfp_ctrl_rx(struct nfp_net_r_vector *r_vec)
+{
+	struct nfp_net_rx_ring *rx_ring = r_vec->rx_ring;
+	struct nfp_net *nn = r_vec->nfp_net;
+	struct nfp_net_dp *dp = &nn->dp;
+
+	while (nfp_ctrl_rx_one(nn, dp, r_vec, rx_ring))
+		continue;
+}
+
+static void nfp_ctrl_poll(unsigned long arg)
+{
+	struct nfp_net_r_vector *r_vec = (void *)arg;
+
+	spin_lock_bh(&r_vec->lock);
+	nfp_net_tx_complete(r_vec->tx_ring);
+	__nfp_ctrl_tx_queued(r_vec);
+	spin_unlock_bh(&r_vec->lock);
+
+	nfp_ctrl_rx(r_vec);
+
+	nfp_net_irq_unmask(r_vec->nfp_net, r_vec->irq_entry);
+}
+
 /* Setup and Configuration
  */
 
@@ -1764,10 +2001,21 @@ static void nfp_net_vecs_init(struct nfp_net *nn)
 
 		r_vec = &nn->r_vecs[r];
 		r_vec->nfp_net = nn;
-		r_vec->handler = nfp_net_irq_rxtx;
 		r_vec->irq_entry = entry->entry;
 		r_vec->irq_vector = entry->vector;
 
+		if (nn->dp.netdev) {
+			r_vec->handler = nfp_net_irq_rxtx;
+		} else {
+			r_vec->handler = nfp_ctrl_irq_rxtx;
+
+			__skb_queue_head_init(&r_vec->queue);
+			spin_lock_init(&r_vec->lock);
+			tasklet_init(&r_vec->tasklet, nfp_ctrl_poll,
+				     (unsigned long)r_vec);
+			tasklet_disable(&r_vec->tasklet);
+		}
+
 		cpumask_set_cpu(r, &r_vec->affinity_mask);
 	}
 }
@@ -2034,15 +2282,22 @@ nfp_net_prepare_vector(struct nfp_net *nn, struct nfp_net_r_vector *r_vec,
 	int err;
 
 	/* Setup NAPI */
-	netif_napi_add(nn->dp.netdev, &r_vec->napi,
-		       nfp_net_poll, NAPI_POLL_WEIGHT);
+	if (nn->dp.netdev)
+		netif_napi_add(nn->dp.netdev, &r_vec->napi,
+			       nfp_net_poll, NAPI_POLL_WEIGHT);
+	else
+		tasklet_enable(&r_vec->tasklet);
 
 	snprintf(r_vec->name, sizeof(r_vec->name),
-		 "%s-rxtx-%d", nn->dp.netdev->name, idx);
+		 "%s-rxtx-%d", nfp_net_name(nn), idx);
 	err = request_irq(r_vec->irq_vector, r_vec->handler, 0, r_vec->name,
 			  r_vec);
 	if (err) {
-		netif_napi_del(&r_vec->napi);
+		if (nn->dp.netdev)
+			netif_napi_del(&r_vec->napi);
+		else
+			tasklet_disable(&r_vec->tasklet);
+
 		nn_err(nn, "Error requesting IRQ %d\n", r_vec->irq_vector);
 		return err;
 	}
@@ -2060,7 +2315,11 @@ static void
 nfp_net_cleanup_vector(struct nfp_net *nn, struct nfp_net_r_vector *r_vec)
 {
 	irq_set_affinity_hint(r_vec->irq_vector, NULL);
-	netif_napi_del(&r_vec->napi);
+	if (nn->dp.netdev)
+		netif_napi_del(&r_vec->napi);
+	else
+		tasklet_disable(&r_vec->tasklet);
+
 	free_irq(r_vec->irq_vector, r_vec);
 }
 
@@ -2338,6 +2597,24 @@ static int nfp_net_netdev_close(struct net_device *netdev)
 	return 0;
 }
 
+void nfp_ctrl_close(struct nfp_net *nn)
+{
+	int r;
+
+	rtnl_lock();
+
+	for (r = 0; r < nn->dp.num_r_vecs; r++) {
+		disable_irq(nn->r_vecs[r].irq_vector);
+		tasklet_disable(&nn->r_vecs[r].tasklet);
+	}
+
+	nfp_net_clear_config_and_disable(nn);
+
+	nfp_net_close_free_all(nn);
+
+	rtnl_unlock();
+}
+
 /**
  * nfp_net_open_stack() - Start the device from stack's perspective
  * @nn:      NFP Net device to reconfigure
@@ -2453,6 +2730,35 @@ static int nfp_net_netdev_open(struct net_device *netdev)
 	return err;
 }
 
+int nfp_ctrl_open(struct nfp_net *nn)
+{
+	int err, r;
+
+	/* ring dumping depends on vNICs being opened/closed under rtnl */
+	rtnl_lock();
+
+	err = nfp_net_open_alloc_all(nn);
+	if (err)
+		goto err_unlock;
+
+	err = nfp_net_set_config_and_enable(nn);
+	if (err)
+		goto err_free_all;
+
+	for (r = 0; r < nn->dp.num_r_vecs; r++)
+		enable_irq(nn->r_vecs[r].irq_vector);
+
+	rtnl_unlock();
+
+	return 0;
+
+err_free_all:
+	nfp_net_close_free_all(nn);
+err_unlock:
+	rtnl_unlock();
+	return err;
+}
+
 static void nfp_net_set_rx_mode(struct net_device *netdev)
 {
 	struct nfp_net *nn = netdev_priv(netdev);
@@ -3278,6 +3584,7 @@ int nfp_net_init(struct nfp_net *nn)
 
 	/* Chained metadata is signalled by capabilities except in version 4 */
 	nn->dp.chained_metadata_format = nn->fw_ver.major == 4 ||
+					 !nn->dp.netdev ||
 					 nn->cap & NFP_NET_CFG_CTRL_CHAIN_META;
 	if (nn->dp.chained_metadata_format && nn->fw_ver.major != 4)
 		nn->cap &= ~NFP_NET_CFG_CTRL_RSS;
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h b/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h
index c8208bf370e0..48a8bf97645e 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h
@@ -71,8 +71,11 @@
 #define NFP_NET_META_FIELD_SIZE		4
 #define NFP_NET_META_HASH		1 /* next field carries hash type */
 #define NFP_NET_META_MARK		2
+#define NFP_NET_META_PORTID		5
 #define NFP_NET_META_CSUM		6 /* checksum complete type */
 
+#define	NFP_META_PORT_ID_CTRL		~0U
+
 /**
  * Hash type pre-pended when a RSS hash was computed
  */
-- 
2.11.0

Thread overview: 30+ messages
2017-06-06  0:01 [PATCH net-next 00/16] nfp: ctrl vNIC Jakub Kicinski
2017-06-06  0:01 ` [PATCH net-next 01/16] nfp: reorder open and close functions Jakub Kicinski
2017-06-06  0:01 ` [PATCH net-next 02/16] nfp: split out the allocation part of open Jakub Kicinski
2017-06-06  0:01 ` [PATCH net-next 03/16] nfp: reuse ring free code on close Jakub Kicinski
2017-06-06  0:01 ` [PATCH net-next 04/16] nfp: move nfp_net_vecs_init() Jakub Kicinski
2017-06-06  0:01 ` [PATCH net-next 05/16] nfp: prepare print macros for use without netdev Jakub Kicinski
2017-06-06  0:01 ` [PATCH net-next 06/16] nfp: make sure debug accesses don't depend on netdevs Jakub Kicinski
2017-06-06  0:01 ` [PATCH net-next 07/16] nfp: allow allocation and initialization of netdev-less vNICs Jakub Kicinski
2017-06-06  0:01 ` [PATCH net-next 08/16] nfp: prepare config and enable for working without netdevs Jakub Kicinski
2017-06-06  0:01 ` Jakub Kicinski [this message]
2017-06-06  0:01 ` [PATCH net-next 10/16] nfp: make vNIC ctrl memory mapping function reusable Jakub Kicinski
2017-06-06  0:01 ` [PATCH net-next 11/16] nfp: map all queue controllers at once Jakub Kicinski
2017-06-06  0:01 ` [PATCH net-next 12/16] nfp: don't clutter init code passing fw_ver around Jakub Kicinski
2017-06-06  0:01 ` [PATCH net-next 13/16] nfp: slice the netdev spawning function Jakub Kicinski
2017-06-06  0:01 ` [PATCH net-next 14/16] nfp: allow non-equal distribution of IRQs Jakub Kicinski
2017-06-06  0:01 ` [PATCH net-next 15/16] nfp: create control vNICs and wire up rx/tx Jakub Kicinski
2017-06-06  0:01 ` [PATCH net-next 16/16] nfp: advertise support for NFD ABI 0.5 Jakub Kicinski
2017-06-06  6:16 ` [PATCH net-next 00/16] nfp: ctrl vNIC Jiri Pirko
2017-06-06  7:21   ` Jakub Kicinski
2017-06-06  8:23     ` Jiri Pirko
2017-06-06  9:09       ` Jakub Kicinski
2017-06-06  9:17         ` Jiri Pirko
2017-06-06  9:35           ` Mintz, Yuval
2017-06-06 11:20             ` Jiri Pirko
2017-06-07 17:48               ` Mintz, Yuval
2017-06-08  5:38                 ` Jiri Pirko
2017-06-08  6:33                   ` Mintz, Yuval
2017-06-08  6:38                     ` Jiri Pirko
2017-06-06 19:54           ` Jakub Kicinski
2017-06-07 16:52 ` David Miller
