From: Stephen Hemminger <stephen-OTpzqLSitTUnbdJkjeBofR2eb7JE58TQ@public.gmane.org>
To: dev-VfR2kkLFssw@public.gmane.org
Cc: Stephen Hemminger <shemming-43mecJUBy8ZBDgjK7y7TUQ@public.gmane.org>
Subject: [PATCH 1/5] xen: allow choosing dom0 support at runtime
Date: Sun, 15 Feb 2015 10:24:45 -0500
Message-ID: <1424013889-2226-1-git-send-email-shemming@brocade.com>

Previously, the library and applications had to be built either for
running on Xen DOM0 or for running outside of DOM0; the choice was
fixed at compile time. This change makes DOM0 support a runtime flag.
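
The pattern used throughout the series keeps the existing #ifdef only
as a compile-time guard and picks the DOM0 path with the new
is_xen_dom0_supported() check. A representative call site, condensed
from the memzone changes below (identifier names are illustrative):

#ifdef RTE_LIBRTE_XEN_DOM0
	if (is_xen_dom0_supported())
		/* DOM0: the reservation must not cross a 2M boundary */
		mz = rte_memzone_reserve_bounded(z_name, size, socket_id, 0,
						 align, RTE_PGSIZE_2M);
	else
#endif
		/* non-DOM0, or DOM0 support compiled out */
		mz = rte_memzone_reserve_aligned(z_name, size, socket_id, 0,
						 align);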

Signed-off-by: Stephen Hemminger <stephen-OTpzqLSitTUnbdJkjeBofR2eb7JE58TQ@public.gmane.org>
---
v2 -- fix i40e as well
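
Note for reviewers: the new rte_eth_dma_zone_reserve() helper replaces
the per-driver ring_dma_zone_reserve() copies in the em, igb and ixgbe
PMDs (i40e keeps its own variant but gains the runtime check). A driver
now reserves a descriptor ring as below, condensed from the em_rxtx.c
hunk with error handling trimmed:

	const struct rte_memzone *tz;

	tz = rte_eth_dma_zone_reserve(dev, "tx_ring", queue_idx,
				      sizeof(txq->tx_ring[0]) * EM_MAX_RING_DESC,
				      RTE_CACHE_LINE_SIZE, socket_id);
	if (tz == NULL)
		return -ENOMEM;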

 lib/librte_eal/common/include/rte_memory.h |  4 +++
 lib/librte_eal/linuxapp/eal/eal_memory.c   |  7 ++++
 lib/librte_ether/rte_ethdev.c              | 22 ++++++++++++
 lib/librte_ether/rte_ethdev.h              | 23 ++++++++++++
 lib/librte_mempool/rte_mempool.c           | 26 +++++++-------
 lib/librte_pmd_e1000/em_rxtx.c             | 30 +++-------------
 lib/librte_pmd_e1000/igb_rxtx.c            | 52 +++++++++------------------
 lib/librte_pmd_i40e/i40e_ethdev.c          | 16 +++++----
 lib/librte_pmd_i40e/i40e_fdir.c            |  8 +++--
 lib/librte_pmd_i40e/i40e_rxtx.c            | 57 +++++++++++++++++------------
 lib/librte_pmd_ixgbe/ixgbe_rxtx.c          | 58 +++++++++---------------------
 11 files changed, 156 insertions(+), 147 deletions(-)

diff --git a/lib/librte_eal/common/include/rte_memory.h b/lib/librte_eal/common/include/rte_memory.h
index 7f8103f..ab6c1ff 100644
--- a/lib/librte_eal/common/include/rte_memory.h
+++ b/lib/librte_eal/common/include/rte_memory.h
@@ -176,6 +176,10 @@ unsigned rte_memory_get_nchannel(void);
 unsigned rte_memory_get_nrank(void);
 
 #ifdef RTE_LIBRTE_XEN_DOM0
+
+/**< Internal use only - should DOM0 memory mapping be used */
+extern int is_xen_dom0_supported(void);
+
 /**
  * Return the physical address of elt, which is an element of the pool mp.
  *
diff --git a/lib/librte_eal/linuxapp/eal/eal_memory.c b/lib/librte_eal/linuxapp/eal/eal_memory.c
index a67a1b0..4afda2a 100644
--- a/lib/librte_eal/linuxapp/eal/eal_memory.c
+++ b/lib/librte_eal/linuxapp/eal/eal_memory.c
@@ -98,6 +98,13 @@
 #include "eal_filesystem.h"
 #include "eal_hugepages.h"
 
+#ifdef RTE_LIBRTE_XEN_DOM0
+int is_xen_dom0_supported(void)
+{
+	return internal_config.xen_dom0_support;
+}
+#endif
+
 /**
  * @file
  * Huge page mapping under linux
diff --git a/lib/librte_ether/rte_ethdev.c b/lib/librte_ether/rte_ethdev.c
index ea3a1fb..457e0bc 100644
--- a/lib/librte_ether/rte_ethdev.c
+++ b/lib/librte_ether/rte_ethdev.c
@@ -2825,6 +2825,27 @@ _rte_eth_dev_callback_process(struct rte_eth_dev *dev,
 	}
 	rte_spinlock_unlock(&rte_eth_dev_cb_lock);
 }
+
+const struct rte_memzone *
+rte_eth_dma_zone_reserve(const struct rte_eth_dev *dev, const char *ring_name,
+			 uint16_t queue_id, size_t size, unsigned align,
+			 int socket_id)
+{
+	char z_name[RTE_MEMZONE_NAMESIZE];
+	const struct rte_memzone *mz;
+
+	snprintf(z_name, sizeof(z_name), "%s_%s_%d_%d",
+		 dev->driver->pci_drv.name, ring_name,
+		 dev->data->port_id, queue_id);
+
+	mz = rte_memzone_lookup(z_name);
+	if (mz)
+		return mz;
+
+	return rte_memzone_reserve_bounded(z_name, size,
+					   socket_id, 0, align, RTE_PGSIZE_2M);
+}
+
 #ifdef RTE_NIC_BYPASS
 int rte_eth_dev_bypass_init(uint8_t port_id)
 {
@@ -3003,6 +3024,7 @@ rte_eth_dev_bypass_wd_reset(uint8_t port_id)
 	(*dev->dev_ops->bypass_wd_reset)(dev);
 	return 0;
 }
+
 #endif
 
 int
diff --git a/lib/librte_ether/rte_ethdev.h b/lib/librte_ether/rte_ethdev.h
index 1200c1c..747acb5 100644
--- a/lib/librte_ether/rte_ethdev.h
+++ b/lib/librte_ether/rte_ethdev.h
@@ -3664,6 +3664,29 @@ int rte_eth_dev_filter_supported(uint8_t port_id, enum rte_filter_type filter_ty
 int rte_eth_dev_filter_ctrl(uint8_t port_id, enum rte_filter_type filter_type,
 			enum rte_filter_op filter_op, void *arg);
 
+/**
+ * Create memzone for HW rings.
+ * malloc can't be used as the physical address is needed.
+ * If the memzone is already created, then this function returns a ptr
+ * to the old one.
+ *
+ * @param eth_dev
+ *   The *eth_dev* pointer is the address of the *rte_eth_dev* structure
+ * @param name
+ *   The name of the memory zone
+ * @param queue_id
+ *   The index of the queue to add to name
+ * @param size
+ *   The size of the memory area
+ * @param align
+ *   Alignment for resulting memzone. Must be a power of 2.
+ * @param socket_id
+ *   The *socket_id* argument is the socket identifier in case of NUMA.
+ */
+const struct rte_memzone *
+rte_eth_dma_zone_reserve(const struct rte_eth_dev *eth_dev, const char *name,
+			 uint16_t queue_id, size_t size,
+			 unsigned align, int socket_id);
 #ifdef __cplusplus
 }
 #endif
diff --git a/lib/librte_mempool/rte_mempool.c b/lib/librte_mempool/rte_mempool.c
index 4cf6c25..5056a4f 100644
--- a/lib/librte_mempool/rte_mempool.c
+++ b/lib/librte_mempool/rte_mempool.c
@@ -372,19 +372,21 @@ rte_mempool_create(const char *name, unsigned n, unsigned elt_size,
 		   int socket_id, unsigned flags)
 {
 #ifdef RTE_LIBRTE_XEN_DOM0
-	return (rte_dom0_mempool_create(name, n, elt_size,
-		cache_size, private_data_size,
-		mp_init, mp_init_arg,
-		obj_init, obj_init_arg,
-		socket_id, flags));
-#else
-	return (rte_mempool_xmem_create(name, n, elt_size,
-		cache_size, private_data_size,
-		mp_init, mp_init_arg,
-		obj_init, obj_init_arg,
-		socket_id, flags,
-		NULL, NULL, MEMPOOL_PG_NUM_DEFAULT, MEMPOOL_PG_SHIFT_MAX));
+	if (is_xen_dom0_supported())
+		return (rte_dom0_mempool_create(name, n, elt_size,
+					cache_size, private_data_size,
+					mp_init, mp_init_arg,
+					obj_init, obj_init_arg,
+					socket_id, flags));
+	else
 #endif
+		return (rte_mempool_xmem_create(name, n, elt_size,
+					cache_size, private_data_size,
+					mp_init, mp_init_arg,
+					obj_init, obj_init_arg,
+					socket_id, flags,
+					NULL, NULL, MEMPOOL_PG_NUM_DEFAULT,
+					MEMPOOL_PG_SHIFT_MAX));
 }
 
 /*
diff --git a/lib/librte_pmd_e1000/em_rxtx.c b/lib/librte_pmd_e1000/em_rxtx.c
index aa0b88c..9e09cfa 100644
--- a/lib/librte_pmd_e1000/em_rxtx.c
+++ b/lib/librte_pmd_e1000/em_rxtx.c
@@ -1104,28 +1104,6 @@ eth_em_recv_scattered_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
 #define	EM_MAX_BUF_SIZE     16384
 #define EM_RCTL_FLXBUF_STEP 1024
 
-static const struct rte_memzone *
-ring_dma_zone_reserve(struct rte_eth_dev *dev, const char *ring_name,
-		uint16_t queue_id, uint32_t ring_size, int socket_id)
-{
-	const struct rte_memzone *mz;
-	char z_name[RTE_MEMZONE_NAMESIZE];
-
-	snprintf(z_name, sizeof(z_name), "%s_%s_%d_%d",
-		dev->driver->pci_drv.name, ring_name, dev->data->port_id,
-		queue_id);
-
-	if ((mz = rte_memzone_lookup(z_name)) != 0)
-		return (mz);
-
-#ifdef RTE_LIBRTE_XEN_DOM0
-	return rte_memzone_reserve_bounded(z_name, ring_size,
-			socket_id, 0, RTE_CACHE_LINE_SIZE, RTE_PGSIZE_2M);
-#else
-	return rte_memzone_reserve(z_name, ring_size, socket_id, 0);
-#endif
-}
-
 static void
 em_tx_queue_release_mbufs(struct em_tx_queue *txq)
 {
@@ -1273,8 +1251,8 @@ eth_em_tx_queue_setup(struct rte_eth_dev *dev,
 	 * resizing in later calls to the queue setup function.
 	 */
 	tsize = sizeof (txq->tx_ring[0]) * EM_MAX_RING_DESC;
-	if ((tz = ring_dma_zone_reserve(dev, "tx_ring", queue_idx, tsize,
-			socket_id)) == NULL)
+	if ((tz = rte_eth_dma_zone_reserve(dev, "tx_ring", queue_idx, tsize,
+					   RTE_CACHE_LINE_SIZE, socket_id)) == NULL)
 		return (-ENOMEM);
 
 	/* Allocate the tx queue data structure. */
@@ -1400,8 +1378,8 @@ eth_em_rx_queue_setup(struct rte_eth_dev *dev,
 
 	/* Allocate RX ring for max possible mumber of hardware descriptors. */
 	rsize = sizeof (rxq->rx_ring[0]) * EM_MAX_RING_DESC;
-	if ((rz = ring_dma_zone_reserve(dev, "rx_ring", queue_idx, rsize,
-			socket_id)) == NULL)
+	if ((rz = rte_eth_dma_zone_reserve(dev, "rx_ring", queue_idx, rsize,
+					   RTE_CACHE_LINE_SIZE, socket_id)) == NULL)
 		return (-ENOMEM);
 
 	/* Allocate the RX queue data structure. */
diff --git a/lib/librte_pmd_e1000/igb_rxtx.c b/lib/librte_pmd_e1000/igb_rxtx.c
index 5c394a9..d36469b 100644
--- a/lib/librte_pmd_e1000/igb_rxtx.c
+++ b/lib/librte_pmd_e1000/igb_rxtx.c
@@ -1109,29 +1109,6 @@ eth_igb_recv_scattered_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
 #define IGB_MIN_RING_DESC 32
 #define IGB_MAX_RING_DESC 4096
 
-static const struct rte_memzone *
-ring_dma_zone_reserve(struct rte_eth_dev *dev, const char *ring_name,
-		      uint16_t queue_id, uint32_t ring_size, int socket_id)
-{
-	char z_name[RTE_MEMZONE_NAMESIZE];
-	const struct rte_memzone *mz;
-
-	snprintf(z_name, sizeof(z_name), "%s_%s_%d_%d",
-			dev->driver->pci_drv.name, ring_name,
-				dev->data->port_id, queue_id);
-	mz = rte_memzone_lookup(z_name);
-	if (mz)
-		return mz;
-
-#ifdef RTE_LIBRTE_XEN_DOM0
-	return rte_memzone_reserve_bounded(z_name, ring_size,
-			socket_id, 0, IGB_ALIGN, RTE_PGSIZE_2M);
-#else
-	return rte_memzone_reserve_aligned(z_name, ring_size,
-			socket_id, 0, IGB_ALIGN);
-#endif
-}
-
 static void
 igb_tx_queue_release_mbufs(struct igb_tx_queue *txq)
 {
@@ -1265,8 +1242,8 @@ eth_igb_tx_queue_setup(struct rte_eth_dev *dev,
 	 * resizing in later calls to the queue setup function.
 	 */
 	size = sizeof(union e1000_adv_tx_desc) * IGB_MAX_RING_DESC;
-	tz = ring_dma_zone_reserve(dev, "tx_ring", queue_idx,
-					size, socket_id);
+	tz = rte_eth_dma_zone_reserve(dev, "tx_ring", queue_idx, size,
+				      IGB_ALIGN, socket_id);
 	if (tz == NULL) {
 		igb_tx_queue_release(txq);
 		return (-ENOMEM);
@@ -1284,12 +1261,14 @@ eth_igb_tx_queue_setup(struct rte_eth_dev *dev,
 	txq->port_id = dev->data->port_id;
 
 	txq->tdt_reg_addr = E1000_PCI_REG_ADDR(hw, E1000_TDT(txq->reg_idx));
-#ifndef RTE_LIBRTE_XEN_DOM0
-	txq->tx_ring_phys_addr = (uint64_t) tz->phys_addr;
-#else
-	txq->tx_ring_phys_addr = rte_mem_phy2mch(tz->memseg_id, tz->phys_addr);
+#ifdef RTE_LIBRTE_XEN_DOM0
+	if (is_xen_dom0_supported())
+		txq->tx_ring_phys_addr = rte_mem_phy2mch(tz->memseg_id, tz->phys_addr);
+	else
 #endif
-	 txq->tx_ring = (union e1000_adv_tx_desc *) tz->addr;
+		txq->tx_ring_phys_addr = (uint64_t) tz->phys_addr;
+
+	txq->tx_ring = (union e1000_adv_tx_desc *) tz->addr;
 	/* Allocate software ring */
 	txq->sw_ring = rte_zmalloc("txq->sw_ring",
 				   sizeof(struct igb_tx_entry) * nb_desc,
@@ -1414,18 +1393,21 @@ eth_igb_rx_queue_setup(struct rte_eth_dev *dev,
 	 *  resizing in later calls to the queue setup function.
 	 */
 	size = sizeof(union e1000_adv_rx_desc) * IGB_MAX_RING_DESC;
-	rz = ring_dma_zone_reserve(dev, "rx_ring", queue_idx, size, socket_id);
+	rz = rte_eth_dma_zone_reserve(dev, "rx_ring", queue_idx, size,
+				      IGB_ALIGN, socket_id);
 	if (rz == NULL) {
 		igb_rx_queue_release(rxq);
 		return (-ENOMEM);
 	}
 	rxq->rdt_reg_addr = E1000_PCI_REG_ADDR(hw, E1000_RDT(rxq->reg_idx));
 	rxq->rdh_reg_addr = E1000_PCI_REG_ADDR(hw, E1000_RDH(rxq->reg_idx));
-#ifndef RTE_LIBRTE_XEN_DOM0
-	rxq->rx_ring_phys_addr = (uint64_t) rz->phys_addr;
-#else
-	rxq->rx_ring_phys_addr = rte_mem_phy2mch(rz->memseg_id, rz->phys_addr);
+#ifdef RTE_LIBRTE_XEN_DOM0
+	if (is_xen_dom0_supported())
+		rxq->rx_ring_phys_addr =
+			rte_mem_phy2mch(rz->memseg_id, rz->phys_addr);
+	else
 #endif
+		rxq->rx_ring_phys_addr = (uint64_t) rz->phys_addr;
 	rxq->rx_ring = (union e1000_adv_rx_desc *) rz->addr;
 
 	/* Allocate software ring. */
diff --git a/lib/librte_pmd_i40e/i40e_ethdev.c b/lib/librte_pmd_i40e/i40e_ethdev.c
index 9fa6bec..44a012f 100644
--- a/lib/librte_pmd_i40e/i40e_ethdev.c
+++ b/lib/librte_pmd_i40e/i40e_ethdev.c
@@ -1979,11 +1979,12 @@ i40e_allocate_dma_mem_d(__attribute__((unused)) struct i40e_hw *hw,
 	id++;
 	snprintf(z_name, sizeof(z_name), "i40e_dma_%"PRIu64, id);
 #ifdef RTE_LIBRTE_XEN_DOM0
-	mz = rte_memzone_reserve_bounded(z_name, size, 0, 0, alignment,
-							RTE_PGSIZE_2M);
-#else
-	mz = rte_memzone_reserve_aligned(z_name, size, 0, 0, alignment);
+	if (is_xen_dom0_supported())
+		mz = rte_memzone_reserve_bounded(z_name, size, 0, 0, alignment,
+						 RTE_PGSIZE_2M);
+	else
 #endif
+		mz = rte_memzone_reserve_aligned(z_name, size, 0, 0, alignment);
 	if (!mz)
 		return I40E_ERR_NO_MEMORY;
 
@@ -1991,10 +1992,11 @@ i40e_allocate_dma_mem_d(__attribute__((unused)) struct i40e_hw *hw,
 	mem->size = size;
 	mem->va = mz->addr;
 #ifdef RTE_LIBRTE_XEN_DOM0
-	mem->pa = rte_mem_phy2mch(mz->memseg_id, mz->phys_addr);
-#else
-	mem->pa = mz->phys_addr;
+	if (is_xen_dom0_supported())
+		mem->pa = rte_mem_phy2mch(mz->memseg_id, mz->phys_addr);
+	else
 #endif
+		mem->pa = mz->phys_addr;
 
 	return I40E_SUCCESS;
 }
diff --git a/lib/librte_pmd_i40e/i40e_fdir.c b/lib/librte_pmd_i40e/i40e_fdir.c
index 68511c8..4c5b185 100644
--- a/lib/librte_pmd_i40e/i40e_fdir.c
+++ b/lib/librte_pmd_i40e/i40e_fdir.c
@@ -274,10 +274,12 @@ i40e_fdir_setup(struct i40e_pf *pf)
 	}
 	pf->fdir.prg_pkt = mz->addr;
 #ifdef RTE_LIBRTE_XEN_DOM0
-	pf->fdir.dma_addr = rte_mem_phy2mch(mz->memseg_id, mz->phys_addr);
-#else
-	pf->fdir.dma_addr = (uint64_t)mz->phys_addr;
+	if (is_xen_dom0_supported())
+		pf->fdir.dma_addr = rte_mem_phy2mch(mz->memseg_id, mz->phys_addr);
+	else
 #endif
+		pf->fdir.dma_addr = (uint64_t)mz->phys_addr;
+
 	pf->fdir.match_counter_index = I40E_COUNTER_INDEX_FDIR(hw->pf_id);
 	PMD_DRV_LOG(INFO, "FDIR setup successfully, with programming queue %u.",
 		    vsi->base_queue);
diff --git a/lib/librte_pmd_i40e/i40e_rxtx.c b/lib/librte_pmd_i40e/i40e_rxtx.c
index 2beae3c..bd01dc0 100644
--- a/lib/librte_pmd_i40e/i40e_rxtx.c
+++ b/lib/librte_pmd_i40e/i40e_rxtx.c
@@ -1796,10 +1796,11 @@ i40e_dev_rx_queue_setup(struct rte_eth_dev *dev,
 	memset(rz->addr, 0, ring_size);
 
 #ifdef RTE_LIBRTE_XEN_DOM0
-	rxq->rx_ring_phys_addr = rte_mem_phy2mch(rz->memseg_id, rz->phys_addr);
-#else
-	rxq->rx_ring_phys_addr = (uint64_t)rz->phys_addr;
+	if (is_xen_dom0_supported())
+		rxq->rx_ring_phys_addr = rte_mem_phy2mch(rz->memseg_id, rz->phys_addr);
+	else
 #endif
+		rxq->rx_ring_phys_addr = (uint64_t)rz->phys_addr;
 
 	rxq->rx_ring = (union i40e_rx_desc *)rz->addr;
 
@@ -2079,10 +2080,11 @@ i40e_dev_tx_queue_setup(struct rte_eth_dev *dev,
 	txq->tx_deferred_start = tx_conf->tx_deferred_start;
 
 #ifdef RTE_LIBRTE_XEN_DOM0
-	txq->tx_ring_phys_addr = rte_mem_phy2mch(tz->memseg_id, tz->phys_addr);
-#else
-	txq->tx_ring_phys_addr = (uint64_t)tz->phys_addr;
+	if (is_xen_dom0_supported())
+		txq->tx_ring_phys_addr = rte_mem_phy2mch(tz->memseg_id, tz->phys_addr);
+	else
 #endif
+		txq->tx_ring_phys_addr = (uint64_t)tz->phys_addr;
 	txq->tx_ring = (struct i40e_tx_desc *)tz->addr;
 
 	/* Allocate software ring */
@@ -2147,12 +2149,13 @@ i40e_ring_dma_zone_reserve(struct rte_eth_dev *dev,
 		return mz;
 
 #ifdef RTE_LIBRTE_XEN_DOM0
-	return rte_memzone_reserve_bounded(z_name, ring_size,
-		socket_id, 0, I40E_ALIGN, RTE_PGSIZE_2M);
-#else
-	return rte_memzone_reserve_aligned(z_name, ring_size,
-				socket_id, 0, I40E_ALIGN);
-#endif
+	if (is_xen_dom0_supported())
+		return rte_memzone_reserve_bounded(z_name, ring_size,
+				   socket_id, 0, I40E_ALIGN, RTE_PGSIZE_2M);
+	else
+#endif
+		return rte_memzone_reserve_aligned(z_name, ring_size,
+				   socket_id, 0, I40E_ALIGN);
 }
 
 const struct rte_memzone *
@@ -2164,12 +2167,14 @@ i40e_memzone_reserve(const char *name, uint32_t len, int socket_id)
 	if (mz)
 		return mz;
 #ifdef RTE_LIBRTE_XEN_DOM0
-	mz = rte_memzone_reserve_bounded(name, len,
-		socket_id, 0, I40E_ALIGN, RTE_PGSIZE_2M);
-#else
-	mz = rte_memzone_reserve_aligned(name, len,
-				socket_id, 0, I40E_ALIGN);
+	if (is_xen_dom0_supported())
+		mz = rte_memzone_reserve_bounded(name, len,
+				socket_id, 0, I40E_ALIGN, RTE_PGSIZE_2M);
+	else
 #endif
+		mz = rte_memzone_reserve_aligned(name, len,
+				socket_id, 0, I40E_ALIGN);
+
 	return mz;
 }
 
@@ -2573,10 +2578,13 @@ i40e_fdir_setup_tx_resources(struct i40e_pf *pf)
 	txq->vsi = pf->fdir.fdir_vsi;
 
 #ifdef RTE_LIBRTE_XEN_DOM0
-	txq->tx_ring_phys_addr = rte_mem_phy2mch(tz->memseg_id, tz->phys_addr);
-#else
-	txq->tx_ring_phys_addr = (uint64_t)tz->phys_addr;
+	if (is_xen_dom0_supported())
+		txq->tx_ring_phys_addr = rte_mem_phy2mch(tz->memseg_id,
+							 tz->phys_addr);
+	else
 #endif
+		txq->tx_ring_phys_addr = (uint64_t)tz->phys_addr;
+
 	txq->tx_ring = (struct i40e_tx_desc *)tz->addr;
 	/*
 	 * don't need to allocate software ring and reset for the fdir
@@ -2633,10 +2641,13 @@ i40e_fdir_setup_rx_resources(struct i40e_pf *pf)
 	rxq->vsi = pf->fdir.fdir_vsi;
 
 #ifdef RTE_LIBRTE_XEN_DOM0
-	rxq->rx_ring_phys_addr = rte_mem_phy2mch(rz->memseg_id, rz->phys_addr);
-#else
-	rxq->rx_ring_phys_addr = (uint64_t)rz->phys_addr;
+	if (is_xen_dom0_supported())
+		rxq->rx_ring_phys_addr = rte_mem_phy2mch(rz->memseg_id,
+							 rz->phys_addr);
+	else
 #endif
+		rxq->rx_ring_phys_addr = (uint64_t)rz->phys_addr;
+
 	rxq->rx_ring = (union i40e_rx_desc *)rz->addr;
 
 	/*
diff --git a/lib/librte_pmd_ixgbe/ixgbe_rxtx.c b/lib/librte_pmd_ixgbe/ixgbe_rxtx.c
index e6766b3..303144d 100644
--- a/lib/librte_pmd_ixgbe/ixgbe_rxtx.c
+++ b/lib/librte_pmd_ixgbe/ixgbe_rxtx.c
@@ -1656,35 +1656,6 @@ ixgbe_recv_scattered_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
 #define IXGBE_MIN_RING_DESC 32
 #define IXGBE_MAX_RING_DESC 4096
 
-/*
- * Create memzone for HW rings. malloc can't be used as the physical address is
- * needed. If the memzone is already created, then this function returns a ptr
- * to the old one.
- */
-static const struct rte_memzone *
-ring_dma_zone_reserve(struct rte_eth_dev *dev, const char *ring_name,
-		      uint16_t queue_id, uint32_t ring_size, int socket_id)
-{
-	char z_name[RTE_MEMZONE_NAMESIZE];
-	const struct rte_memzone *mz;
-
-	snprintf(z_name, sizeof(z_name), "%s_%s_%d_%d",
-			dev->driver->pci_drv.name, ring_name,
-			dev->data->port_id, queue_id);
-
-	mz = rte_memzone_lookup(z_name);
-	if (mz)
-		return mz;
-
-#ifdef RTE_LIBRTE_XEN_DOM0
-	return rte_memzone_reserve_bounded(z_name, ring_size,
-		socket_id, 0, IXGBE_ALIGN, RTE_PGSIZE_2M);
-#else
-	return rte_memzone_reserve_aligned(z_name, ring_size,
-		socket_id, 0, IXGBE_ALIGN);
-#endif
-}
-
 static void
 ixgbe_tx_queue_release_mbufs(struct igb_tx_queue *txq)
 {
@@ -1920,9 +1891,9 @@ ixgbe_dev_tx_queue_setup(struct rte_eth_dev *dev,
 	 * handle the maximum ring size is allocated in order to allow for
 	 * resizing in later calls to the queue setup function.
 	 */
-	tz = ring_dma_zone_reserve(dev, "tx_ring", queue_idx,
+	tz = rte_eth_dma_zone_reserve(dev, "tx_ring", queue_idx,
 			sizeof(union ixgbe_adv_tx_desc) * IXGBE_MAX_RING_DESC,
-			socket_id);
+			IXGBE_ALIGN, socket_id);
 	if (tz == NULL) {
 		ixgbe_tx_queue_release(txq);
 		return (-ENOMEM);
@@ -1950,11 +1921,14 @@ ixgbe_dev_tx_queue_setup(struct rte_eth_dev *dev,
 		txq->tdt_reg_addr = IXGBE_PCI_REG_ADDR(hw, IXGBE_VFTDT(queue_idx));
 	else
 		txq->tdt_reg_addr = IXGBE_PCI_REG_ADDR(hw, IXGBE_TDT(txq->reg_idx));
-#ifndef	RTE_LIBRTE_XEN_DOM0
-	txq->tx_ring_phys_addr = (uint64_t) tz->phys_addr;
-#else
-	txq->tx_ring_phys_addr = rte_mem_phy2mch(tz->memseg_id, tz->phys_addr);
+
+#ifdef RTE_LIBRTE_XEN_DOM0
+	if (is_xen_dom0_supported())
+		txq->tx_ring_phys_addr =
+			rte_mem_phy2mch(tz->memseg_id, tz->phys_addr);
+	else
 #endif
+		txq->tx_ring_phys_addr = (uint64_t) tz->phys_addr;
 	txq->tx_ring = (union ixgbe_adv_tx_desc *) tz->addr;
 
 	/* Allocate software ring */
@@ -2195,8 +2169,8 @@ ixgbe_dev_rx_queue_setup(struct rte_eth_dev *dev,
 	 * handle the maximum ring size is allocated in order to allow for
 	 * resizing in later calls to the queue setup function.
 	 */
-	rz = ring_dma_zone_reserve(dev, "rx_ring", queue_idx,
-				   RX_RING_SZ, socket_id);
+	rz = rte_eth_dma_zone_reserve(dev, "rx_ring", queue_idx,
+				      RX_RING_SZ, IXGBE_ALIGN, socket_id);
 	if (rz == NULL) {
 		ixgbe_rx_queue_release(rxq);
 		return (-ENOMEM);
@@ -2223,11 +2197,13 @@ ixgbe_dev_rx_queue_setup(struct rte_eth_dev *dev,
 		rxq->rdh_reg_addr =
 			IXGBE_PCI_REG_ADDR(hw, IXGBE_RDH(rxq->reg_idx));
 	}
-#ifndef RTE_LIBRTE_XEN_DOM0
-	rxq->rx_ring_phys_addr = (uint64_t) rz->phys_addr;
-#else
-	rxq->rx_ring_phys_addr = rte_mem_phy2mch(rz->memseg_id, rz->phys_addr);
+#ifdef RTE_LIBRTE_XEN_DOM0
+	if (is_xen_dom0_supported())
+		rxq->rx_ring_phys_addr =
+			rte_mem_phy2mch(rz->memseg_id, rz->phys_addr);
+	else
 #endif
+		rxq->rx_ring_phys_addr = (uint64_t) rz->phys_addr;
 	rxq->rx_ring = (union ixgbe_adv_rx_desc *) rz->addr;
 
 	/*
-- 
2.1.4
