From: Jerin Jacob
Subject: [PATCH v3 15/20] thunderx/nicvf: add rx queue start and stop support
Date: Tue, 7 Jun 2016 22:10:27 +0530
Message-ID: <1465317632-11471-16-git-send-email-jerin.jacob@caviumnetworks.com>
References: <1464540424-12631-1-git-send-email-jerin.jacob@caviumnetworks.com>
 <1465317632-11471-1-git-send-email-jerin.jacob@caviumnetworks.com>
In-Reply-To: <1465317632-11471-1-git-send-email-jerin.jacob@caviumnetworks.com>
Cc: Jerin Jacob, Maciej Czekaj, Kamil Rytarowski, Zyta Szpak,
 Slawomir Rosek, Radoslaw Biernacki

Signed-off-by: Jerin Jacob
Signed-off-by: Maciej Czekaj
Signed-off-by: Kamil Rytarowski
Signed-off-by: Zyta Szpak
Signed-off-by: Slawomir Rosek
Signed-off-by: Radoslaw Biernacki
---
 drivers/net/thunderx/nicvf_ethdev.c | 175 ++++++++++++++++++++++++++++++++++++
 drivers/net/thunderx/nicvf_rxtx.c   |  18 ++++
 drivers/net/thunderx/nicvf_rxtx.h   |   1 +
 3 files changed, 194 insertions(+)

diff --git a/drivers/net/thunderx/nicvf_ethdev.c b/drivers/net/thunderx/nicvf_ethdev.c
index 5da07da..ba32803 100644
--- a/drivers/net/thunderx/nicvf_ethdev.c
+++ b/drivers/net/thunderx/nicvf_ethdev.c
@@ -88,6 +88,8 @@ static int nicvf_dev_rss_hash_update(struct rte_eth_dev *dev,
 				     struct rte_eth_rss_conf *rss_conf);
 static int nicvf_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
 					struct rte_eth_rss_conf *rss_conf);
+static int nicvf_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t qidx);
+static int nicvf_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t qidx);
 static int nicvf_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t qidx,
 				    uint16_t nb_desc, unsigned int socket_id,
 				    const struct rte_eth_rxconf *rx_conf,
@@ -594,6 +596,54 @@ nicvf_tx_queue_reset(struct nicvf_txq *txq)
 	txq->xmit_bufs = 0;
 }
 
+
+static inline int
+nicvf_configure_cpi(struct rte_eth_dev *dev)
+{
+	struct nicvf *nic = nicvf_pmd_priv(dev);
+	uint16_t qidx, qcnt;
+	int ret;
+
+	/* Count started rx queues */
+	for (qidx = qcnt = 0; qidx < nic->eth_dev->data->nb_rx_queues; qidx++)
+		if (dev->data->rx_queue_state[qidx] ==
+		    RTE_ETH_QUEUE_STATE_STARTED)
+			qcnt++;
+
+	nic->cpi_alg = CPI_ALG_NONE;
+	ret = nicvf_mbox_config_cpi(nic, qcnt);
+	if (ret)
+		PMD_INIT_LOG(ERR, "Failed to configure CPI %d", ret);
+
+	return ret;
+}
+
+static int
+nicvf_configure_rss_reta(struct rte_eth_dev *dev)
+{
+	struct nicvf *nic = nicvf_pmd_priv(dev);
+	unsigned int idx, qmap_size;
+	uint8_t qmap[RTE_MAX_QUEUES_PER_PORT];
+	uint8_t default_reta[NIC_MAX_RSS_IDR_TBL_SIZE];
+
+	if (nic->cpi_alg != CPI_ALG_NONE)
+		return -EINVAL;
+
+	/* Prepare queue map */
+	for (idx = 0, qmap_size = 0; idx < dev->data->nb_rx_queues; idx++) {
+		if (dev->data->rx_queue_state[idx] ==
+		    RTE_ETH_QUEUE_STATE_STARTED)
+			qmap[qmap_size++] = idx;
+	}
+
+	/* Update default RSS RETA */
+	for (idx = 0; idx < NIC_MAX_RSS_IDR_TBL_SIZE; idx++)
+		default_reta[idx] = qmap[idx % qmap_size];
+
+	return nicvf_rss_reta_update(nic, default_reta,
+				     NIC_MAX_RSS_IDR_TBL_SIZE);
+}
+
 static void
 nicvf_dev_tx_queue_release(void *sq)
 {
@@ -719,6 +769,33 @@ nicvf_dev_tx_queue_setup(struct rte_eth_dev *dev, uint16_t qidx,
 	return 0;
 }
 
+static inline void
+nicvf_rx_queue_release_mbufs(struct nicvf_rxq *rxq)
+{
+	uint32_t rxq_cnt;
+	uint32_t nb_pkts, released_pkts = 0;
+	uint32_t refill_cnt = 0;
+	struct rte_eth_dev *dev = rxq->nic->eth_dev;
+	struct rte_mbuf *rx_pkts[NICVF_MAX_RX_FREE_THRESH];
+
+	if (dev->rx_pkt_burst == NULL)
+		return;
+
+	while ((rxq_cnt = nicvf_dev_rx_queue_count(dev, rxq->queue_id))) {
+		nb_pkts = dev->rx_pkt_burst(rxq, rx_pkts,
+					    NICVF_MAX_RX_FREE_THRESH);
+		PMD_DRV_LOG(INFO, "nb_pkts=%d rxq_cnt=%d", nb_pkts, rxq_cnt);
+		while (nb_pkts) {
+			rte_pktmbuf_free_seg(rx_pkts[--nb_pkts]);
+			released_pkts++;
+		}
+	}
+
+	refill_cnt += nicvf_dev_rbdr_refill(dev, rxq->queue_id);
+	PMD_DRV_LOG(INFO, "free_cnt=%d refill_cnt=%d",
+		    released_pkts, refill_cnt);
+}
+
 static void
 nicvf_rx_queue_reset(struct nicvf_rxq *rxq)
 {
@@ -727,6 +804,69 @@ nicvf_rx_queue_reset(struct nicvf_rxq *rxq)
 	rxq->recv_buffers = 0;
 }
 
+static inline int
+nicvf_start_rx_queue(struct rte_eth_dev *dev, uint16_t qidx)
+{
+	struct nicvf *nic = nicvf_pmd_priv(dev);
+	struct nicvf_rxq *rxq;
+	int ret;
+
+	if (dev->data->rx_queue_state[qidx] == RTE_ETH_QUEUE_STATE_STARTED)
+		return 0;
+
+	/* Update rbdr pointer to all rxq */
+	rxq = dev->data->rx_queues[qidx];
+	rxq->shared_rbdr = nic->rbdr;
+
+	ret = nicvf_qset_rq_config(nic, qidx, rxq);
+	if (ret) {
+		PMD_INIT_LOG(ERR, "Failed to configure rq %d %d", qidx, ret);
+		goto config_rq_error;
+	}
+	ret = nicvf_qset_cq_config(nic, qidx, rxq);
+	if (ret) {
+		PMD_INIT_LOG(ERR, "Failed to configure cq %d %d", qidx, ret);
+		goto config_cq_error;
+	}
+
+	dev->data->rx_queue_state[qidx] = RTE_ETH_QUEUE_STATE_STARTED;
+	return 0;
+
+config_cq_error:
+	nicvf_qset_cq_reclaim(nic, qidx);
+config_rq_error:
+	nicvf_qset_rq_reclaim(nic, qidx);
+	return ret;
+}
+
+static inline int
+nicvf_stop_rx_queue(struct rte_eth_dev *dev, uint16_t qidx)
+{
+	struct nicvf *nic = nicvf_pmd_priv(dev);
+	struct nicvf_rxq *rxq;
+	int ret, other_error;
+
+	if (dev->data->rx_queue_state[qidx] == RTE_ETH_QUEUE_STATE_STOPPED)
+		return 0;
+
+	ret = nicvf_qset_rq_reclaim(nic, qidx);
+	if (ret)
+		PMD_INIT_LOG(ERR, "Failed to reclaim rq %d %d", qidx, ret);
+
+	other_error = ret;
+	rxq = dev->data->rx_queues[qidx];
+	nicvf_rx_queue_release_mbufs(rxq);
+	nicvf_rx_queue_reset(rxq);
+
+	ret = nicvf_qset_cq_reclaim(nic, qidx);
+	if (ret)
+		PMD_INIT_LOG(ERR, "Failed to reclaim cq %d %d", qidx, ret);
+
+	other_error |= ret;
+	dev->data->rx_queue_state[qidx] = RTE_ETH_QUEUE_STATE_STOPPED;
+	return other_error;
+}
+
 static void
 nicvf_dev_rx_queue_release(void *rx_queue)
 {
@@ -739,6 +879,39 @@ nicvf_dev_rx_queue_release(void *rx_queue)
 }
 
 static int
+nicvf_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t qidx)
+{
+	int ret;
+
+	if (qidx >= nicvf_pmd_priv(dev)->eth_dev->data->nb_rx_queues)
+		return -EINVAL;
+
+	ret = nicvf_start_rx_queue(dev, qidx);
+	if (ret)
+		return ret;
+
+	ret = nicvf_configure_cpi(dev);
+	if (ret)
+		return ret;
+
+	return nicvf_configure_rss_reta(dev);
+}
+
+static int
+nicvf_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t qidx)
+{
+	int ret;
+
+	if (qidx >= nicvf_pmd_priv(dev)->eth_dev->data->nb_rx_queues)
+		return -EINVAL;
+
+	ret = nicvf_stop_rx_queue(dev, qidx);
+	ret |= nicvf_configure_cpi(dev);
+	ret |= nicvf_configure_rss_reta(dev);
+	return ret;
+}
+
+static int
 nicvf_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t qidx,
 			 uint16_t nb_desc, unsigned int socket_id,
 			 const struct rte_eth_rxconf *rx_conf,
@@ -965,6 +1138,8 @@ static const struct eth_dev_ops nicvf_eth_dev_ops = {
 	.reta_query               = nicvf_dev_reta_query,
 	.rss_hash_update          = nicvf_dev_rss_hash_update,
 	.rss_hash_conf_get        = nicvf_dev_rss_hash_conf_get,
+	.rx_queue_start           = nicvf_dev_rx_queue_start,
+	.rx_queue_stop            = nicvf_dev_rx_queue_stop,
 	.rx_queue_setup           = nicvf_dev_rx_queue_setup,
 	.rx_queue_release         = nicvf_dev_rx_queue_release,
 	.rx_queue_count           = nicvf_dev_rx_queue_count,
diff --git a/drivers/net/thunderx/nicvf_rxtx.c b/drivers/net/thunderx/nicvf_rxtx.c
index 8031685..e8c605d 100644
--- a/drivers/net/thunderx/nicvf_rxtx.c
+++ b/drivers/net/thunderx/nicvf_rxtx.c
@@ -580,3 +580,21 @@ nicvf_dev_rx_queue_count(struct rte_eth_dev *dev, uint16_t queue_idx)
 	rxq = (struct nicvf_rxq *)dev->data->rx_queues[queue_idx];
 	return nicvf_addr_read(rxq->cq_status) & NICVF_CQ_CQE_COUNT_MASK;
 }
+
+uint32_t
+nicvf_dev_rbdr_refill(struct rte_eth_dev *dev, uint16_t queue_idx)
+{
+	struct nicvf_rxq *rxq;
+	uint32_t to_process;
+	uint32_t rx_free;
+
+	rxq = (struct nicvf_rxq *)dev->data->rx_queues[queue_idx];
+	to_process = rxq->recv_buffers;
+	while (rxq->recv_buffers > 0) {
+		rx_free = RTE_MIN(rxq->recv_buffers, NICVF_MAX_RX_FREE_THRESH);
+		rxq->recv_buffers -= nicvf_fill_rbdr(rxq, rx_free);
+	}
+
+	assert(rxq->recv_buffers == 0);
+	return to_process;
+}
diff --git a/drivers/net/thunderx/nicvf_rxtx.h b/drivers/net/thunderx/nicvf_rxtx.h
index 44cef06..3484928 100644
--- a/drivers/net/thunderx/nicvf_rxtx.h
+++ b/drivers/net/thunderx/nicvf_rxtx.h
@@ -85,6 +85,7 @@ fill_sq_desc_gather(union sq_entry_t *entry, struct rte_mbuf *pkt)
 #endif
 
 uint32_t nicvf_dev_rx_queue_count(struct rte_eth_dev *dev, uint16_t queue_idx);
+uint32_t nicvf_dev_rbdr_refill(struct rte_eth_dev *dev, uint16_t queue_idx);
 
 uint16_t nicvf_recv_pkts(void *rxq, struct rte_mbuf **rx_pkts, uint16_t pkts);
 uint16_t nicvf_recv_pkts_multiseg(void *rx_queue, struct rte_mbuf **rx_pkts,
-- 
2.5.5
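
Note for testers (not part of the patch): a minimal, untested sketch of how an
application would exercise the new per-queue ops through the generic ethdev
API. The port and queue ids are illustrative, and rte_eth_dev_rx_queue_start()
and rte_eth_dev_rx_queue_stop() simply dispatch to the rx_queue_start/stop
dev_ops added above.

#include <rte_ethdev.h>

/* Illustrative only: start an rx queue, receive on it, then stop it.
 * In this DPDK time frame the port id is a uint8_t.
 */
static int
restart_rx_queue(uint8_t port_id, uint16_t qidx)
{
	int ret;

	/* Calls into nicvf_dev_rx_queue_start(): RQ/CQ config + CPI/RETA update */
	ret = rte_eth_dev_rx_queue_start(port_id, qidx);
	if (ret != 0)
		return ret; /* e.g. -EINVAL for an out-of-range queue index */

	/* ... rte_eth_rx_burst(port_id, qidx, ...) as usual ... */

	/* Calls into nicvf_dev_rx_queue_stop(): reclaim RQ/CQ, free pending mbufs */
	return rte_eth_dev_rx_queue_stop(port_id, qidx);
}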