All of lore.kernel.org
 help / color / mirror / Atom feed
From: Jerin Jacob <jerinjacobk@gmail.com>
To: Pavan Nikhilesh <pbhagavatula@marvell.com>
Cc: Jerin Jacob <jerinj@marvell.com>,
	Nithin Dabilpuram <ndabilpuram@marvell.com>,
	 Kiran Kumar K <kirankumark@marvell.com>,
	Sunil Kumar Kori <skori@marvell.com>,
	 Satha Rao <skoteshwar@marvell.com>,
	Shijith Thotton <sthotton@marvell.com>, dpdk-dev <dev@dpdk.org>
Subject: Re: [dpdk-dev] [PATCH v8 7/7] event/cnxk: add Tx event vector fastpath
Date: Tue, 13 Jul 2021 19:06:17 +0530	[thread overview]
Message-ID: <CALBAE1MytR=nJYaj2=4vxLy4CFAJ3CPBnLhScao2AOW0MWqZuA@mail.gmail.com> (raw)
In-Reply-To: <20210711232958.2191-7-pbhagavatula@marvell.com>

On Mon, Jul 12, 2021 at 5:01 AM <pbhagavatula@marvell.com> wrote:
>
> From: Pavan Nikhilesh <pbhagavatula@marvell.com>
>
> Add Tx event vector fastpath, integrate event vector Tx routine
> into Tx burst.
>
> Signed-off-by: Pavan Nikhilesh <pbhagavatula@marvell.com>

* **Added support for Marvell CN10K, CN9K, event Rx/Tx adapter.**
  * Added Rx/Tx adapter support for event/cnxk when the ethernet
device requested
    is net/cnxk.

There is a documentation build issue caused by this patch series; the Sphinx log follows:

[2757/2784] Generating html_guides with a custom command
FAILED: doc/guides/html
/usr/bin/python ../buildtools/call-sphinx-build.py
/usr/bin/sphinx-build 21.08.0-rc1
/export/dpdk-next-eventdev/doc/guides
/export/dpdk-next-eventdev/build/doc/guides -W

Warning, treated as error:
/export/dpdk-next-eventdev/doc/guides/rel_notes/release_21_08.rst:122:Unexpected
indentation.

> ---
>  drivers/common/cnxk/roc_sso.h        |  23 ++++++
>  drivers/event/cnxk/cn10k_eventdev.c  |   3 +-
>  drivers/event/cnxk/cn10k_worker.h    | 104 +++++++++++++++++++++++++--
>  drivers/event/cnxk/cn9k_worker.h     |   4 +-
>  drivers/event/cnxk/cnxk_worker.h     |  22 ------
>  drivers/net/cnxk/cn10k_tx.c          |   2 +-
>  drivers/net/cnxk/cn10k_tx.h          |  52 +++++++++-----
>  drivers/net/cnxk/cn10k_tx_mseg.c     |   3 +-
>  drivers/net/cnxk/cn10k_tx_vec.c      |   2 +-
>  drivers/net/cnxk/cn10k_tx_vec_mseg.c |   2 +-
>  10 files changed, 165 insertions(+), 52 deletions(-)
>
> diff --git a/drivers/common/cnxk/roc_sso.h b/drivers/common/cnxk/roc_sso.h
> index a6030e7d8a..316c6ccd59 100644
> --- a/drivers/common/cnxk/roc_sso.h
> +++ b/drivers/common/cnxk/roc_sso.h
> @@ -44,6 +44,29 @@ struct roc_sso {
>         uint8_t reserved[ROC_SSO_MEM_SZ] __plt_cache_aligned;
>  } __plt_cache_aligned;
>
> +static __rte_always_inline void
> +roc_sso_hws_head_wait(uintptr_t tag_op)
> +{
> +#ifdef RTE_ARCH_ARM64
> +       uint64_t tag;
> +
> +       asm volatile(PLT_CPU_FEATURE_PREAMBLE
> +                    "          ldr %[tag], [%[tag_op]] \n"
> +                    "          tbnz %[tag], 35, done%=         \n"
> +                    "          sevl                            \n"
> +                    "rty%=:    wfe                             \n"
> +                    "          ldr %[tag], [%[tag_op]] \n"
> +                    "          tbz %[tag], 35, rty%=           \n"
> +                    "done%=:                                   \n"
> +                    : [tag] "=&r"(tag)
> +                    : [tag_op] "r"(tag_op));
> +#else
> +       /* Wait for the SWTAG/SWTAG_FULL operation */
> +       while (!(plt_read64(tag_op) & BIT_ULL(35)))
> +               ;
> +#endif
> +}
> +
>  /* SSO device initialization */
>  int __roc_api roc_sso_dev_init(struct roc_sso *roc_sso);
>  int __roc_api roc_sso_dev_fini(struct roc_sso *roc_sso);
> diff --git a/drivers/event/cnxk/cn10k_eventdev.c b/drivers/event/cnxk/cn10k_eventdev.c
> index e85fa4785d..6f37c5bd23 100644
> --- a/drivers/event/cnxk/cn10k_eventdev.c
> +++ b/drivers/event/cnxk/cn10k_eventdev.c
> @@ -782,7 +782,8 @@ cn10k_sso_tx_adapter_caps_get(const struct rte_eventdev *dev,
>         if (ret)
>                 *caps = 0;
>         else
> -               *caps = RTE_EVENT_ETH_TX_ADAPTER_CAP_INTERNAL_PORT;
> +               *caps = RTE_EVENT_ETH_TX_ADAPTER_CAP_INTERNAL_PORT |
> +                       RTE_EVENT_ETH_TX_ADAPTER_CAP_EVENT_VECTOR;
>
>         return 0;
>  }
> diff --git a/drivers/event/cnxk/cn10k_worker.h b/drivers/event/cnxk/cn10k_worker.h
> index 7a48a6b17d..9cc0992063 100644
> --- a/drivers/event/cnxk/cn10k_worker.h
> +++ b/drivers/event/cnxk/cn10k_worker.h
> @@ -308,29 +308,120 @@ uint16_t __rte_hot cn10k_sso_hws_enq_fwd_burst(void *port,
>  NIX_RX_FASTPATH_MODES
>  #undef R
>
> -static __rte_always_inline const struct cn10k_eth_txq *
> +static __rte_always_inline struct cn10k_eth_txq *
>  cn10k_sso_hws_xtract_meta(struct rte_mbuf *m,
>                           const uint64_t txq_data[][RTE_MAX_QUEUES_PER_PORT])
>  {
> -       return (const struct cn10k_eth_txq *)
> +       return (struct cn10k_eth_txq *)
>                 txq_data[m->port][rte_event_eth_tx_adapter_txq_get(m)];
>  }
>
> +static __rte_always_inline void
> +cn10k_sso_vwqe_split_tx(struct rte_mbuf **mbufs, uint16_t nb_mbufs,
> +                       uint64_t *cmd, uint16_t lmt_id, uintptr_t lmt_addr,
> +                       uint8_t sched_type, uintptr_t base,
> +                       const uint64_t txq_data[][RTE_MAX_QUEUES_PER_PORT],
> +                       const uint32_t flags)
> +{
> +       uint16_t port[4], queue[4];
> +       struct cn10k_eth_txq *txq;
> +       uint16_t i, j;
> +       uintptr_t pa;
> +
> +       for (i = 0; i < nb_mbufs; i += 4) {
> +               port[0] = mbufs[i]->port;
> +               port[1] = mbufs[i + 1]->port;
> +               port[2] = mbufs[i + 2]->port;
> +               port[3] = mbufs[i + 3]->port;
> +
> +               queue[0] = rte_event_eth_tx_adapter_txq_get(mbufs[i]);
> +               queue[1] = rte_event_eth_tx_adapter_txq_get(mbufs[i + 1]);
> +               queue[2] = rte_event_eth_tx_adapter_txq_get(mbufs[i + 2]);
> +               queue[3] = rte_event_eth_tx_adapter_txq_get(mbufs[i + 3]);
> +
> +               if (((port[0] ^ port[1]) & (port[2] ^ port[3])) ||
> +                   ((queue[0] ^ queue[1]) & (queue[2] ^ queue[3]))) {
> +
> +                       for (j = 0; j < 4; j++) {
> +                               struct rte_mbuf *m = mbufs[i + j];
> +
> +                               txq = (struct cn10k_eth_txq *)
> +                                       txq_data[port[j]][queue[j]];
> +                               cn10k_nix_tx_skeleton(txq, cmd, flags);
> +                               /* Perform header writes before barrier
> +                                * for TSO
> +                                */
> +                               if (flags & NIX_TX_OFFLOAD_TSO_F)
> +                                       cn10k_nix_xmit_prepare_tso(m, flags);
> +
> +                               cn10k_nix_xmit_prepare(m, cmd, lmt_addr, flags,
> +                                                      txq->lso_tun_fmt);
> +                               if (flags & NIX_TX_MULTI_SEG_F) {
> +                                       const uint16_t segdw =
> +                                               cn10k_nix_prepare_mseg(
> +                                                       m, (uint64_t *)lmt_addr,
> +                                                       flags);
> +                                       pa = txq->io_addr | ((segdw - 1) << 4);
> +                               } else {
> +                                       pa = txq->io_addr |
> +                                            (cn10k_nix_tx_ext_subs(flags) + 1)
> +                                                    << 4;
> +                               }
> +                               if (!sched_type)
> +                                       roc_sso_hws_head_wait(base +
> +                                                             SSOW_LF_GWS_TAG);
> +
> +                               roc_lmt_submit_steorl(lmt_id, pa);
> +                       }
> +               } else {
> +                       txq = (struct cn10k_eth_txq *)
> +                               txq_data[port[0]][queue[0]];
> +                       cn10k_nix_xmit_pkts_vector(txq, &mbufs[i], 4, cmd, base
> +                                       + SSOW_LF_GWS_TAG,
> +                                                  flags | NIX_TX_VWQE_F);
> +               }
> +       }
> +}
> +
>  static __rte_always_inline uint16_t
>  cn10k_sso_hws_event_tx(struct cn10k_sso_hws *ws, struct rte_event *ev,
>                        uint64_t *cmd,
>                        const uint64_t txq_data[][RTE_MAX_QUEUES_PER_PORT],
>                        const uint32_t flags)
>  {
> -       const struct cn10k_eth_txq *txq;
> -       struct rte_mbuf *m = ev->mbuf;
> -       uint16_t ref_cnt = m->refcnt;
> +       struct cn10k_eth_txq *txq;
> +       struct rte_mbuf *m;
>         uintptr_t lmt_addr;
> +       uint16_t ref_cnt;
>         uint16_t lmt_id;
>         uintptr_t pa;
>
>         lmt_addr = ws->lmt_base;
>         ROC_LMT_BASE_ID_GET(lmt_addr, lmt_id);
> +
> +       if (ev->event_type & RTE_EVENT_TYPE_VECTOR) {
> +               struct rte_mbuf **mbufs = ev->vec->mbufs;
> +               uint64_t meta = *(uint64_t *)ev->vec;
> +
> +               if (meta & BIT(31)) {
> +                       txq = (struct cn10k_eth_txq *)
> +                               txq_data[meta >> 32][meta >> 48];
> +
> +                       cn10k_nix_xmit_pkts_vector(
> +                               txq, mbufs, meta & 0xFFFF, cmd,
> +                               ws->tx_base + SSOW_LF_GWS_TAG,
> +                               flags | NIX_TX_VWQE_F);
> +               } else {
> +                       cn10k_sso_vwqe_split_tx(
> +                               mbufs, meta & 0xFFFF, cmd, lmt_id, lmt_addr,
> +                               ev->sched_type, ws->tx_base, txq_data, flags);
> +               }
> +               rte_mempool_put(rte_mempool_from_obj(ev->vec), ev->vec);
> +               return (meta & 0xFFFF);
> +       }
> +
> +       m = ev->mbuf;
> +       ref_cnt = m->refcnt;
>         txq = cn10k_sso_hws_xtract_meta(m, txq_data);
>         cn10k_nix_tx_skeleton(txq, cmd, flags);
>         /* Perform header writes before barrier for TSO */
> @@ -346,7 +437,7 @@ cn10k_sso_hws_event_tx(struct cn10k_sso_hws *ws, struct rte_event *ev,
>                 pa = txq->io_addr | (cn10k_nix_tx_ext_subs(flags) + 1) << 4;
>         }
>         if (!ev->sched_type)
> -               cnxk_sso_hws_head_wait(ws->tx_base + SSOW_LF_GWS_TAG);
> +               roc_sso_hws_head_wait(ws->tx_base + SSOW_LF_GWS_TAG);
>
>         roc_lmt_submit_steorl(lmt_id, pa);
>
> @@ -357,7 +448,6 @@ cn10k_sso_hws_event_tx(struct cn10k_sso_hws *ws, struct rte_event *ev,
>
>         cnxk_sso_hws_swtag_flush(ws->tx_base + SSOW_LF_GWS_TAG,
>                                  ws->tx_base + SSOW_LF_GWS_OP_SWTAG_FLUSH);
> -
>         return 1;
>  }
>
> diff --git a/drivers/event/cnxk/cn9k_worker.h b/drivers/event/cnxk/cn9k_worker.h
> index 3f9751211a..cc1e141957 100644
> --- a/drivers/event/cnxk/cn9k_worker.h
> +++ b/drivers/event/cnxk/cn9k_worker.h
> @@ -466,7 +466,7 @@ cn9k_sso_hws_event_tx(uint64_t base, struct rte_event *ev, uint64_t *cmd,
>                 const uint16_t segdw = cn9k_nix_prepare_mseg(m, cmd, flags);
>                 if (!CNXK_TT_FROM_EVENT(ev->event)) {
>                         cn9k_nix_xmit_mseg_prep_lmt(cmd, txq->lmt_addr, segdw);
> -                       cnxk_sso_hws_head_wait(base + SSOW_LF_GWS_TAG);
> +                       roc_sso_hws_head_wait(base + SSOW_LF_GWS_TAG);
>                         cn9k_sso_txq_fc_wait(txq);
>                         if (cn9k_nix_xmit_submit_lmt(txq->io_addr) == 0)
>                                 cn9k_nix_xmit_mseg_one(cmd, txq->lmt_addr,
> @@ -478,7 +478,7 @@ cn9k_sso_hws_event_tx(uint64_t base, struct rte_event *ev, uint64_t *cmd,
>         } else {
>                 if (!CNXK_TT_FROM_EVENT(ev->event)) {
>                         cn9k_nix_xmit_prep_lmt(cmd, txq->lmt_addr, flags);
> -                       cnxk_sso_hws_head_wait(base + SSOW_LF_GWS_TAG);
> +                       roc_sso_hws_head_wait(base + SSOW_LF_GWS_TAG);
>                         cn9k_sso_txq_fc_wait(txq);
>                         if (cn9k_nix_xmit_submit_lmt(txq->io_addr) == 0)
>                                 cn9k_nix_xmit_one(cmd, txq->lmt_addr,
> diff --git a/drivers/event/cnxk/cnxk_worker.h b/drivers/event/cnxk/cnxk_worker.h
> index 7891b749df..9f9ceab8a1 100644
> --- a/drivers/event/cnxk/cnxk_worker.h
> +++ b/drivers/event/cnxk/cnxk_worker.h
> @@ -75,26 +75,4 @@ cnxk_sso_hws_swtag_wait(uintptr_t tag_op)
>  #endif
>  }
>
> -static __rte_always_inline void
> -cnxk_sso_hws_head_wait(uintptr_t tag_op)
> -{
> -#ifdef RTE_ARCH_ARM64
> -       uint64_t tag;
> -
> -       asm volatile("       ldr %[tag], [%[tag_op]]         \n"
> -                    "       tbnz %[tag], 35, done%=         \n"
> -                    "       sevl                            \n"
> -                    "rty%=: wfe                             \n"
> -                    "       ldr %[tag], [%[tag_op]]         \n"
> -                    "       tbz %[tag], 35, rty%=           \n"
> -                    "done%=:                                \n"
> -                    : [tag] "=&r"(tag)
> -                    : [tag_op] "r"(tag_op));
> -#else
> -       /* Wait for the HEAD to be set */
> -       while (!(plt_read64(tag_op) & BIT_ULL(35)))
> -               ;
> -#endif
> -}
> -
>  #endif
> diff --git a/drivers/net/cnxk/cn10k_tx.c b/drivers/net/cnxk/cn10k_tx.c
> index 1f30bab59a..0e1276c60b 100644
> --- a/drivers/net/cnxk/cn10k_tx.c
> +++ b/drivers/net/cnxk/cn10k_tx.c
> @@ -16,7 +16,7 @@
>                     !((flags) & NIX_TX_OFFLOAD_L3_L4_CSUM_F))                  \
>                         return 0;                                              \
>                 return cn10k_nix_xmit_pkts(tx_queue, tx_pkts, pkts, cmd,       \
> -                                          flags);                             \
> +                                          0, flags);                          \
>         }
>
>  NIX_TX_FASTPATH_MODES
> diff --git a/drivers/net/cnxk/cn10k_tx.h b/drivers/net/cnxk/cn10k_tx.h
> index eb148b8e77..f75cae07ae 100644
> --- a/drivers/net/cnxk/cn10k_tx.h
> +++ b/drivers/net/cnxk/cn10k_tx.h
> @@ -18,6 +18,7 @@
>   * Defining it from backwards to denote its been
>   * not used as offload flags to pick function
>   */
> +#define NIX_TX_VWQE_F     BIT(14)
>  #define NIX_TX_MULTI_SEG_F BIT(15)
>
>  #define NIX_TX_NEED_SEND_HDR_W1                                                \
> @@ -519,7 +520,7 @@ cn10k_nix_prepare_mseg(struct rte_mbuf *m, uint64_t *cmd, const uint16_t flags)
>
>  static __rte_always_inline uint16_t
>  cn10k_nix_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t pkts,
> -                   uint64_t *cmd, const uint16_t flags)
> +                   uint64_t *cmd, uintptr_t base, const uint16_t flags)
>  {
>         struct cn10k_eth_txq *txq = tx_queue;
>         const rte_iova_t io_addr = txq->io_addr;
> @@ -528,14 +529,15 @@ cn10k_nix_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t pkts,
>         uint64_t lso_tun_fmt;
>         uint64_t data;
>
> -       NIX_XMIT_FC_OR_RETURN(txq, pkts);
> +       if (!(flags & NIX_TX_VWQE_F)) {
> +               NIX_XMIT_FC_OR_RETURN(txq, pkts);
> +               /* Reduce the cached count */
> +               txq->fc_cache_pkts -= pkts;
> +       }
>
>         /* Get cmd skeleton */
>         cn10k_nix_tx_skeleton(txq, cmd, flags);
>
> -       /* Reduce the cached count */
> -       txq->fc_cache_pkts -= pkts;
> -
>         if (flags & NIX_TX_OFFLOAD_TSO_F)
>                 lso_tun_fmt = txq->lso_tun_fmt;
>
> @@ -558,6 +560,9 @@ cn10k_nix_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t pkts,
>                 lmt_addr += (1ULL << ROC_LMT_LINE_SIZE_LOG2);
>         }
>
> +       if (flags & NIX_TX_VWQE_F)
> +               roc_sso_hws_head_wait(base);
> +
>         /* Trigger LMTST */
>         if (burst > 16) {
>                 data = cn10k_nix_tx_steor_data(flags);
> @@ -604,7 +609,8 @@ cn10k_nix_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t pkts,
>
>  static __rte_always_inline uint16_t
>  cn10k_nix_xmit_pkts_mseg(void *tx_queue, struct rte_mbuf **tx_pkts,
> -                        uint16_t pkts, uint64_t *cmd, const uint16_t flags)
> +                        uint16_t pkts, uint64_t *cmd, uintptr_t base,
> +                        const uint16_t flags)
>  {
>         struct cn10k_eth_txq *txq = tx_queue;
>         uintptr_t pa0, pa1, lmt_addr = txq->lmt_base;
> @@ -652,6 +658,9 @@ cn10k_nix_xmit_pkts_mseg(void *tx_queue, struct rte_mbuf **tx_pkts,
>                 shft += 3;
>         }
>
> +       if (flags & NIX_TX_VWQE_F)
> +               roc_sso_hws_head_wait(base);
> +
>         data0 = (uint64_t)data128;
>         data1 = (uint64_t)(data128 >> 64);
>         /* Make data0 similar to data1 */
> @@ -984,7 +993,8 @@ cn10k_nix_prep_lmt_mseg_vector(struct rte_mbuf **mbufs, uint64x2_t *cmd0,
>
>  static __rte_always_inline uint16_t
>  cn10k_nix_xmit_pkts_vector(void *tx_queue, struct rte_mbuf **tx_pkts,
> -                          uint16_t pkts, uint64_t *cmd, const uint16_t flags)
> +                          uint16_t pkts, uint64_t *cmd, uintptr_t base,
> +                          const uint16_t flags)
>  {
>         uint64x2_t dataoff_iova0, dataoff_iova1, dataoff_iova2, dataoff_iova3;
>         uint64x2_t len_olflags0, len_olflags1, len_olflags2, len_olflags3;
> @@ -1013,13 +1023,17 @@ cn10k_nix_xmit_pkts_vector(void *tx_queue, struct rte_mbuf **tx_pkts,
>                 uint64_t data[2];
>         } wd;
>
> -       NIX_XMIT_FC_OR_RETURN(txq, pkts);
> -
> -       scalar = pkts & (NIX_DESCS_PER_LOOP - 1);
> -       pkts = RTE_ALIGN_FLOOR(pkts, NIX_DESCS_PER_LOOP);
> +       if (!(flags & NIX_TX_VWQE_F)) {
> +               NIX_XMIT_FC_OR_RETURN(txq, pkts);
> +               scalar = pkts & (NIX_DESCS_PER_LOOP - 1);
> +               pkts = RTE_ALIGN_FLOOR(pkts, NIX_DESCS_PER_LOOP);
> +               /* Reduce the cached count */
> +               txq->fc_cache_pkts -= pkts;
> +       } else {
> +               scalar = pkts & (NIX_DESCS_PER_LOOP - 1);
> +               pkts = RTE_ALIGN_FLOOR(pkts, NIX_DESCS_PER_LOOP);
> +       }
>
> -       /* Reduce the cached count */
> -       txq->fc_cache_pkts -= pkts;
>         /* Perform header writes before barrier for TSO */
>         if (flags & NIX_TX_OFFLOAD_TSO_F) {
>                 for (i = 0; i < pkts; i++)
> @@ -1973,6 +1987,9 @@ cn10k_nix_xmit_pkts_vector(void *tx_queue, struct rte_mbuf **tx_pkts,
>         if (flags & NIX_TX_MULTI_SEG_F)
>                 wd.data[0] >>= 16;
>
> +       if (flags & NIX_TX_VWQE_F)
> +               roc_sso_hws_head_wait(base);
> +
>         /* Trigger LMTST */
>         if (lnum > 16) {
>                 if (!(flags & NIX_TX_MULTI_SEG_F))
> @@ -2029,10 +2046,11 @@ cn10k_nix_xmit_pkts_vector(void *tx_queue, struct rte_mbuf **tx_pkts,
>         if (unlikely(scalar)) {
>                 if (flags & NIX_TX_MULTI_SEG_F)
>                         pkts += cn10k_nix_xmit_pkts_mseg(tx_queue, tx_pkts,
> -                                                        scalar, cmd, flags);
> +                                                        scalar, cmd, base,
> +                                                        flags);
>                 else
>                         pkts += cn10k_nix_xmit_pkts(tx_queue, tx_pkts, scalar,
> -                                                   cmd, flags);
> +                                                   cmd, base, flags);
>         }
>
>         return pkts;
> @@ -2041,13 +2059,15 @@ cn10k_nix_xmit_pkts_vector(void *tx_queue, struct rte_mbuf **tx_pkts,
>  #else
>  static __rte_always_inline uint16_t
>  cn10k_nix_xmit_pkts_vector(void *tx_queue, struct rte_mbuf **tx_pkts,
> -                          uint16_t pkts, uint64_t *cmd, const uint16_t flags)
> +                          uint16_t pkts, uint64_t *cmd, uintptr_t base,
> +                          const uint16_t flags)
>  {
>         RTE_SET_USED(tx_queue);
>         RTE_SET_USED(tx_pkts);
>         RTE_SET_USED(pkts);
>         RTE_SET_USED(cmd);
>         RTE_SET_USED(flags);
> +       RTE_SET_USED(base);
>         return 0;
>  }
>  #endif
> diff --git a/drivers/net/cnxk/cn10k_tx_mseg.c b/drivers/net/cnxk/cn10k_tx_mseg.c
> index 33f6754722..4ea4c8a4e5 100644
> --- a/drivers/net/cnxk/cn10k_tx_mseg.c
> +++ b/drivers/net/cnxk/cn10k_tx_mseg.c
> @@ -18,7 +18,8 @@
>                     !((flags) & NIX_TX_OFFLOAD_L3_L4_CSUM_F))                  \
>                         return 0;                                              \
>                 return cn10k_nix_xmit_pkts_mseg(tx_queue, tx_pkts, pkts, cmd,  \
> -                                               (flags) | NIX_TX_MULTI_SEG_F); \
> +                                               0, (flags)                     \
> +                                                       | NIX_TX_MULTI_SEG_F); \
>         }
>
>  NIX_TX_FASTPATH_MODES
> diff --git a/drivers/net/cnxk/cn10k_tx_vec.c b/drivers/net/cnxk/cn10k_tx_vec.c
> index 34e3737501..a0350496ab 100644
> --- a/drivers/net/cnxk/cn10k_tx_vec.c
> +++ b/drivers/net/cnxk/cn10k_tx_vec.c
> @@ -18,7 +18,7 @@
>                     !((flags) & NIX_TX_OFFLOAD_L3_L4_CSUM_F))                  \
>                         return 0;                                              \
>                 return cn10k_nix_xmit_pkts_vector(tx_queue, tx_pkts, pkts, cmd,\
> -                                                 (flags));                    \
> +                                                 0, (flags));                 \
>         }
>
>  NIX_TX_FASTPATH_MODES
> diff --git a/drivers/net/cnxk/cn10k_tx_vec_mseg.c b/drivers/net/cnxk/cn10k_tx_vec_mseg.c
> index 1fad81dbad..7f98f79b97 100644
> --- a/drivers/net/cnxk/cn10k_tx_vec_mseg.c
> +++ b/drivers/net/cnxk/cn10k_tx_vec_mseg.c
> @@ -16,7 +16,7 @@
>                     !((flags) & NIX_TX_OFFLOAD_L3_L4_CSUM_F))                  \
>                         return 0;                                              \
>                 return cn10k_nix_xmit_pkts_vector(                             \
> -                       tx_queue, tx_pkts, pkts, cmd,                          \
> +                       tx_queue, tx_pkts, pkts, cmd, 0,                       \
>                         (flags) | NIX_TX_MULTI_SEG_F);                         \
>         }
>
> --
> 2.17.1
>

  reply	other threads:[~2021-07-13 13:36 UTC|newest]

Thread overview: 93+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2021-05-24 12:22 [dpdk-dev] [PATCH v2 1/4] event/cnxk: add Rx adapter support pbhagavatula
2021-05-24 12:23 ` [dpdk-dev] [PATCH v2 2/4] event/cnxk: add Rx adapter fastpath ops pbhagavatula
2021-05-24 12:23 ` [dpdk-dev] [PATCH v2 3/4] event/cnxk: add Tx adapter support pbhagavatula
2021-05-24 12:23 ` [dpdk-dev] [PATCH v2 4/4] event/cnxk: add Tx adapter fastpath ops pbhagavatula
2021-06-19 11:01 ` [dpdk-dev] [PATCH v2 01/13] net/cnxk: add multi seg Rx vector routine pbhagavatula
2021-06-19 11:01   ` [dpdk-dev] [PATCH v2 02/13] net/cnxk: enable ptp processing in vector Rx pbhagavatula
2021-06-19 11:01   ` [dpdk-dev] [PATCH v2 03/13] net/cnxk: enable VLAN processing in vector Tx pbhagavatula
2021-06-19 11:01   ` [dpdk-dev] [PATCH v2 04/13] net/cnxk: enable ptp " pbhagavatula
2021-06-19 11:01   ` [dpdk-dev] [PATCH v2 05/13] net/cnxk: enable TSO " pbhagavatula
2021-06-19 11:01   ` [dpdk-dev] [PATCH v2 06/13] net/cnxk: add multi seg Tx vector routine pbhagavatula
2021-06-19 11:01   ` [dpdk-dev] [PATCH v2 07/13] event/cnxk: add Rx adapter support pbhagavatula
2021-06-19 11:01   ` [dpdk-dev] [PATCH v2 08/13] event/cnxk: add Rx adapter fastpath ops pbhagavatula
2021-06-19 11:01   ` [dpdk-dev] [PATCH v2 09/13] event/cnxk: add Tx adapter support pbhagavatula
2021-06-19 11:01   ` [dpdk-dev] [PATCH v2 10/13] event/cnxk: add Tx adapter fastpath ops pbhagavatula
2021-06-19 11:01   ` [dpdk-dev] [PATCH v2 11/13] event/cnxk: add Rx adapter vector support pbhagavatula
2021-06-19 11:01   ` [dpdk-dev] [PATCH v2 12/13] event/cnxk: add Rx event vector fastpath pbhagavatula
2021-06-19 11:01   ` [dpdk-dev] [PATCH v2 13/13] event/cnxk: add Tx " pbhagavatula
2021-06-20 20:28   ` [dpdk-dev] [PATCH v3 01/13] net/cnxk: add multi seg Rx vector routine pbhagavatula
2021-06-20 20:28     ` [dpdk-dev] [PATCH v3 02/13] net/cnxk: enable ptp processing in vector Rx pbhagavatula
2021-06-20 20:28     ` [dpdk-dev] [PATCH v3 03/13] net/cnxk: enable VLAN processing in vector Tx pbhagavatula
2021-06-20 20:28     ` [dpdk-dev] [PATCH v3 04/13] net/cnxk: enable ptp " pbhagavatula
2021-06-20 20:28     ` [dpdk-dev] [PATCH v3 05/13] net/cnxk: enable TSO " pbhagavatula
2021-06-20 20:28     ` [dpdk-dev] [PATCH v3 06/13] net/cnxk: add multi seg Tx vector routine pbhagavatula
2021-06-20 20:29     ` [dpdk-dev] [PATCH v3 07/13] event/cnxk: add Rx adapter support pbhagavatula
2021-06-20 20:29     ` [dpdk-dev] [PATCH v3 08/13] event/cnxk: add Rx adapter fastpath ops pbhagavatula
2021-06-20 20:29     ` [dpdk-dev] [PATCH v3 09/13] event/cnxk: add Tx adapter support pbhagavatula
2021-06-20 20:29     ` [dpdk-dev] [PATCH v3 10/13] event/cnxk: add Tx adapter fastpath ops pbhagavatula
2021-06-20 20:29     ` [dpdk-dev] [PATCH v3 11/13] event/cnxk: add Rx adapter vector support pbhagavatula
2021-06-20 20:29     ` [dpdk-dev] [PATCH v3 12/13] event/cnxk: add Rx event vector fastpath pbhagavatula
2021-06-20 20:29     ` [dpdk-dev] [PATCH v3 13/13] event/cnxk: add Tx " pbhagavatula
2021-06-27  6:57     ` [dpdk-dev] [PATCH v3 01/13] net/cnxk: add multi seg Rx vector routine Jerin Jacob
2021-06-28 19:41     ` [dpdk-dev] [PATCH v4 1/6] " pbhagavatula
2021-06-28 19:41       ` [dpdk-dev] [PATCH v4 2/6] net/cnxk: enable ptp processing in vector Rx pbhagavatula
2021-06-28 19:41       ` [dpdk-dev] [PATCH v4 3/6] net/cnxk: enable VLAN processing in vector Tx pbhagavatula
2021-06-28 19:41       ` [dpdk-dev] [PATCH v4 4/6] net/cnxk: enable ptp " pbhagavatula
2021-06-28 19:41       ` [dpdk-dev] [PATCH v4 5/6] net/cnxk: enable TSO " pbhagavatula
2021-06-28 19:41       ` [dpdk-dev] [PATCH v4 6/6] net/cnxk: add multi seg Tx vector routine pbhagavatula
2021-06-29  7:25         ` Nithin Dabilpuram
2021-06-29  7:44       ` [dpdk-dev] [PATCH v5 1/6] net/cnxk: add multi seg Rx " pbhagavatula
2021-06-29  7:44         ` [dpdk-dev] [PATCH v5 2/6] net/cnxk: enable ptp processing in vector Rx pbhagavatula
2021-06-29  7:44         ` [dpdk-dev] [PATCH v5 3/6] net/cnxk: enable VLAN processing in vector Tx pbhagavatula
2021-06-29  7:44         ` [dpdk-dev] [PATCH v5 4/6] net/cnxk: enable ptp " pbhagavatula
2021-06-29  7:44         ` [dpdk-dev] [PATCH v5 5/6] net/cnxk: enable TSO " pbhagavatula
2021-06-29  7:44         ` [dpdk-dev] [PATCH v5 6/6] net/cnxk: add multi seg Tx vector routine pbhagavatula
2021-06-29 16:20         ` [dpdk-dev] [PATCH v5 1/6] net/cnxk: add multi seg Rx " Jerin Jacob
2021-06-28 19:52     ` [dpdk-dev] [PATCH v4 1/7] event/cnxk: add Rx adapter support pbhagavatula
2021-06-28 19:52       ` [dpdk-dev] [PATCH v4 2/7] event/cnxk: add Rx adapter fastpath ops pbhagavatula
2021-06-28 19:52       ` [dpdk-dev] [PATCH v4 3/7] event/cnxk: add Tx adapter support pbhagavatula
2021-06-28 19:52       ` [dpdk-dev] [PATCH v4 4/7] event/cnxk: add Tx adapter fastpath ops pbhagavatula
2021-06-28 19:52       ` [dpdk-dev] [PATCH v4 5/7] event/cnxk: add Rx adapter vector support pbhagavatula
2021-06-28 19:52       ` [dpdk-dev] [PATCH v4 6/7] event/cnxk: add Rx event vector fastpath pbhagavatula
2021-06-28 19:52       ` [dpdk-dev] [PATCH v4 7/7] event/cnxk: add Tx " pbhagavatula
2021-06-29  8:01       ` [dpdk-dev] [PATCH v5 1/7] event/cnxk: add Rx adapter support pbhagavatula
2021-06-29  8:01         ` [dpdk-dev] [PATCH v5 2/7] event/cnxk: add Rx adapter fastpath ops pbhagavatula
2021-06-29  8:01         ` [dpdk-dev] [PATCH v5 3/7] event/cnxk: add Tx adapter support pbhagavatula
2021-06-29  8:01         ` [dpdk-dev] [PATCH v5 4/7] event/cnxk: add Tx adapter fastpath ops pbhagavatula
2021-06-29  8:01         ` [dpdk-dev] [PATCH v5 5/7] event/cnxk: add Rx adapter vector support pbhagavatula
2021-06-29  8:01         ` [dpdk-dev] [PATCH v5 6/7] event/cnxk: add Rx event vector fastpath pbhagavatula
2021-06-29  8:01         ` [dpdk-dev] [PATCH v5 7/7] event/cnxk: add Tx " pbhagavatula
2021-07-02 21:14         ` [dpdk-dev] [PATCH v6 1/7] event/cnxk: add Rx adapter support pbhagavatula
2021-07-02 21:14           ` [dpdk-dev] [PATCH v6 2/7] event/cnxk: add Rx adapter fastpath ops pbhagavatula
2021-07-02 21:14           ` [dpdk-dev] [PATCH v6 3/7] event/cnxk: add Tx adapter support pbhagavatula
2021-07-03 13:23             ` Nithin Dabilpuram
2021-07-02 21:14           ` [dpdk-dev] [PATCH v6 4/7] event/cnxk: add Tx adapter fastpath ops pbhagavatula
2021-07-02 21:14           ` [dpdk-dev] [PATCH v6 5/7] event/cnxk: add Rx adapter vector support pbhagavatula
2021-07-02 21:14           ` [dpdk-dev] [PATCH v6 6/7] event/cnxk: add Rx event vector fastpath pbhagavatula
2021-07-02 21:14           ` [dpdk-dev] [PATCH v6 7/7] event/cnxk: add Tx " pbhagavatula
2021-07-03 22:00           ` [dpdk-dev] [PATCH v7 1/7] event/cnxk: add Rx adapter support pbhagavatula
2021-07-03 22:00             ` [dpdk-dev] [PATCH v7 2/7] event/cnxk: add Rx adapter fastpath ops pbhagavatula
2021-07-03 22:00             ` [dpdk-dev] [PATCH v7 3/7] event/cnxk: add Tx adapter support pbhagavatula
2021-07-03 22:00             ` [dpdk-dev] [PATCH v7 4/7] event/cnxk: add Tx adapter fastpath ops pbhagavatula
2021-07-03 22:00             ` [dpdk-dev] [PATCH v7 5/7] event/cnxk: add Rx adapter vector support pbhagavatula
2021-07-03 22:00             ` [dpdk-dev] [PATCH v7 6/7] event/cnxk: add Rx event vector fastpath pbhagavatula
2021-07-03 22:00             ` [dpdk-dev] [PATCH v7 7/7] event/cnxk: add Tx " pbhagavatula
2021-07-11 23:29             ` [dpdk-dev] [PATCH v8 1/7] event/cnxk: add Rx adapter support pbhagavatula
2021-07-11 23:29               ` [dpdk-dev] [PATCH v8 2/7] event/cnxk: add Rx adapter fastpath ops pbhagavatula
2021-07-11 23:29               ` [dpdk-dev] [PATCH v8 3/7] event/cnxk: add Tx adapter support pbhagavatula
2021-07-11 23:29               ` [dpdk-dev] [PATCH v8 4/7] event/cnxk: add Tx adapter fastpath ops pbhagavatula
2021-07-11 23:29               ` [dpdk-dev] [PATCH v8 5/7] event/cnxk: add Rx adapter vector support pbhagavatula
2021-07-11 23:29               ` [dpdk-dev] [PATCH v8 6/7] event/cnxk: add Rx event vector fastpath pbhagavatula
2021-07-11 23:29               ` [dpdk-dev] [PATCH v8 7/7] event/cnxk: add Tx " pbhagavatula
2021-07-13 13:36                 ` Jerin Jacob [this message]
2021-07-14  9:02               ` [dpdk-dev] [PATCH v9 1/7] event/cnxk: add Rx adapter support pbhagavatula
2021-07-14  9:02                 ` [dpdk-dev] [PATCH v9 2/7] event/cnxk: add Rx adapter fastpath ops pbhagavatula
2021-07-20 11:03                   ` David Marchand
2021-07-20 11:43                     ` [dpdk-dev] [EXT] " Pavan Nikhilesh Bhagavatula
2021-07-20 11:50                       ` David Marchand
2021-07-14  9:02                 ` [dpdk-dev] [PATCH v9 3/7] event/cnxk: add Tx adapter support pbhagavatula
2021-07-14  9:02                 ` [dpdk-dev] [PATCH v9 4/7] event/cnxk: add Tx adapter fastpath ops pbhagavatula
2021-07-14  9:02                 ` [dpdk-dev] [PATCH v9 5/7] event/cnxk: add Rx adapter vector support pbhagavatula
2021-07-14  9:02                 ` [dpdk-dev] [PATCH v9 6/7] event/cnxk: add Rx event vector fastpath pbhagavatula
2021-07-14  9:02                 ` [dpdk-dev] [PATCH v9 7/7] event/cnxk: add Tx " pbhagavatula
2021-07-16 12:19                   ` Jerin Jacob

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to='CALBAE1MytR=nJYaj2=4vxLy4CFAJ3CPBnLhScao2AOW0MWqZuA@mail.gmail.com' \
    --to=jerinjacobk@gmail.com \
    --cc=dev@dpdk.org \
    --cc=jerinj@marvell.com \
    --cc=kirankumark@marvell.com \
    --cc=ndabilpuram@marvell.com \
    --cc=pbhagavatula@marvell.com \
    --cc=skori@marvell.com \
    --cc=skoteshwar@marvell.com \
    --cc=sthotton@marvell.com \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.