From: "Zhang, Qi Z" <qi.z.zhang@intel.com>
To: "Xing, Beilei" <beilei.xing@intel.com>,
	"Wu, Jingjing" <jingjing.wu@intel.com>
Cc: "dev@dpdk.org" <dev@dpdk.org>, "Wu, Wenjun1" <wenjun1.wu@intel.com>
Subject: RE: [PATCH v4 03/15] common/idpf: add virtual channel functions
Date: Wed, 18 Jan 2023 04:10:20 +0000
Message-ID: <DM4PR11MB5994DB0D1A14506388DE12EED7C79@DM4PR11MB5994.namprd11.prod.outlook.com>
In-Reply-To: <DM4PR11MB59947C7C9B63D921E1334366D7C79@DM4PR11MB5994.namprd11.prod.outlook.com>



> -----Original Message-----
> From: Zhang, Qi Z
> Sent: Wednesday, January 18, 2023 12:00 PM
> To: Xing, Beilei <beilei.xing@intel.com>; Wu, Jingjing <jingjing.wu@intel.com>
> Cc: dev@dpdk.org; Wu, Wenjun1 <Wenjun1.Wu@intel.com>
> Subject: RE: [PATCH v4 03/15] common/idpf: add virtual channel functions
> 
> 
> 
> > -----Original Message-----
> > From: Xing, Beilei <beilei.xing@intel.com>
> > Sent: Tuesday, January 17, 2023 4:06 PM
> > To: Wu, Jingjing <jingjing.wu@intel.com>
> > Cc: dev@dpdk.org; Zhang, Qi Z <qi.z.zhang@intel.com>; Xing, Beilei
> > <beilei.xing@intel.com>; Wu, Wenjun1 <wenjun1.wu@intel.com>
> > Subject: [PATCH v4 03/15] common/idpf: add virtual channel functions
> >
> > From: Beilei Xing <beilei.xing@intel.com>
> >
> > Move most of the virtual channel functions to idpf common module.
> >
> > Signed-off-by: Wenjun Wu <wenjun1.wu@intel.com>
> > Signed-off-by: Beilei Xing <beilei.xing@intel.com>
> > ---
> >  drivers/common/idpf/base/meson.build       |   2 +-
> >  drivers/common/idpf/idpf_common_device.c   |   8 +
> >  drivers/common/idpf/idpf_common_device.h   |  61 ++
> >  drivers/common/idpf/idpf_common_logs.h     |  23 +
> >  drivers/common/idpf/idpf_common_virtchnl.c | 815 ++++++++++++++++++++++
> >  drivers/common/idpf/idpf_common_virtchnl.h |  48 ++
> >  drivers/common/idpf/meson.build            |   5 +
> >  drivers/common/idpf/version.map            |  20 +-
> >  drivers/net/idpf/idpf_ethdev.c             |   9 +-
> >  drivers/net/idpf/idpf_ethdev.h             |  85 +--
> >  drivers/net/idpf/idpf_vchnl.c              | 815 +--------------------
> >  11 files changed, 983 insertions(+), 908 deletions(-)
> >  create mode 100644 drivers/common/idpf/idpf_common_device.c
> >  create mode 100644 drivers/common/idpf/idpf_common_logs.h
> >  create mode 100644 drivers/common/idpf/idpf_common_virtchnl.c
> >  create mode 100644 drivers/common/idpf/idpf_common_virtchnl.h
> >
> > diff --git a/drivers/common/idpf/base/meson.build
> > b/drivers/common/idpf/base/meson.build
> > index 183587b51a..dc4b93c198 100644
> > --- a/drivers/common/idpf/base/meson.build
> > +++ b/drivers/common/idpf/base/meson.build
> > @@ -1,7 +1,7 @@
> >  # SPDX-License-Identifier: BSD-3-Clause
> >  # Copyright(c) 2022 Intel Corporation
> >
> > -sources = files(
> > +sources += files(
> >          'idpf_common.c',
> >          'idpf_controlq.c',
> >          'idpf_controlq_setup.c',
> > diff --git a/drivers/common/idpf/idpf_common_device.c
> > b/drivers/common/idpf/idpf_common_device.c
> > new file mode 100644
> > index 0000000000..5062780362
> > --- /dev/null
> > +++ b/drivers/common/idpf/idpf_common_device.c
> > @@ -0,0 +1,8 @@
> > +/* SPDX-License-Identifier: BSD-3-Clause
> > + * Copyright(c) 2022 Intel Corporation
> > + */
> > +
> > +#include <rte_log.h>
> > +#include <idpf_common_device.h>
> > +
> > +RTE_LOG_REGISTER_SUFFIX(idpf_common_logtype, common, NOTICE);
> > diff --git a/drivers/common/idpf/idpf_common_device.h
> > b/drivers/common/idpf/idpf_common_device.h
> > index b7fff84b25..a7537281d1 100644
> > --- a/drivers/common/idpf/idpf_common_device.h
> > +++ b/drivers/common/idpf/idpf_common_device.h
> > @@ -7,6 +7,12 @@
> >
> >  #include <base/idpf_prototype.h>
> >  #include <base/virtchnl2.h>
> > +#include <idpf_common_logs.h>
> > +
> > +#define IDPF_CTLQ_LEN		64
> > +#define IDPF_DFLT_MBX_BUF_SIZE	4096
> > +
> > +#define IDPF_MAX_PKT_TYPE	1024
> >
> >  struct idpf_adapter {
> >  	struct idpf_hw hw;
> > @@ -76,4 +82,59 @@ struct idpf_vport {
> >  	bool stopped;
> >  };
> >
> > +/* Message type read in virtual channel from PF */
> > +enum idpf_vc_result {
> > +	IDPF_MSG_ERR = -1, /* Meet error when accessing admin queue */
> > +	IDPF_MSG_NON,      /* Read nothing from admin queue */
> > +	IDPF_MSG_SYS,      /* Read system msg from admin queue */
> > +	IDPF_MSG_CMD,      /* Read async command result */
> > +};
> > +
> > +/* structure used for sending and checking response of virtchnl ops */
> > +struct idpf_cmd_info {
> > +	uint32_t ops;
> > +	uint8_t *in_args;       /* buffer for sending */
> > +	uint32_t in_args_size;  /* buffer size for sending */
> > +	uint8_t *out_buffer;    /* buffer for response */
> > +	uint32_t out_size;      /* buffer size for response */
> > +};
> > +
> > +/* Notify that the current command is done. Only call this after
> > + * atomic_set_cmd() has succeeded.
> > + */
> > +static inline void
> > +notify_cmd(struct idpf_adapter *adapter, int msg_ret)
> > +{
> > +	adapter->cmd_retval = msg_ret;
> > +	/* Return value may be checked in another thread, need to ensure the coherence. */
> > +	rte_wmb();
> > +	adapter->pend_cmd = VIRTCHNL2_OP_UNKNOWN;
> > +}
> > +
> > +/* Clear the current command. Only call this after atomic_set_cmd()
> > + * has succeeded.
> > + */
> > +static inline void
> > +clear_cmd(struct idpf_adapter *adapter)
> > +{
> > +	/* Return value may be checked in another thread, need to ensure the coherence. */
> > +	rte_wmb();
> > +	adapter->pend_cmd = VIRTCHNL2_OP_UNKNOWN;
> > +	adapter->cmd_retval = VIRTCHNL_STATUS_SUCCESS;
> > +}
> > +
> > +/* Check there is pending cmd in execution. If none, set new command. */
> > +static inline bool
> > +atomic_set_cmd(struct idpf_adapter *adapter, uint32_t ops)
> > +{
> > +	uint32_t op_unk = VIRTCHNL2_OP_UNKNOWN;
> > +	bool ret = __atomic_compare_exchange(&adapter->pend_cmd, &op_unk, &ops,
> > +					     0, __ATOMIC_ACQUIRE, __ATOMIC_ACQUIRE);
> > +
> > +	if (!ret)
> > +		DRV_LOG(ERR, "There is incomplete cmd %d", adapter->pend_cmd);
> > +
> > +	return !ret;
> > +}
> > +
> >  #endif /* _IDPF_COMMON_DEVICE_H_ */
> > diff --git a/drivers/common/idpf/idpf_common_logs.h
> > b/drivers/common/idpf/idpf_common_logs.h
> > new file mode 100644
> > index 0000000000..fe36562769
> > --- /dev/null
> > +++ b/drivers/common/idpf/idpf_common_logs.h
> > @@ -0,0 +1,23 @@
> > +/* SPDX-License-Identifier: BSD-3-Clause
> > + * Copyright(c) 2022 Intel Corporation
> > + */
> > +
> > +#ifndef _IDPF_COMMON_LOGS_H_
> > +#define _IDPF_COMMON_LOGS_H_
> > +
> > +#include <rte_log.h>
> > +
> > +extern int idpf_common_logtype;
> > +
> > +#define DRV_LOG_RAW(level, ...)					\
> > +	rte_log(RTE_LOG_ ## level,				\
> > +		idpf_common_logtype,				\
> > +		RTE_FMT("%s(): "				\
> > +			RTE_FMT_HEAD(__VA_ARGS__,) "\n",	\
> > +			__func__,				\
> > +			RTE_FMT_TAIL(__VA_ARGS__,)))
> > +
> > +#define DRV_LOG(level, fmt, args...)		\
> > +	DRV_LOG_RAW(level, fmt "\n", ## args)
> > +
> > +#endif /* _IDPF_COMMON_LOGS_H_ */
> > diff --git a/drivers/common/idpf/idpf_common_virtchnl.c
> > b/drivers/common/idpf/idpf_common_virtchnl.c
> > new file mode 100644
> > index 0000000000..2e94a95876
> > --- /dev/null
> > +++ b/drivers/common/idpf/idpf_common_virtchnl.c
> > @@ -0,0 +1,815 @@
> > +/* SPDX-License-Identifier: BSD-3-Clause
> > + * Copyright(c) 2022 Intel Corporation
> > + */
> > +
> > +#include <idpf_common_virtchnl.h>
> > +#include <idpf_common_logs.h>
> > +
> > +static int
> > +idpf_vc_clean(struct idpf_adapter *adapter)
> > +{
> > +	struct idpf_ctlq_msg *q_msg[IDPF_CTLQ_LEN];
> > +	uint16_t num_q_msg = IDPF_CTLQ_LEN;
> > +	struct idpf_dma_mem *dma_mem;
> > +	int err;
> > +	uint32_t i;
> > +
> > +	for (i = 0; i < 10; i++) {
> > +		err = idpf_ctlq_clean_sq(adapter->hw.asq, &num_q_msg,
> > q_msg);
> > +		msleep(20);
> > +		if (num_q_msg > 0)
> > +			break;
> > +	}
> > +	if (err != 0)
> > +		return err;
> > +
> > +	/* Empty queue is not an error */
> > +	for (i = 0; i < num_q_msg; i++) {
> > +		dma_mem = q_msg[i]->ctx.indirect.payload;
> > +		if (dma_mem != NULL) {
> > +			idpf_free_dma_mem(&adapter->hw, dma_mem);
> > +			rte_free(dma_mem);
> > +		}
> > +		rte_free(q_msg[i]);
> > +	}
> > +
> > +	return 0;
> > +}
> > +
> > +static int
> > +idpf_send_vc_msg(struct idpf_adapter *adapter, uint32_t op,
> > +		 uint16_t msg_size, uint8_t *msg)
> > +{
> > +	struct idpf_ctlq_msg *ctlq_msg;
> > +	struct idpf_dma_mem *dma_mem;
> > +	int err;
> > +
> > +	err = idpf_vc_clean(adapter);
> > +	if (err != 0)
> > +		goto err;
> > +
> > +	ctlq_msg = rte_zmalloc(NULL, sizeof(struct idpf_ctlq_msg), 0);
> > +	if (ctlq_msg == NULL) {
> > +		err = -ENOMEM;
> > +		goto err;
> > +	}
> > +
> > +	dma_mem = rte_zmalloc(NULL, sizeof(struct idpf_dma_mem), 0);
> > +	if (dma_mem == NULL) {
> > +		err = -ENOMEM;
> > +		goto dma_mem_error;
> > +	}
> > +
> > +	dma_mem->size = IDPF_DFLT_MBX_BUF_SIZE;
> > +	idpf_alloc_dma_mem(&adapter->hw, dma_mem, dma_mem->size);
> > +	if (dma_mem->va == NULL) {
> > +		err = -ENOMEM;
> > +		goto dma_alloc_error;
> > +	}
> > +
> > +	memcpy(dma_mem->va, msg, msg_size);
> > +
> > +	ctlq_msg->opcode = idpf_mbq_opc_send_msg_to_pf;
> > +	ctlq_msg->func_id = 0;
> > +	ctlq_msg->data_len = msg_size;
> > +	ctlq_msg->cookie.mbx.chnl_opcode = op;
> > +	ctlq_msg->cookie.mbx.chnl_retval = VIRTCHNL_STATUS_SUCCESS;
> > +	ctlq_msg->ctx.indirect.payload = dma_mem;
> > +
> > +	err = idpf_ctlq_send(&adapter->hw, adapter->hw.asq, 1, ctlq_msg);
> > +	if (err != 0)
> > +		goto send_error;
> > +
> > +	return 0;
> > +
> > +send_error:
> > +	idpf_free_dma_mem(&adapter->hw, dma_mem);
> > +dma_alloc_error:
> > +	rte_free(dma_mem);
> > +dma_mem_error:
> > +	rte_free(ctlq_msg);
> > +err:
> > +	return err;
> > +}
> > +
> > +static enum idpf_vc_result
> > +idpf_read_msg_from_cp(struct idpf_adapter *adapter, uint16_t buf_len,
> > +		      uint8_t *buf)
> > +{
> > +	struct idpf_hw *hw = &adapter->hw;
> > +	struct idpf_ctlq_msg ctlq_msg;
> > +	struct idpf_dma_mem *dma_mem = NULL;
> > +	enum idpf_vc_result result = IDPF_MSG_NON;
> > +	uint32_t opcode;
> > +	uint16_t pending = 1;
> > +	int ret;
> > +
> > +	ret = idpf_ctlq_recv(hw->arq, &pending, &ctlq_msg);
> > +	if (ret != 0) {
> > +		DRV_LOG(DEBUG, "Can't read msg from AQ");
> > +		if (ret != -ENOMSG)
> > +			result = IDPF_MSG_ERR;
> > +		return result;
> > +	}
> > +
> > +	rte_memcpy(buf, ctlq_msg.ctx.indirect.payload->va, buf_len);
> > +
> > +	opcode = rte_le_to_cpu_32(ctlq_msg.cookie.mbx.chnl_opcode);
> > +	adapter->cmd_retval = rte_le_to_cpu_32(ctlq_msg.cookie.mbx.chnl_retval);
> > +
> > +	DRV_LOG(DEBUG, "CQ from CP carries opcode %u, retval %d",
> > +		opcode, adapter->cmd_retval);
> > +
> > +	if (opcode == VIRTCHNL2_OP_EVENT) {
> > +		struct virtchnl2_event *ve = ctlq_msg.ctx.indirect.payload->va;
> > +
> > +		result = IDPF_MSG_SYS;
> > +		switch (ve->event) {
> > +		case VIRTCHNL2_EVENT_LINK_CHANGE:
> > +			/* TBD */
> > +			break;
> > +		default:
> > +			DRV_LOG(ERR, "%s: Unknown event %d from CP",
> > +				__func__, ve->event);
> > +			break;
> > +		}
> > +	} else {
> > +		/* async reply msg on command issued by pf previously */
> > +		result = IDPF_MSG_CMD;
> > +		if (opcode != adapter->pend_cmd) {
> > +			DRV_LOG(WARNING, "command mismatch, expect %u, get %u",
> > +				adapter->pend_cmd, opcode);
> > +			result = IDPF_MSG_ERR;
> > +		}
> > +	}
> > +
> > +	if (ctlq_msg.data_len != 0)
> > +		dma_mem = ctlq_msg.ctx.indirect.payload;
> > +	else
> > +		pending = 0;
> > +
> > +	ret = idpf_ctlq_post_rx_buffs(hw, hw->arq, &pending, &dma_mem);
> > +	if (ret != 0 && dma_mem != NULL)
> > +		idpf_free_dma_mem(hw, dma_mem);
> > +
> > +	return result;
> > +}
> > +
> > +#define MAX_TRY_TIMES 200
> > +#define ASQ_DELAY_MS  10
> > +
> > +int
> > +idpf_read_one_msg(struct idpf_adapter *adapter, uint32_t ops, uint16_t buf_len,
> > +		  uint8_t *buf)
> > +{
> > +	int err = 0;
> > +	int i = 0;
> > +	int ret;
> > +
> > +	do {
> > +		ret = idpf_read_msg_from_cp(adapter, buf_len, buf);
> > +		if (ret == IDPF_MSG_CMD)
> > +			break;
> > +		rte_delay_ms(ASQ_DELAY_MS);
> > +	} while (i++ < MAX_TRY_TIMES);
> > +	if (i >= MAX_TRY_TIMES ||
> > +	    adapter->cmd_retval != VIRTCHNL_STATUS_SUCCESS) {
> > +		err = -EBUSY;
> > +		DRV_LOG(ERR, "No response or return failure (%d) for cmd %d",
> > +			adapter->cmd_retval, ops);
> > +	}
> > +
> > +	return err;
> > +}
> > +
> > +int
> > +idpf_execute_vc_cmd(struct idpf_adapter *adapter, struct idpf_cmd_info *args)
> > +{
> > +	int err = 0;
> > +	int i = 0;
> > +	int ret;
> > +
> > +	if (atomic_set_cmd(adapter, args->ops))
> > +		return -EINVAL;
> > +
> > +	ret = idpf_send_vc_msg(adapter, args->ops, args->in_args_size, args->in_args);
> > +	if (ret != 0) {
> > +		DRV_LOG(ERR, "fail to send cmd %d", args->ops);
> > +		clear_cmd(adapter);
> > +		return ret;
> > +	}
> > +
> > +	switch (args->ops) {
> > +	case VIRTCHNL_OP_VERSION:
> > +	case VIRTCHNL2_OP_GET_CAPS:
> > +	case VIRTCHNL2_OP_CREATE_VPORT:
> > +	case VIRTCHNL2_OP_DESTROY_VPORT:
> > +	case VIRTCHNL2_OP_SET_RSS_KEY:
> > +	case VIRTCHNL2_OP_SET_RSS_LUT:
> > +	case VIRTCHNL2_OP_SET_RSS_HASH:
> > +	case VIRTCHNL2_OP_CONFIG_RX_QUEUES:
> > +	case VIRTCHNL2_OP_CONFIG_TX_QUEUES:
> > +	case VIRTCHNL2_OP_ENABLE_QUEUES:
> > +	case VIRTCHNL2_OP_DISABLE_QUEUES:
> > +	case VIRTCHNL2_OP_ENABLE_VPORT:
> > +	case VIRTCHNL2_OP_DISABLE_VPORT:
> > +	case VIRTCHNL2_OP_MAP_QUEUE_VECTOR:
> > +	case VIRTCHNL2_OP_UNMAP_QUEUE_VECTOR:
> > +	case VIRTCHNL2_OP_ALLOC_VECTORS:
> > +	case VIRTCHNL2_OP_DEALLOC_VECTORS:
> > +		/* for init virtchnl ops, need to poll the response */
> > +		err = idpf_read_one_msg(adapter, args->ops, args->out_size,
> > +					args->out_buffer);
> > +		clear_cmd(adapter);
> > +		break;
> > +	case VIRTCHNL2_OP_GET_PTYPE_INFO:
> > +		/* for multiple response messages,
> > +		 * do not handle the response here.
> > +		 */
> > +		break;
> > +	default:
> > +		/* For other virtchnl ops at runtime,
> > +		 * wait for the cmd done flag.
> > +		 */
> > +		do {
> > +			if (adapter->pend_cmd == VIRTCHNL_OP_UNKNOWN)
> > +				break;
> > +			rte_delay_ms(ASQ_DELAY_MS);
> > +			/* If no msg is read or a sys event is read, continue */
> > +		} while (i++ < MAX_TRY_TIMES);
> > +		/* If no response is received, clear the command */
> > +		if (i >= MAX_TRY_TIMES ||
> > +		    adapter->cmd_retval != VIRTCHNL_STATUS_SUCCESS) {
> > +			err = -EBUSY;
> > +			DRV_LOG(ERR, "No response or return failure (%d) for cmd %d",
> > +				adapter->cmd_retval, args->ops);
> > +			clear_cmd(adapter);
> > +		}
> > +		break;
> > +	}
> > +
> > +	return err;
> > +}
> > +
> > +int
> > +idpf_vc_check_api_version(struct idpf_adapter *adapter)
> > +{
> > +	struct virtchnl2_version_info version, *pver;
> > +	struct idpf_cmd_info args;
> > +	int err;
> > +
> > +	memset(&version, 0, sizeof(struct virtchnl2_version_info));
> > +	version.major = VIRTCHNL2_VERSION_MAJOR_2;
> > +	version.minor = VIRTCHNL2_VERSION_MINOR_0;
> > +
> > +	args.ops = VIRTCHNL_OP_VERSION;
> > +	args.in_args = (uint8_t *)&version;
> > +	args.in_args_size = sizeof(version);
> > +	args.out_buffer = adapter->mbx_resp;
> > +	args.out_size = IDPF_DFLT_MBX_BUF_SIZE;
> > +
> > +	err = idpf_execute_vc_cmd(adapter, &args);
> > +	if (err != 0) {
> > +		DRV_LOG(ERR,
> > +			"Failed to execute command of VIRTCHNL_OP_VERSION");
> > +		return err;
> > +	}
> > +
> > +	pver = (struct virtchnl2_version_info *)args.out_buffer;
> > +	adapter->virtchnl_version = *pver;
> > +
> > +	if (adapter->virtchnl_version.major != VIRTCHNL2_VERSION_MAJOR_2 ||
> > +	    adapter->virtchnl_version.minor != VIRTCHNL2_VERSION_MINOR_0) {
> > +		DRV_LOG(ERR, "VIRTCHNL API version mismatch:(%u.%u)-(%u.%u)",
> > +			adapter->virtchnl_version.major,
> > +			adapter->virtchnl_version.minor,
> > +			VIRTCHNL2_VERSION_MAJOR_2,
> > +			VIRTCHNL2_VERSION_MINOR_0);
> > +		return -EINVAL;
> > +	}
> > +
> > +	return 0;
> > +}
> > +
> > +int
> > +idpf_vc_get_caps(struct idpf_adapter *adapter)
> > +{
> > +	struct virtchnl2_get_capabilities caps_msg;
> > +	struct idpf_cmd_info args;
> > +	int err;
> > +
> > +	memset(&caps_msg, 0, sizeof(struct virtchnl2_get_capabilities));
> > +
> > +	caps_msg.csum_caps =
> > +		VIRTCHNL2_CAP_TX_CSUM_L3_IPV4          |
> > +		VIRTCHNL2_CAP_TX_CSUM_L4_IPV4_TCP      |
> > +		VIRTCHNL2_CAP_TX_CSUM_L4_IPV4_UDP      |
> > +		VIRTCHNL2_CAP_TX_CSUM_L4_IPV4_SCTP     |
> > +		VIRTCHNL2_CAP_TX_CSUM_L4_IPV6_TCP      |
> > +		VIRTCHNL2_CAP_TX_CSUM_L4_IPV6_UDP      |
> > +		VIRTCHNL2_CAP_TX_CSUM_L4_IPV6_SCTP     |
> > +		VIRTCHNL2_CAP_TX_CSUM_GENERIC          |
> > +		VIRTCHNL2_CAP_RX_CSUM_L3_IPV4          |
> > +		VIRTCHNL2_CAP_RX_CSUM_L4_IPV4_TCP      |
> > +		VIRTCHNL2_CAP_RX_CSUM_L4_IPV4_UDP      |
> > +		VIRTCHNL2_CAP_RX_CSUM_L4_IPV4_SCTP     |
> > +		VIRTCHNL2_CAP_RX_CSUM_L4_IPV6_TCP      |
> > +		VIRTCHNL2_CAP_RX_CSUM_L4_IPV6_UDP      |
> > +		VIRTCHNL2_CAP_RX_CSUM_L4_IPV6_SCTP     |
> > +		VIRTCHNL2_CAP_RX_CSUM_GENERIC;
> > +
> > +	caps_msg.rss_caps =
> > +		VIRTCHNL2_CAP_RSS_IPV4_TCP             |
> > +		VIRTCHNL2_CAP_RSS_IPV4_UDP             |
> > +		VIRTCHNL2_CAP_RSS_IPV4_SCTP            |
> > +		VIRTCHNL2_CAP_RSS_IPV4_OTHER           |
> > +		VIRTCHNL2_CAP_RSS_IPV6_TCP             |
> > +		VIRTCHNL2_CAP_RSS_IPV6_UDP             |
> > +		VIRTCHNL2_CAP_RSS_IPV6_SCTP            |
> > +		VIRTCHNL2_CAP_RSS_IPV6_OTHER           |
> > +		VIRTCHNL2_CAP_RSS_IPV4_AH              |
> > +		VIRTCHNL2_CAP_RSS_IPV4_ESP             |
> > +		VIRTCHNL2_CAP_RSS_IPV4_AH_ESP          |
> > +		VIRTCHNL2_CAP_RSS_IPV6_AH              |
> > +		VIRTCHNL2_CAP_RSS_IPV6_ESP             |
> > +		VIRTCHNL2_CAP_RSS_IPV6_AH_ESP;
> > +
> > +	caps_msg.other_caps = VIRTCHNL2_CAP_WB_ON_ITR;
> > +
> > +	args.ops = VIRTCHNL2_OP_GET_CAPS;
> > +	args.in_args = (uint8_t *)&caps_msg;
> > +	args.in_args_size = sizeof(caps_msg);
> > +	args.out_buffer = adapter->mbx_resp;
> > +	args.out_size = IDPF_DFLT_MBX_BUF_SIZE;
> > +
> > +	err = idpf_execute_vc_cmd(adapter, &args);
> > +	if (err != 0) {
> > +		DRV_LOG(ERR,
> > +			"Failed to execute command of VIRTCHNL2_OP_GET_CAPS");
> > +		return err;
> > +	}
> > +
> > +	rte_memcpy(&adapter->caps, args.out_buffer, sizeof(caps_msg));
> > +
> > +	return 0;
> > +}
> > +
> > +int
> > +idpf_vc_create_vport(struct idpf_vport *vport,
> > +		     struct virtchnl2_create_vport *vport_req_info)
> > +{
> > +	struct idpf_adapter *adapter = vport->adapter;
> > +	struct virtchnl2_create_vport vport_msg;
> > +	struct idpf_cmd_info args;
> > +	int err = -1;
> > +
> > +	memset(&vport_msg, 0, sizeof(struct virtchnl2_create_vport));
> > +	vport_msg.vport_type = vport_req_info->vport_type;
> > +	vport_msg.txq_model = vport_req_info->txq_model;
> > +	vport_msg.rxq_model = vport_req_info->rxq_model;
> > +	vport_msg.num_tx_q = vport_req_info->num_tx_q;
> > +	vport_msg.num_tx_complq = vport_req_info->num_tx_complq;
> > +	vport_msg.num_rx_q = vport_req_info->num_rx_q;
> > +	vport_msg.num_rx_bufq = vport_req_info->num_rx_bufq;
> > +
> > +	memset(&args, 0, sizeof(args));
> > +	args.ops = VIRTCHNL2_OP_CREATE_VPORT;
> > +	args.in_args = (uint8_t *)&vport_msg;
> > +	args.in_args_size = sizeof(vport_msg);
> > +	args.out_buffer = adapter->mbx_resp;
> > +	args.out_size = IDPF_DFLT_MBX_BUF_SIZE;
> > +
> > +	err = idpf_execute_vc_cmd(adapter, &args);
> > +	if (err != 0) {
> > +		DRV_LOG(ERR,
> > +			"Failed to execute command of VIRTCHNL2_OP_CREATE_VPORT");
> > +		return err;
> > +	}
> > +
> > +	rte_memcpy(vport->vport_info, args.out_buffer, IDPF_DFLT_MBX_BUF_SIZE);
> > +	return 0;
> > +}
> > +
> > +int
> > +idpf_vc_destroy_vport(struct idpf_vport *vport)
> > +{
> > +	struct idpf_adapter *adapter = vport->adapter;
> > +	struct virtchnl2_vport vc_vport;
> > +	struct idpf_cmd_info args;
> > +	int err;
> > +
> > +	vc_vport.vport_id = vport->vport_id;
> > +
> > +	memset(&args, 0, sizeof(args));
> > +	args.ops = VIRTCHNL2_OP_DESTROY_VPORT;
> > +	args.in_args = (uint8_t *)&vc_vport;
> > +	args.in_args_size = sizeof(vc_vport);
> > +	args.out_buffer = adapter->mbx_resp;
> > +	args.out_size = IDPF_DFLT_MBX_BUF_SIZE;
> > +
> > +	err = idpf_execute_vc_cmd(adapter, &args);
> > +	if (err != 0)
> > +		DRV_LOG(ERR, "Failed to execute command of VIRTCHNL2_OP_DESTROY_VPORT");
> > +
> > +	return err;
> > +}
> > +
> > +int
> > +idpf_vc_set_rss_key(struct idpf_vport *vport)
> > +{
> > +	struct idpf_adapter *adapter = vport->adapter;
> > +	struct virtchnl2_rss_key *rss_key;
> > +	struct idpf_cmd_info args;
> > +	int len, err;
> > +
> > +	len = sizeof(*rss_key) + sizeof(rss_key->key[0]) *
> > +		(vport->rss_key_size - 1);
> > +	rss_key = rte_zmalloc("rss_key", len, 0);
> > +	if (rss_key == NULL)
> > +		return -ENOMEM;
> > +
> > +	rss_key->vport_id = vport->vport_id;
> > +	rss_key->key_len = vport->rss_key_size;
> > +	rte_memcpy(rss_key->key, vport->rss_key,
> > +		   sizeof(rss_key->key[0]) * vport->rss_key_size);
> > +
> > +	memset(&args, 0, sizeof(args));
> > +	args.ops = VIRTCHNL2_OP_SET_RSS_KEY;
> > +	args.in_args = (uint8_t *)rss_key;
> > +	args.in_args_size = len;
> > +	args.out_buffer = adapter->mbx_resp;
> > +	args.out_size = IDPF_DFLT_MBX_BUF_SIZE;
> > +
> > +	err = idpf_execute_vc_cmd(adapter, &args);
> > +	if (err != 0)
> > +		DRV_LOG(ERR, "Failed to execute command of VIRTCHNL2_OP_SET_RSS_KEY");
> > +
> > +	rte_free(rss_key);
> > +	return err;
> > +}
> > +
> > +int
> > +idpf_vc_set_rss_lut(struct idpf_vport *vport)
> > +{
> > +	struct idpf_adapter *adapter = vport->adapter;
> > +	struct virtchnl2_rss_lut *rss_lut;
> > +	struct idpf_cmd_info args;
> > +	int len, err;
> > +
> > +	len = sizeof(*rss_lut) + sizeof(rss_lut->lut[0]) *
> > +		(vport->rss_lut_size - 1);
> > +	rss_lut = rte_zmalloc("rss_lut", len, 0);
> > +	if (rss_lut == NULL)
> > +		return -ENOMEM;
> > +
> > +	rss_lut->vport_id = vport->vport_id;
> > +	rss_lut->lut_entries = vport->rss_lut_size;
> > +	rte_memcpy(rss_lut->lut, vport->rss_lut,
> > +		   sizeof(rss_lut->lut[0]) * vport->rss_lut_size);
> > +
> > +	memset(&args, 0, sizeof(args));
> > +	args.ops = VIRTCHNL2_OP_SET_RSS_LUT;
> > +	args.in_args = (uint8_t *)rss_lut;
> > +	args.in_args_size = len;
> > +	args.out_buffer = adapter->mbx_resp;
> > +	args.out_size = IDPF_DFLT_MBX_BUF_SIZE;
> > +
> > +	err = idpf_execute_vc_cmd(adapter, &args);
> > +	if (err != 0)
> > +		DRV_LOG(ERR, "Failed to execute command of VIRTCHNL2_OP_SET_RSS_LUT");
> > +
> > +	rte_free(rss_lut);
> > +	return err;
> > +}
> > +
> > +int
> > +idpf_vc_set_rss_hash(struct idpf_vport *vport)
> > +{
> > +	struct idpf_adapter *adapter = vport->adapter;
> > +	struct virtchnl2_rss_hash rss_hash;
> > +	struct idpf_cmd_info args;
> > +	int err;
> > +
> > +	memset(&rss_hash, 0, sizeof(rss_hash));
> > +	rss_hash.ptype_groups = vport->rss_hf;
> > +	rss_hash.vport_id = vport->vport_id;
> > +
> > +	memset(&args, 0, sizeof(args));
> > +	args.ops = VIRTCHNL2_OP_SET_RSS_HASH;
> > +	args.in_args = (uint8_t *)&rss_hash;
> > +	args.in_args_size = sizeof(rss_hash);
> > +	args.out_buffer = adapter->mbx_resp;
> > +	args.out_size = IDPF_DFLT_MBX_BUF_SIZE;
> > +
> > +	err = idpf_execute_vc_cmd(adapter, &args);
> > +	if (err != 0)
> > +		DRV_LOG(ERR, "Failed to execute command of OP_SET_RSS_HASH");
> > +
> > +	return err;
> > +}
> > +
> > +int
> > +idpf_vc_config_irq_map_unmap(struct idpf_vport *vport, uint16_t nb_rxq,
> > +			     bool map)
> > +{
> > +	struct idpf_adapter *adapter = vport->adapter;
> > +	struct virtchnl2_queue_vector_maps *map_info;
> > +	struct virtchnl2_queue_vector *vecmap;
> > +	struct idpf_cmd_info args;
> > +	int len, i, err = 0;
> > +
> > +	len = sizeof(struct virtchnl2_queue_vector_maps) +
> > +		(nb_rxq - 1) * sizeof(struct virtchnl2_queue_vector);
> > +
> > +	map_info = rte_zmalloc("map_info", len, 0);
> > +	if (map_info == NULL)
> > +		return -ENOMEM;
> > +
> > +	map_info->vport_id = vport->vport_id;
> > +	map_info->num_qv_maps = nb_rxq;
> > +	for (i = 0; i < nb_rxq; i++) {
> > +		vecmap = &map_info->qv_maps[i];
> > +		vecmap->queue_id = vport->qv_map[i].queue_id;
> > +		vecmap->vector_id = vport->qv_map[i].vector_id;
> > +		vecmap->itr_idx = VIRTCHNL2_ITR_IDX_0;
> > +		vecmap->queue_type = VIRTCHNL2_QUEUE_TYPE_RX;
> > +	}
> > +
> > +	args.ops = map ? VIRTCHNL2_OP_MAP_QUEUE_VECTOR :
> > +		VIRTCHNL2_OP_UNMAP_QUEUE_VECTOR;
> > +	args.in_args = (uint8_t *)map_info;
> > +	args.in_args_size = len;
> > +	args.out_buffer = adapter->mbx_resp;
> > +	args.out_size = IDPF_DFLT_MBX_BUF_SIZE;
> > +	err = idpf_execute_vc_cmd(adapter, &args);
> > +	if (err != 0)
> > +		DRV_LOG(ERR, "Failed to execute command of VIRTCHNL2_OP_%s_QUEUE_VECTOR",
> > +			map ? "MAP" : "UNMAP");
> > +
> > +	rte_free(map_info);
> > +	return err;
> > +}
> > +
> > +int
> > +idpf_vc_alloc_vectors(struct idpf_vport *vport, uint16_t num_vectors)
> > +{
> > +	struct idpf_adapter *adapter = vport->adapter;
> > +	struct virtchnl2_alloc_vectors *alloc_vec;
> > +	struct idpf_cmd_info args;
> > +	int err, len;
> > +
> > +	len = sizeof(struct virtchnl2_alloc_vectors) +
> > +		(num_vectors - 1) * sizeof(struct virtchnl2_vector_chunk);
> > +	alloc_vec = rte_zmalloc("alloc_vec", len, 0);
> > +	if (alloc_vec == NULL)
> > +		return -ENOMEM;
> > +
> > +	alloc_vec->num_vectors = num_vectors;
> > +
> > +	args.ops = VIRTCHNL2_OP_ALLOC_VECTORS;
> > +	args.in_args = (uint8_t *)alloc_vec;
> > +	args.in_args_size = sizeof(struct virtchnl2_alloc_vectors);
> > +	args.out_buffer = adapter->mbx_resp;
> > +	args.out_size = IDPF_DFLT_MBX_BUF_SIZE;
> > +	err = idpf_execute_vc_cmd(adapter, &args);
> > +	if (err != 0)
> > +		DRV_LOG(ERR, "Failed to execute command VIRTCHNL2_OP_ALLOC_VECTORS");
> > +
> > +	if (vport->recv_vectors == NULL) {
> > +		vport->recv_vectors = rte_zmalloc("recv_vectors", len, 0);
> > +		if (vport->recv_vectors == NULL) {
> > +			rte_free(alloc_vec);
> > +			return -ENOMEM;
> > +		}
> > +	}
> > +
> > +	rte_memcpy(vport->recv_vectors, args.out_buffer, len);
> > +	rte_free(alloc_vec);
> > +	return err;
> > +}
> > +
> > +int
> > +idpf_vc_dealloc_vectors(struct idpf_vport *vport)
> > +{
> > +	struct idpf_adapter *adapter = vport->adapter;
> > +	struct virtchnl2_alloc_vectors *alloc_vec;
> > +	struct virtchnl2_vector_chunks *vcs;
> > +	struct idpf_cmd_info args;
> > +	int err, len;
> > +
> > +	alloc_vec = vport->recv_vectors;
> > +	vcs = &alloc_vec->vchunks;
> > +
> > +	len = sizeof(struct virtchnl2_vector_chunks) +
> > +		(vcs->num_vchunks - 1) * sizeof(struct virtchnl2_vector_chunk);
> > +
> > +	args.ops = VIRTCHNL2_OP_DEALLOC_VECTORS;
> > +	args.in_args = (uint8_t *)vcs;
> > +	args.in_args_size = len;
> > +	args.out_buffer = adapter->mbx_resp;
> > +	args.out_size = IDPF_DFLT_MBX_BUF_SIZE;
> > +	err = idpf_execute_vc_cmd(adapter, &args);
> > +	if (err != 0)
> > +		DRV_LOG(ERR, "Failed to execute command VIRTCHNL2_OP_DEALLOC_VECTORS");
> > +
> > +	return err;
> > +}
> > +
> > +static int
> > +idpf_vc_ena_dis_one_queue(struct idpf_vport *vport, uint16_t qid,
> > +			  uint32_t type, bool on)
> > +{
> > +	struct idpf_adapter *adapter = vport->adapter;
> > +	struct virtchnl2_del_ena_dis_queues *queue_select;
> > +	struct virtchnl2_queue_chunk *queue_chunk;
> > +	struct idpf_cmd_info args;
> > +	int err, len;
> > +
> > +	len = sizeof(struct virtchnl2_del_ena_dis_queues);
> > +	queue_select = rte_zmalloc("queue_select", len, 0);
> > +	if (queue_select == NULL)
> > +		return -ENOMEM;
> > +
> > +	queue_chunk = queue_select->chunks.chunks;
> > +	queue_select->chunks.num_chunks = 1;
> > +	queue_select->vport_id = vport->vport_id;
> > +
> > +	queue_chunk->type = type;
> > +	queue_chunk->start_queue_id = qid;
> > +	queue_chunk->num_queues = 1;
> > +
> > +	args.ops = on ? VIRTCHNL2_OP_ENABLE_QUEUES :
> > +		VIRTCHNL2_OP_DISABLE_QUEUES;
> > +	args.in_args = (uint8_t *)queue_select;
> > +	args.in_args_size = len;
> > +	args.out_buffer = adapter->mbx_resp;
> > +	args.out_size = IDPF_DFLT_MBX_BUF_SIZE;
> > +	err = idpf_execute_vc_cmd(adapter, &args);
> > +	if (err != 0)
> > +		DRV_LOG(ERR, "Failed to execute command of VIRTCHNL2_OP_%s_QUEUES",
> > +			on ? "ENABLE" : "DISABLE");
> > +
> > +	rte_free(queue_select);
> > +	return err;
> > +}
> > +
> > +int
> > +idpf_switch_queue(struct idpf_vport *vport, uint16_t qid,
> > +		  bool rx, bool on)
> > +{
> > +	uint32_t type;
> > +	int err, queue_id;
> > +
> > +	/* switch txq/rxq */
> > +	type = rx ? VIRTCHNL2_QUEUE_TYPE_RX : VIRTCHNL2_QUEUE_TYPE_TX;
> > +
> > +	if (type == VIRTCHNL2_QUEUE_TYPE_RX)
> > +		queue_id = vport->chunks_info.rx_start_qid + qid;
> > +	else
> > +		queue_id = vport->chunks_info.tx_start_qid + qid;
> > +	err = idpf_vc_ena_dis_one_queue(vport, queue_id, type, on);
> > +	if (err != 0)
> > +		return err;
> > +
> > +	/* switch tx completion queue */
> > +	if (!rx && vport->txq_model == VIRTCHNL2_QUEUE_MODEL_SPLIT) {
> > +		type = VIRTCHNL2_QUEUE_TYPE_TX_COMPLETION;
> > +		queue_id = vport->chunks_info.tx_compl_start_qid + qid;
> > +		err = idpf_vc_ena_dis_one_queue(vport, queue_id, type, on);
> > +		if (err != 0)
> > +			return err;
> > +	}
> > +
> > +	/* switch rx buffer queue */
> > +	if (rx && vport->rxq_model == VIRTCHNL2_QUEUE_MODEL_SPLIT) {
> > +		type = VIRTCHNL2_QUEUE_TYPE_RX_BUFFER;
> > +		queue_id = vport->chunks_info.rx_buf_start_qid + 2 * qid;
> > +		err = idpf_vc_ena_dis_one_queue(vport, queue_id, type, on);
> > +		if (err != 0)
> > +			return err;
> > +		queue_id++;
> > +		err = idpf_vc_ena_dis_one_queue(vport, queue_id, type, on);
> > +		if (err != 0)
> > +			return err;
> > +	}
> > +
> > +	return err;
> > +}
> > +
> > +#define IDPF_RXTX_QUEUE_CHUNKS_NUM	2
> > +int
> > +idpf_vc_ena_dis_queues(struct idpf_vport *vport, bool enable)
> > +{
> > +	struct idpf_adapter *adapter = vport->adapter;
> > +	struct virtchnl2_del_ena_dis_queues *queue_select;
> > +	struct virtchnl2_queue_chunk *queue_chunk;
> > +	uint32_t type;
> > +	struct idpf_cmd_info args;
> > +	uint16_t num_chunks;
> > +	int err, len;
> > +
> > +	num_chunks = IDPF_RXTX_QUEUE_CHUNKS_NUM;
> > +	if (vport->txq_model == VIRTCHNL2_QUEUE_MODEL_SPLIT)
> > +		num_chunks++;
> > +	if (vport->rxq_model == VIRTCHNL2_QUEUE_MODEL_SPLIT)
> > +		num_chunks++;
> > +
> > +	len = sizeof(struct virtchnl2_del_ena_dis_queues) +
> > +		sizeof(struct virtchnl2_queue_chunk) * (num_chunks - 1);
> > +	queue_select = rte_zmalloc("queue_select", len, 0);
> > +	if (queue_select == NULL)
> > +		return -ENOMEM;
> > +
> > +	queue_chunk = queue_select->chunks.chunks;
> > +	queue_select->chunks.num_chunks = num_chunks;
> > +	queue_select->vport_id = vport->vport_id;
> > +
> > +	type = VIRTCHNL_QUEUE_TYPE_RX;
> > +	queue_chunk[type].type = type;
> > +	queue_chunk[type].start_queue_id = vport->chunks_info.rx_start_qid;
> > +	queue_chunk[type].num_queues = vport->num_rx_q;
> > +
> > +	type = VIRTCHNL2_QUEUE_TYPE_TX;
> > +	queue_chunk[type].type = type;
> > +	queue_chunk[type].start_queue_id = vport->chunks_info.tx_start_qid;
> > +	queue_chunk[type].num_queues = vport->num_tx_q;
> > +
> > +	if (vport->rxq_model == VIRTCHNL2_QUEUE_MODEL_SPLIT) {
> > +		type = VIRTCHNL2_QUEUE_TYPE_RX_BUFFER;
> > +		queue_chunk[type].type = type;
> > +		queue_chunk[type].start_queue_id =
> > +			vport->chunks_info.rx_buf_start_qid;
> > +		queue_chunk[type].num_queues = vport->num_rx_bufq;
> > +	}
> > +
> > +	if (vport->txq_model == VIRTCHNL2_QUEUE_MODEL_SPLIT) {
> > +		type = VIRTCHNL2_QUEUE_TYPE_TX_COMPLETION;
> > +		queue_chunk[type].type = type;
> > +		queue_chunk[type].start_queue_id =
> > +			vport->chunks_info.tx_compl_start_qid;
> > +		queue_chunk[type].num_queues = vport->num_tx_complq;
> > +	}
> > +
> > +	args.ops = enable ? VIRTCHNL2_OP_ENABLE_QUEUES :
> > +		VIRTCHNL2_OP_DISABLE_QUEUES;
> > +	args.in_args = (uint8_t *)queue_select;
> > +	args.in_args_size = len;
> > +	args.out_buffer = adapter->mbx_resp;
> > +	args.out_size = IDPF_DFLT_MBX_BUF_SIZE;
> > +	err = idpf_execute_vc_cmd(adapter, &args);
> > +	if (err != 0)
> > +		DRV_LOG(ERR, "Failed to execute command of VIRTCHNL2_OP_%s_QUEUES",
> > +			enable ? "ENABLE" : "DISABLE");
> > +
> > +	rte_free(queue_select);
> > +	return err;
> > +}
> > +
> > +int
> > +idpf_vc_ena_dis_vport(struct idpf_vport *vport, bool enable)
> > +{
> > +	struct idpf_adapter *adapter = vport->adapter;
> > +	struct virtchnl2_vport vc_vport;
> > +	struct idpf_cmd_info args;
> > +	int err;
> > +
> > +	vc_vport.vport_id = vport->vport_id;
> > +	args.ops = enable ? VIRTCHNL2_OP_ENABLE_VPORT :
> > +		VIRTCHNL2_OP_DISABLE_VPORT;
> > +	args.in_args = (uint8_t *)&vc_vport;
> > +	args.in_args_size = sizeof(vc_vport);
> > +	args.out_buffer = adapter->mbx_resp;
> > +	args.out_size = IDPF_DFLT_MBX_BUF_SIZE;
> > +
> > +	err = idpf_execute_vc_cmd(adapter, &args);
> > +	if (err != 0) {
> > +		DRV_LOG(ERR, "Failed to execute command of VIRTCHNL2_OP_%s_VPORT",
> > +			enable ? "ENABLE" : "DISABLE");
> > +	}
> > +
> > +	return err;
> > +}
> > +
> > +int
> > +idpf_vc_query_ptype_info(struct idpf_adapter *adapter)
> > +{
> > +	struct virtchnl2_get_ptype_info *ptype_info;
> > +	struct idpf_cmd_info args;
> > +	int len, err;
> > +
> > +	len = sizeof(struct virtchnl2_get_ptype_info);
> > +	ptype_info = rte_zmalloc("ptype_info", len, 0);
> > +	if (ptype_info == NULL)
> > +		return -ENOMEM;
> > +
> > +	ptype_info->start_ptype_id = 0;
> > +	ptype_info->num_ptypes = IDPF_MAX_PKT_TYPE;
> > +	args.ops = VIRTCHNL2_OP_GET_PTYPE_INFO;
> > +	args.in_args = (uint8_t *)ptype_info;
> > +	args.in_args_size = len;
> > +
> > +	err = idpf_execute_vc_cmd(adapter, &args);
> > +	if (err != 0)
> > +		DRV_LOG(ERR, "Failed to execute command of VIRTCHNL2_OP_GET_PTYPE_INFO");
> > +
> > +	rte_free(ptype_info);
> > +	return err;
> > +}
> > diff --git a/drivers/common/idpf/idpf_common_virtchnl.h
> > b/drivers/common/idpf/idpf_common_virtchnl.h
> > new file mode 100644
> > index 0000000000..bbc66d63c4
> > --- /dev/null
> > +++ b/drivers/common/idpf/idpf_common_virtchnl.h
> > @@ -0,0 +1,48 @@
> > +/* SPDX-License-Identifier: BSD-3-Clause
> > + * Copyright(c) 2022 Intel Corporation
> > + */
> > +
> > +#ifndef _IDPF_COMMON_VIRTCHNL_H_
> > +#define _IDPF_COMMON_VIRTCHNL_H_
> > +
> > +#include <idpf_common_device.h>
> > +
> > +__rte_internal
> > +int idpf_vc_check_api_version(struct idpf_adapter *adapter);
> > +__rte_internal
> > +int idpf_vc_get_caps(struct idpf_adapter *adapter);
> > +__rte_internal
> > +int idpf_vc_create_vport(struct idpf_vport *vport,
> > +			 struct virtchnl2_create_vport *vport_info);
> > +__rte_internal
> > +int idpf_vc_destroy_vport(struct idpf_vport *vport);
> > +__rte_internal
> > +int idpf_vc_set_rss_key(struct idpf_vport *vport);
> > +__rte_internal
> > +int idpf_vc_set_rss_lut(struct idpf_vport *vport);
> > +__rte_internal
> > +int idpf_vc_set_rss_hash(struct idpf_vport *vport);
> > +__rte_internal
> > +int idpf_switch_queue(struct idpf_vport *vport, uint16_t qid,
> > +		      bool rx, bool on);
> > +__rte_internal
> > +int idpf_vc_ena_dis_queues(struct idpf_vport *vport, bool enable);
> > +__rte_internal
> > +int idpf_vc_ena_dis_vport(struct idpf_vport *vport, bool enable);
> > +__rte_internal
> > +int idpf_vc_config_irq_map_unmap(struct idpf_vport *vport,
> > +				 uint16_t nb_rxq, bool map);
> > +__rte_internal
> > +int idpf_vc_alloc_vectors(struct idpf_vport *vport, uint16_t num_vectors);
> > +__rte_internal
> > +int idpf_vc_dealloc_vectors(struct idpf_vport *vport);
> > +__rte_internal
> > +int idpf_vc_query_ptype_info(struct idpf_adapter *adapter);
> > +__rte_internal
> > +int idpf_read_one_msg(struct idpf_adapter *adapter, uint32_t ops,
> > +		      uint16_t buf_len, uint8_t *buf);
> > +__rte_internal
> > +int idpf_execute_vc_cmd(struct idpf_adapter *adapter,
> > +			struct idpf_cmd_info *args);
> > +
> > +#endif /* _IDPF_COMMON_VIRTCHNL_H_ */
> > diff --git a/drivers/common/idpf/meson.build
> > b/drivers/common/idpf/meson.build
> > index 77d997b4a7..d1578641ba 100644
> > --- a/drivers/common/idpf/meson.build
> > +++ b/drivers/common/idpf/meson.build
> > @@ -1,4 +1,9 @@
> >  # SPDX-License-Identifier: BSD-3-Clause
> >  # Copyright(c) 2022 Intel Corporation
> >
> > +sources = files(
> > +    'idpf_common_device.c',
> > +    'idpf_common_virtchnl.c',
> > +)
> > +
> >  subdir('base')
> > diff --git a/drivers/common/idpf/version.map
> > b/drivers/common/idpf/version.map
> > index bfb246c752..a2b8780780 100644
> > --- a/drivers/common/idpf/version.map
> > +++ b/drivers/common/idpf/version.map
> > @@ -1,12 +1,28 @@
> >  INTERNAL {
> >  	global:
> >
> > +	idpf_ctlq_clean_sq;
> >  	idpf_ctlq_deinit;
> >  	idpf_ctlq_init;
> > -	idpf_ctlq_clean_sq;
> > +	idpf_ctlq_post_rx_buffs;
> >  	idpf_ctlq_recv;
> >  	idpf_ctlq_send;
> > -	idpf_ctlq_post_rx_buffs;

And do we really need to expose all the ctlq APIs? Ideally, all APIs in the drivers/common/idpf/base folder would be consumed only inside the idpf common module, and we should wrap them at the upper layer.
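
A rough sketch of what I mean (the helper name and shape below are only illustrative, not something this patch defines): net/idpf would call a virtchnl-level helper exported from idpf_common_virtchnl.c, and the base idpf_ctlq_* symbols could then stay private to the common module instead of being listed in version.map:

	/* Hypothetical wrapper in idpf_common_virtchnl.c (which already includes
	 * idpf_common_device.h, so struct idpf_adapter and the base control-queue
	 * prototypes are visible here).
	 */
	__rte_internal
	int
	idpf_vc_ctlq_recv(struct idpf_adapter *adapter, struct idpf_ctlq_msg *msg)
	{
		uint16_t pending = 1;

		/* idpf_ctlq_recv() is then referenced only inside the common
		 * module, so it no longer needs an entry in version.map.
		 */
		return idpf_ctlq_recv(adapter->hw.arq, &pending, msg);
	}

The PMD would then only include idpf_common_virtchnl.h and never touch the base folder's control-queue API directly.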

> > +	idpf_execute_vc_cmd;
> > +	idpf_read_one_msg;
> > +	idpf_switch_queue;
> 
> I think all APIs exposed from idpf_common_virtchnl.h can follow the same
> naming rule "idpf_vc*".
> 
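
Something like this, for illustration (these exact names are just examples of the rule, not a request for these specific identifiers):

	/* idpf_common_virtchnl.h -- possible idpf_vc_* spellings of the three
	 * exports quoted above, keeping the existing parameter lists.
	 */
	__rte_internal
	int idpf_vc_cmd_execute(struct idpf_adapter *adapter,
				struct idpf_cmd_info *args);
	__rte_internal
	int idpf_vc_one_msg_read(struct idpf_adapter *adapter, uint32_t ops,
				 uint16_t buf_len, uint8_t *buf);
	__rte_internal
	int idpf_vc_queue_switch(struct idpf_vport *vport, uint16_t qid,
				 bool rx, bool on);

That way every symbol the common virtchnl module exports in version.map shares the idpf_vc_ prefix and is easy to tell apart from the base code.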



Thread overview: 79+ messages
     [not found] <https://patches.dpdk.org/project/dpdk/cover/20230117072626.93796-1-beilei.xing@intel.com/>
2023-01-17  8:06 ` [PATCH v4 00/15] net/idpf: introduce idpf common modle beilei.xing
2023-01-17  8:06   ` [PATCH v4 01/15] common/idpf: add adapter structure beilei.xing
2023-01-17  8:06   ` [PATCH v4 02/15] common/idpf: add vport structure beilei.xing
2023-01-17  8:06   ` [PATCH v4 03/15] common/idpf: add virtual channel functions beilei.xing
2023-01-18  4:00     ` Zhang, Qi Z
2023-01-18  4:10       ` Zhang, Qi Z [this message]
2023-01-17  8:06   ` [PATCH v4 04/15] common/idpf: introduce adapter init and deinit beilei.xing
2023-01-17  8:06   ` [PATCH v4 05/15] common/idpf: add vport init/deinit beilei.xing
2023-01-17  8:06   ` [PATCH v4 06/15] common/idpf: add config RSS beilei.xing
2023-01-17  8:06   ` [PATCH v4 07/15] common/idpf: add irq map/unmap beilei.xing
2023-01-31  8:11     ` Wu, Jingjing
2023-01-17  8:06   ` [PATCH v4 08/15] common/idpf: support get packet type beilei.xing
2023-01-17  8:06   ` [PATCH v4 09/15] common/idpf: add vport info initialization beilei.xing
2023-01-31  8:24     ` Wu, Jingjing
2023-01-17  8:06   ` [PATCH v4 10/15] common/idpf: add vector flags in vport beilei.xing
2023-01-17  8:06   ` [PATCH v4 11/15] common/idpf: add rxq and txq struct beilei.xing
2023-01-17  8:06   ` [PATCH v4 12/15] common/idpf: add help functions for queue setup and release beilei.xing
2023-01-17  8:06   ` [PATCH v4 13/15] common/idpf: add Rx and Tx data path beilei.xing
2023-01-17  8:06   ` [PATCH v4 14/15] common/idpf: add vec queue setup beilei.xing
2023-01-17  8:06   ` [PATCH v4 15/15] common/idpf: add avx512 for single queue model beilei.xing
2023-02-02  9:53   ` [PATCH v5 00/15] net/idpf: introduce idpf common modle beilei.xing
2023-02-02  9:53     ` [PATCH v5 01/15] common/idpf: add adapter structure beilei.xing
2023-02-02  9:53     ` [PATCH v5 02/15] common/idpf: add vport structure beilei.xing
2023-02-02  9:53     ` [PATCH v5 03/15] common/idpf: add virtual channel functions beilei.xing
2023-02-02  9:53     ` [PATCH v5 04/15] common/idpf: introduce adapter init and deinit beilei.xing
2023-02-02  9:53     ` [PATCH v5 05/15] common/idpf: add vport init/deinit beilei.xing
2023-02-02  9:53     ` [PATCH v5 06/15] common/idpf: add config RSS beilei.xing
2023-02-02  9:53     ` [PATCH v5 07/15] common/idpf: add irq map/unmap beilei.xing
2023-02-02  9:53     ` [PATCH v5 08/15] common/idpf: support get packet type beilei.xing
2023-02-02  9:53     ` [PATCH v5 09/15] common/idpf: add vport info initialization beilei.xing
2023-02-02  9:53     ` [PATCH v5 10/15] common/idpf: add vector flags in vport beilei.xing
2023-02-02  9:53     ` [PATCH v5 11/15] common/idpf: add rxq and txq struct beilei.xing
2023-02-02  9:53     ` [PATCH v5 12/15] common/idpf: add help functions for queue setup and release beilei.xing
2023-02-02  9:53     ` [PATCH v5 13/15] common/idpf: add Rx and Tx data path beilei.xing
2023-02-02  9:53     ` [PATCH v5 14/15] common/idpf: add vec queue setup beilei.xing
2023-02-02  9:53     ` [PATCH v5 15/15] common/idpf: add avx512 for single queue model beilei.xing
2023-02-03  9:43     ` [PATCH v6 00/19] net/idpf: introduce idpf common modle beilei.xing
2023-02-03  9:43       ` [PATCH v6 01/19] common/idpf: add adapter structure beilei.xing
2023-02-03  9:43       ` [PATCH v6 02/19] common/idpf: add vport structure beilei.xing
2023-02-03  9:43       ` [PATCH v6 03/19] common/idpf: add virtual channel functions beilei.xing
2023-02-03  9:43       ` [PATCH v6 04/19] common/idpf: introduce adapter init and deinit beilei.xing
2023-02-03  9:43       ` [PATCH v6 05/19] common/idpf: add vport init/deinit beilei.xing
2023-02-03  9:43       ` [PATCH v6 06/19] common/idpf: add config RSS beilei.xing
2023-02-03  9:43       ` [PATCH v6 07/19] common/idpf: add irq map/unmap beilei.xing
2023-02-03  9:43       ` [PATCH v6 08/19] common/idpf: support get packet type beilei.xing
2023-02-03  9:43       ` [PATCH v6 09/19] common/idpf: add vport info initialization beilei.xing
2023-02-03  9:43       ` [PATCH v6 10/19] common/idpf: add vector flags in vport beilei.xing
2023-02-03  9:43       ` [PATCH v6 11/19] common/idpf: add rxq and txq struct beilei.xing
2023-02-03  9:43       ` [PATCH v6 12/19] common/idpf: add help functions for queue setup and release beilei.xing
2023-02-03  9:43       ` [PATCH v6 13/19] common/idpf: add Rx and Tx data path beilei.xing
2023-02-03  9:43       ` [PATCH v6 14/19] common/idpf: add vec queue setup beilei.xing
2023-02-03  9:43       ` [PATCH v6 15/19] common/idpf: add avx512 for single queue model beilei.xing
2023-02-03  9:43       ` [PATCH v6 16/19] common/idpf: refine API name for vport functions beilei.xing
2023-02-03  9:43       ` [PATCH v6 17/19] common/idpf: refine API name for queue config module beilei.xing
2023-02-03  9:43       ` [PATCH v6 18/19] common/idpf: refine API name for data path module beilei.xing
2023-02-03  9:43       ` [PATCH v6 19/19] common/idpf: refine API name for virtual channel functions beilei.xing
2023-02-06  2:58       ` [PATCH v6 00/19] net/idpf: introduce idpf common modle Zhang, Qi Z
2023-02-06  6:16         ` Xing, Beilei
2023-02-06  5:45       ` [PATCH v7 " beilei.xing
2023-02-06  5:46         ` [PATCH v7 01/19] common/idpf: add adapter structure beilei.xing
2023-02-06  5:46         ` [PATCH v7 02/19] common/idpf: add vport structure beilei.xing
2023-02-06  5:46         ` [PATCH v7 03/19] common/idpf: add virtual channel functions beilei.xing
2023-02-06  5:46         ` [PATCH v7 04/19] common/idpf: introduce adapter init and deinit beilei.xing
2023-02-06  5:46         ` [PATCH v7 05/19] common/idpf: add vport init/deinit beilei.xing
2023-02-06  5:46         ` [PATCH v7 06/19] common/idpf: add config RSS beilei.xing
2023-02-06  5:46         ` [PATCH v7 07/19] common/idpf: add irq map/unmap beilei.xing
2023-02-06  5:46         ` [PATCH v7 08/19] common/idpf: support get packet type beilei.xing
2023-02-06  5:46         ` [PATCH v7 09/19] common/idpf: add vport info initialization beilei.xing
2023-02-06  5:46         ` [PATCH v7 10/19] common/idpf: add vector flags in vport beilei.xing
2023-02-06  5:46         ` [PATCH v7 11/19] common/idpf: add rxq and txq struct beilei.xing
2023-02-06  5:46         ` [PATCH v7 12/19] common/idpf: add help functions for queue setup and release beilei.xing
2023-02-06  5:46         ` [PATCH v7 13/19] common/idpf: add Rx and Tx data path beilei.xing
2023-02-06  5:46         ` [PATCH v7 14/19] common/idpf: add vec queue setup beilei.xing
2023-02-06  5:46         ` [PATCH v7 15/19] common/idpf: add avx512 for single queue model beilei.xing
2023-02-06  5:46         ` [PATCH v7 16/19] common/idpf: refine API name for vport functions beilei.xing
2023-02-06  5:46         ` [PATCH v7 17/19] common/idpf: refine API name for queue config module beilei.xing
2023-02-06  5:46         ` [PATCH v7 18/19] common/idpf: refine API name for data path module beilei.xing
2023-02-06  5:46         ` [PATCH v7 19/19] common/idpf: refine API name for virtual channel functions beilei.xing
2023-02-06 13:15         ` [PATCH v7 00/19] net/idpf: introduce idpf common modle Zhang, Qi Z
