* [PATCH 0/2] vDPA/ifcvf: enable multiqueue and control vq
@ 2021-08-18  9:57 Zhu Lingshan
  2021-08-18  9:57 ` [PATCH 1/2] vDPA/ifcvf: detect and use the onboard number of queues directly Zhu Lingshan
                   ` (2 more replies)
  0 siblings, 3 replies; 8+ messages in thread
From: Zhu Lingshan @ 2021-08-18  9:57 UTC (permalink / raw)
  To: jasowang, mst; +Cc: virtualization, netdev, kvm, Zhu Lingshan

This series enables multi-queue and control vq features
for ifcvf.

These patches are based on my previous vDPA/ifcvf management link
implementation series:
https://lore.kernel.org/kvm/20210812032454.24486-2-lingshan.zhu@intel.com/T/
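
For reference, once the management interface from that series is in
place, a device instance would be created with the iproute2 vdpa tool
roughly along these lines (the PCI address and the device name here
are hypothetical, for illustration only):

  # list management devices exposed by the ifcvf driver
  vdpa mgmtdev show
  # create a vDPA device on top of an ifcvf VF
  vdpa dev add name vdpa0 mgmtdev pci/0000:06:00.2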

Thanks!

Zhu Lingshan (2):
  vDPA/ifcvf: detect and use the onboard number of queues directly
  vDPA/ifcvf: enable multiqueue and control vq

 drivers/vdpa/ifcvf/ifcvf_base.c |  8 +++++---
 drivers/vdpa/ifcvf/ifcvf_base.h | 19 ++++---------------
 drivers/vdpa/ifcvf/ifcvf_main.c | 32 +++++++++++++++-----------------
 3 files changed, 24 insertions(+), 35 deletions(-)

-- 
2.27.0



* [PATCH 1/2] vDPA/ifcvf: detect and use the onboard number of queues directly
  2021-08-18  9:57 [PATCH 0/2] vDPA/ifcvf: enable multiqueue and control vq Zhu Lingshan
@ 2021-08-18  9:57 ` Zhu Lingshan
  2021-08-19  4:09   ` Jason Wang
  2021-08-18  9:57 ` [PATCH 2/2] vDPA/ifcvf: enable multiqueue and control vq Zhu Lingshan
  2021-08-19  4:11 ` [PATCH 0/2] " Jason Wang
  2 siblings, 1 reply; 8+ messages in thread
From: Zhu Lingshan @ 2021-08-18  9:57 UTC (permalink / raw)
  To: jasowang, mst; +Cc: virtualization, netdev, kvm, Zhu Lingshan

To enable the multi-queue feature for ifcvf, this commit
detects and uses the onboard number of queues directly,
rather than the hardcoded IFCVF_MAX_QUEUE_PAIRS = 1 (now removed).
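
For illustration, a minimal sketch of the detection logic, with an
added bounds check against the static vring array size; the clamp is
an assumption for safety and is not part of this patch (the accessor
and field names are taken from the diff below):

	/* Fragment in the context of ifcvf_init_hw(): read the queue
	 * count the device reports, then clamp it to the static size
	 * of hw->vring[] (the clamp is hypothetical, not in the patch).
	 */
	hw->nr_vring = ifc_ioread16(&hw->common_cfg->num_queues);
	if (hw->nr_vring > IFCVF_MAX_QUEUES)
		hw->nr_vring = IFCVF_MAX_QUEUES;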

Signed-off-by: Zhu Lingshan <lingshan.zhu@intel.com>
---
 drivers/vdpa/ifcvf/ifcvf_base.c |  8 +++++---
 drivers/vdpa/ifcvf/ifcvf_base.h | 10 ++++------
 drivers/vdpa/ifcvf/ifcvf_main.c | 21 ++++++++++++---------
 3 files changed, 21 insertions(+), 18 deletions(-)

diff --git a/drivers/vdpa/ifcvf/ifcvf_base.c b/drivers/vdpa/ifcvf/ifcvf_base.c
index 6e197fe0fcf9..2808f1ba9f7b 100644
--- a/drivers/vdpa/ifcvf/ifcvf_base.c
+++ b/drivers/vdpa/ifcvf/ifcvf_base.c
@@ -158,7 +158,9 @@ int ifcvf_init_hw(struct ifcvf_hw *hw, struct pci_dev *pdev)
 		return -EIO;
 	}
 
-	for (i = 0; i < IFCVF_MAX_QUEUE_PAIRS * 2; i++) {
+	hw->nr_vring = ifc_ioread16(&hw->common_cfg->num_queues);
+
+	for (i = 0; i < hw->nr_vring; i++) {
 		ifc_iowrite16(i, &hw->common_cfg->queue_select);
 		notify_off = ifc_ioread16(&hw->common_cfg->queue_notify_off);
 		hw->vring[i].notify_addr = hw->notify_base +
@@ -304,7 +306,7 @@ u16 ifcvf_get_vq_state(struct ifcvf_hw *hw, u16 qid)
 	u32 q_pair_id;
 
 	ifcvf_lm = (struct ifcvf_lm_cfg __iomem *)hw->lm_cfg;
-	q_pair_id = qid / (IFCVF_MAX_QUEUE_PAIRS * 2);
+	q_pair_id = qid / hw->nr_vring;
 	avail_idx_addr = &ifcvf_lm->vring_lm_cfg[q_pair_id].idx_addr[qid % 2];
 	last_avail_idx = ifc_ioread16(avail_idx_addr);
 
@@ -318,7 +320,7 @@ int ifcvf_set_vq_state(struct ifcvf_hw *hw, u16 qid, u16 num)
 	u32 q_pair_id;
 
 	ifcvf_lm = (struct ifcvf_lm_cfg __iomem *)hw->lm_cfg;
-	q_pair_id = qid / (IFCVF_MAX_QUEUE_PAIRS * 2);
+	q_pair_id = qid / hw->nr_vring;
 	avail_idx_addr = &ifcvf_lm->vring_lm_cfg[q_pair_id].idx_addr[qid % 2];
 	hw->vring[qid].last_avail_idx = num;
 	ifc_iowrite16(num, avail_idx_addr);
diff --git a/drivers/vdpa/ifcvf/ifcvf_base.h b/drivers/vdpa/ifcvf/ifcvf_base.h
index 1601e87870da..97d9019a3ec0 100644
--- a/drivers/vdpa/ifcvf/ifcvf_base.h
+++ b/drivers/vdpa/ifcvf/ifcvf_base.h
@@ -31,8 +31,8 @@
 		 (1ULL << VIRTIO_F_ACCESS_PLATFORM)		| \
 		 (1ULL << VIRTIO_NET_F_MRG_RXBUF))
 
-/* Only one queue pair for now. */
-#define IFCVF_MAX_QUEUE_PAIRS	1
+/* Max 8 data queue pairs(16 queues) and one control vq for now. */
+#define IFCVF_MAX_QUEUES	17
 
 #define IFCVF_QUEUE_ALIGNMENT	PAGE_SIZE
 #define IFCVF_QUEUE_MAX		32768
@@ -51,8 +51,6 @@
 #define ifcvf_private_to_vf(adapter) \
 	(&((struct ifcvf_adapter *)adapter)->vf)
 
-#define IFCVF_MAX_INTR (IFCVF_MAX_QUEUE_PAIRS * 2 + 1)
-
 struct vring_info {
 	u64 desc;
 	u64 avail;
@@ -83,7 +81,7 @@ struct ifcvf_hw {
 	u32 dev_type;
 	struct virtio_pci_common_cfg __iomem *common_cfg;
 	void __iomem *net_cfg;
-	struct vring_info vring[IFCVF_MAX_QUEUE_PAIRS * 2];
+	struct vring_info vring[IFCVF_MAX_QUEUES];
 	void __iomem * const *base;
 	char config_msix_name[256];
 	struct vdpa_callback config_cb;
@@ -103,7 +101,7 @@ struct ifcvf_vring_lm_cfg {
 
 struct ifcvf_lm_cfg {
 	u8 reserved[IFCVF_LM_RING_STATE_OFFSET];
-	struct ifcvf_vring_lm_cfg vring_lm_cfg[IFCVF_MAX_QUEUE_PAIRS];
+	struct ifcvf_vring_lm_cfg vring_lm_cfg[IFCVF_MAX_QUEUES];
 };
 
 struct ifcvf_vdpa_mgmt_dev {
diff --git a/drivers/vdpa/ifcvf/ifcvf_main.c b/drivers/vdpa/ifcvf/ifcvf_main.c
index 4b623253f460..e34c2ec2b69b 100644
--- a/drivers/vdpa/ifcvf/ifcvf_main.c
+++ b/drivers/vdpa/ifcvf/ifcvf_main.c
@@ -63,9 +63,13 @@ static int ifcvf_request_irq(struct ifcvf_adapter *adapter)
 	struct pci_dev *pdev = adapter->pdev;
 	struct ifcvf_hw *vf = &adapter->vf;
 	int vector, i, ret, irq;
+	u16 max_intr;
 
-	ret = pci_alloc_irq_vectors(pdev, IFCVF_MAX_INTR,
-				    IFCVF_MAX_INTR, PCI_IRQ_MSIX);
+	/* all queues and config interrupt  */
+	max_intr = vf->nr_vring + 1;
+
+	ret = pci_alloc_irq_vectors(pdev, max_intr,
+				    max_intr, PCI_IRQ_MSIX);
 	if (ret < 0) {
 		IFCVF_ERR(pdev, "Failed to alloc IRQ vectors\n");
 		return ret;
@@ -83,7 +87,7 @@ static int ifcvf_request_irq(struct ifcvf_adapter *adapter)
 		return ret;
 	}
 
-	for (i = 0; i < IFCVF_MAX_QUEUE_PAIRS * 2; i++) {
+	for (i = 0; i < vf->nr_vring; i++) {
 		snprintf(vf->vring[i].msix_name, 256, "ifcvf[%s]-%d\n",
 			 pci_name(pdev), i);
 		vector = i + IFCVF_MSI_QUEUE_OFF;
@@ -112,7 +116,6 @@ static int ifcvf_start_datapath(void *private)
 	u8 status;
 	int ret;
 
-	vf->nr_vring = IFCVF_MAX_QUEUE_PAIRS * 2;
 	ret = ifcvf_start_hw(vf);
 	if (ret < 0) {
 		status = ifcvf_get_status(vf);
@@ -128,7 +131,7 @@ static int ifcvf_stop_datapath(void *private)
 	struct ifcvf_hw *vf = ifcvf_private_to_vf(private);
 	int i;
 
-	for (i = 0; i < IFCVF_MAX_QUEUE_PAIRS * 2; i++)
+	for (i = 0; i < vf->nr_vring; i++)
 		vf->vring[i].cb.callback = NULL;
 
 	ifcvf_stop_hw(vf);
@@ -141,7 +144,7 @@ static void ifcvf_reset_vring(struct ifcvf_adapter *adapter)
 	struct ifcvf_hw *vf = ifcvf_private_to_vf(adapter);
 	int i;
 
-	for (i = 0; i < IFCVF_MAX_QUEUE_PAIRS * 2; i++) {
+	for (i = 0; i < vf->nr_vring; i++) {
 		vf->vring[i].last_avail_idx = 0;
 		vf->vring[i].desc = 0;
 		vf->vring[i].avail = 0;
@@ -227,7 +230,7 @@ static void ifcvf_vdpa_set_status(struct vdpa_device *vdpa_dev, u8 status)
 	if ((status_old & VIRTIO_CONFIG_S_DRIVER_OK) &&
 	    !(status & VIRTIO_CONFIG_S_DRIVER_OK)) {
 		ifcvf_stop_datapath(adapter);
-		ifcvf_free_irq(adapter, IFCVF_MAX_QUEUE_PAIRS * 2);
+		ifcvf_free_irq(adapter, vf->nr_vring);
 	}
 
 	if (status == 0) {
@@ -526,13 +529,13 @@ static int ifcvf_vdpa_dev_add(struct vdpa_mgmt_dev *mdev, const char *name)
 		goto err;
 	}
 
-	for (i = 0; i < IFCVF_MAX_QUEUE_PAIRS * 2; i++)
+	for (i = 0; i < vf->nr_vring; i++)
 		vf->vring[i].irq = -EINVAL;
 
 	vf->hw_features = ifcvf_get_hw_features(vf);
 
 	adapter->vdpa.mdev = &ifcvf_mgmt_dev->mdev;
-	ret = _vdpa_register_device(&adapter->vdpa, IFCVF_MAX_QUEUE_PAIRS * 2);
+	ret = _vdpa_register_device(&adapter->vdpa, vf->nr_vring);
 	if (ret) {
 		IFCVF_ERR(pdev, "Failed to register to vDPA bus");
 		goto err;
-- 
2.27.0



* [PATCH 2/2] vDPA/ifcvf: enable multiqueue and control vq
  2021-08-18  9:57 [PATCH 0/2] vDPA/ifcvf: enable multiqueue and control vq Zhu Lingshan
  2021-08-18  9:57 ` [PATCH 1/2] vDPA/ifcvf: detect and use the onboard number of queues directly Zhu Lingshan
@ 2021-08-18  9:57 ` Zhu Lingshan
  2021-08-19  4:09   ` Jason Wang
  2021-08-19  4:11 ` [PATCH 0/2] " Jason Wang
  2 siblings, 1 reply; 8+ messages in thread
From: Zhu Lingshan @ 2021-08-18  9:57 UTC (permalink / raw)
  To: jasowang, mst; +Cc: virtualization, netdev, kvm, Zhu Lingshan

This commit enables the multi-queue and control vq
features for ifcvf.
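
With the feature whitelist removed, ifcvf_vdpa_get_features() passes
the device's feature bits through, so VIRTIO_NET_F_MQ and
VIRTIO_NET_F_CTRL_VQ become visible when the hardware offers them.
A hedged sketch of how a caller could test for the newly reachable
bits (illustrative only, not code from this patch):

	u64 features = ifcvf_vdpa_get_features(vdpa_dev);

	if (features & (1ULL << VIRTIO_NET_F_MQ))
		; /* the device offers multiple queue pairs */
	if (features & (1ULL << VIRTIO_NET_F_CTRL_VQ))
		; /* the device offers a control virtqueue */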

Signed-off-by: Zhu Lingshan <lingshan.zhu@intel.com>
---
 drivers/vdpa/ifcvf/ifcvf_base.h |  9 ---------
 drivers/vdpa/ifcvf/ifcvf_main.c | 11 +++--------
 2 files changed, 3 insertions(+), 17 deletions(-)

diff --git a/drivers/vdpa/ifcvf/ifcvf_base.h b/drivers/vdpa/ifcvf/ifcvf_base.h
index 97d9019a3ec0..09918af3ecf8 100644
--- a/drivers/vdpa/ifcvf/ifcvf_base.h
+++ b/drivers/vdpa/ifcvf/ifcvf_base.h
@@ -22,15 +22,6 @@
 #define N3000_DEVICE_ID		0x1041
 #define N3000_SUBSYS_DEVICE_ID	0x001A
 
-#define IFCVF_NET_SUPPORTED_FEATURES \
-		((1ULL << VIRTIO_NET_F_MAC)			| \
-		 (1ULL << VIRTIO_F_ANY_LAYOUT)			| \
-		 (1ULL << VIRTIO_F_VERSION_1)			| \
-		 (1ULL << VIRTIO_NET_F_STATUS)			| \
-		 (1ULL << VIRTIO_F_ORDER_PLATFORM)		| \
-		 (1ULL << VIRTIO_F_ACCESS_PLATFORM)		| \
-		 (1ULL << VIRTIO_NET_F_MRG_RXBUF))
-
 /* Max 8 data queue pairs(16 queues) and one control vq for now. */
 #define IFCVF_MAX_QUEUES	17
 
diff --git a/drivers/vdpa/ifcvf/ifcvf_main.c b/drivers/vdpa/ifcvf/ifcvf_main.c
index e34c2ec2b69b..b99283a98177 100644
--- a/drivers/vdpa/ifcvf/ifcvf_main.c
+++ b/drivers/vdpa/ifcvf/ifcvf_main.c
@@ -174,17 +174,12 @@ static u64 ifcvf_vdpa_get_features(struct vdpa_device *vdpa_dev)
 	struct ifcvf_adapter *adapter = vdpa_to_adapter(vdpa_dev);
 	struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);
 	struct pci_dev *pdev = adapter->pdev;
-
+	u32 type = vf->dev_type;
 	u64 features;
 
-	switch (vf->dev_type) {
-	case VIRTIO_ID_NET:
-		features = ifcvf_get_features(vf) & IFCVF_NET_SUPPORTED_FEATURES;
-		break;
-	case VIRTIO_ID_BLOCK:
+	if (type == VIRTIO_ID_NET || type == VIRTIO_ID_BLOCK)
 		features = ifcvf_get_features(vf);
-		break;
-	default:
+	else {
 		features = 0;
 		IFCVF_ERR(pdev, "VIRTIO ID %u not supported\n", vf->dev_type);
 	}
-- 
2.27.0



* Re: [PATCH 1/2] vDPA/ifcvf: detect and use the onboard number of queues directly
  2021-08-18  9:57 ` [PATCH 1/2] vDPA/ifcvf: detect and use the onboard number of queues directly Zhu Lingshan
@ 2021-08-19  4:09   ` Jason Wang
  0 siblings, 0 replies; 8+ messages in thread
From: Jason Wang @ 2021-08-19  4:09 UTC (permalink / raw)
  To: Zhu Lingshan, mst; +Cc: virtualization, netdev, kvm


On 2021/8/18 5:57 PM, Zhu Lingshan wrote:
> To enable the multi-queue feature for ifcvf, this commit
> detects and uses the onboard number of queues directly,
> rather than the hardcoded IFCVF_MAX_QUEUE_PAIRS = 1 (now removed).
>
> Signed-off-by: Zhu Lingshan <lingshan.zhu@intel.com>
> ---
>   drivers/vdpa/ifcvf/ifcvf_base.c |  8 +++++---
>   drivers/vdpa/ifcvf/ifcvf_base.h | 10 ++++------
>   drivers/vdpa/ifcvf/ifcvf_main.c | 21 ++++++++++++---------
>   3 files changed, 21 insertions(+), 18 deletions(-)
>
> diff --git a/drivers/vdpa/ifcvf/ifcvf_base.c b/drivers/vdpa/ifcvf/ifcvf_base.c
> index 6e197fe0fcf9..2808f1ba9f7b 100644
> --- a/drivers/vdpa/ifcvf/ifcvf_base.c
> +++ b/drivers/vdpa/ifcvf/ifcvf_base.c
> @@ -158,7 +158,9 @@ int ifcvf_init_hw(struct ifcvf_hw *hw, struct pci_dev *pdev)
>   		return -EIO;
>   	}
>   
> -	for (i = 0; i < IFCVF_MAX_QUEUE_PAIRS * 2; i++) {
> +	hw->nr_vring = ifc_ioread16(&hw->common_cfg->num_queues);
> +
> +	for (i = 0; i < hw->nr_vring; i++) {
>   		ifc_iowrite16(i, &hw->common_cfg->queue_select);
>   		notify_off = ifc_ioread16(&hw->common_cfg->queue_notify_off);
>   		hw->vring[i].notify_addr = hw->notify_base +
> @@ -304,7 +306,7 @@ u16 ifcvf_get_vq_state(struct ifcvf_hw *hw, u16 qid)
>   	u32 q_pair_id;
>   
>   	ifcvf_lm = (struct ifcvf_lm_cfg __iomem *)hw->lm_cfg;
> -	q_pair_id = qid / (IFCVF_MAX_QUEUE_PAIRS * 2);
> +	q_pair_id = qid / hw->nr_vring;
>   	avail_idx_addr = &ifcvf_lm->vring_lm_cfg[q_pair_id].idx_addr[qid % 2];
>   	last_avail_idx = ifc_ioread16(avail_idx_addr);
>   
> @@ -318,7 +320,7 @@ int ifcvf_set_vq_state(struct ifcvf_hw *hw, u16 qid, u16 num)
>   	u32 q_pair_id;
>   
>   	ifcvf_lm = (struct ifcvf_lm_cfg __iomem *)hw->lm_cfg;
> -	q_pair_id = qid / (IFCVF_MAX_QUEUE_PAIRS * 2);
> +	q_pair_id = qid / hw->nr_vring;
>   	avail_idx_addr = &ifcvf_lm->vring_lm_cfg[q_pair_id].idx_addr[qid % 2];
>   	hw->vring[qid].last_avail_idx = num;
>   	ifc_iowrite16(num, avail_idx_addr);
> diff --git a/drivers/vdpa/ifcvf/ifcvf_base.h b/drivers/vdpa/ifcvf/ifcvf_base.h
> index 1601e87870da..97d9019a3ec0 100644
> --- a/drivers/vdpa/ifcvf/ifcvf_base.h
> +++ b/drivers/vdpa/ifcvf/ifcvf_base.h
> @@ -31,8 +31,8 @@
>   		 (1ULL << VIRTIO_F_ACCESS_PLATFORM)		| \
>   		 (1ULL << VIRTIO_NET_F_MRG_RXBUF))
>   
> -/* Only one queue pair for now. */
> -#define IFCVF_MAX_QUEUE_PAIRS	1
> +/* Max 8 data queue pairs(16 queues) and one control vq for now. */
> +#define IFCVF_MAX_QUEUES	17


While at it, I wonder if we can get rid of this.
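
(One way to drop the static maximum, sketched under the assumption
that hw->vring becomes a pointer and there is an allocation point
after num_queues is read; hypothetical code, not what the driver
currently does:)

	hw->nr_vring = ifc_ioread16(&hw->common_cfg->num_queues);
	hw->vring = kcalloc(hw->nr_vring, sizeof(*hw->vring), GFP_KERNEL);
	if (!hw->vring)
		return -ENOMEM;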

Other than this,

Acked-by: Jason Wang <jasowang@redhat.com>


>   
>   #define IFCVF_QUEUE_ALIGNMENT	PAGE_SIZE
>   #define IFCVF_QUEUE_MAX		32768
> @@ -51,8 +51,6 @@
>   #define ifcvf_private_to_vf(adapter) \
>   	(&((struct ifcvf_adapter *)adapter)->vf)
>   
> -#define IFCVF_MAX_INTR (IFCVF_MAX_QUEUE_PAIRS * 2 + 1)
> -
>   struct vring_info {
>   	u64 desc;
>   	u64 avail;
> @@ -83,7 +81,7 @@ struct ifcvf_hw {
>   	u32 dev_type;
>   	struct virtio_pci_common_cfg __iomem *common_cfg;
>   	void __iomem *net_cfg;
> -	struct vring_info vring[IFCVF_MAX_QUEUE_PAIRS * 2];
> +	struct vring_info vring[IFCVF_MAX_QUEUES];
>   	void __iomem * const *base;
>   	char config_msix_name[256];
>   	struct vdpa_callback config_cb;
> @@ -103,7 +101,7 @@ struct ifcvf_vring_lm_cfg {
>   
>   struct ifcvf_lm_cfg {
>   	u8 reserved[IFCVF_LM_RING_STATE_OFFSET];
> -	struct ifcvf_vring_lm_cfg vring_lm_cfg[IFCVF_MAX_QUEUE_PAIRS];
> +	struct ifcvf_vring_lm_cfg vring_lm_cfg[IFCVF_MAX_QUEUES];
>   };
>   
>   struct ifcvf_vdpa_mgmt_dev {
> diff --git a/drivers/vdpa/ifcvf/ifcvf_main.c b/drivers/vdpa/ifcvf/ifcvf_main.c
> index 4b623253f460..e34c2ec2b69b 100644
> --- a/drivers/vdpa/ifcvf/ifcvf_main.c
> +++ b/drivers/vdpa/ifcvf/ifcvf_main.c
> @@ -63,9 +63,13 @@ static int ifcvf_request_irq(struct ifcvf_adapter *adapter)
>   	struct pci_dev *pdev = adapter->pdev;
>   	struct ifcvf_hw *vf = &adapter->vf;
>   	int vector, i, ret, irq;
> +	u16 max_intr;
>   
> -	ret = pci_alloc_irq_vectors(pdev, IFCVF_MAX_INTR,
> -				    IFCVF_MAX_INTR, PCI_IRQ_MSIX);
> +	/* all queues and config interrupt  */
> +	max_intr = vf->nr_vring + 1;
> +
> +	ret = pci_alloc_irq_vectors(pdev, max_intr,
> +				    max_intr, PCI_IRQ_MSIX);
>   	if (ret < 0) {
>   		IFCVF_ERR(pdev, "Failed to alloc IRQ vectors\n");
>   		return ret;
> @@ -83,7 +87,7 @@ static int ifcvf_request_irq(struct ifcvf_adapter *adapter)
>   		return ret;
>   	}
>   
> -	for (i = 0; i < IFCVF_MAX_QUEUE_PAIRS * 2; i++) {
> +	for (i = 0; i < vf->nr_vring; i++) {
>   		snprintf(vf->vring[i].msix_name, 256, "ifcvf[%s]-%d\n",
>   			 pci_name(pdev), i);
>   		vector = i + IFCVF_MSI_QUEUE_OFF;
> @@ -112,7 +116,6 @@ static int ifcvf_start_datapath(void *private)
>   	u8 status;
>   	int ret;
>   
> -	vf->nr_vring = IFCVF_MAX_QUEUE_PAIRS * 2;
>   	ret = ifcvf_start_hw(vf);
>   	if (ret < 0) {
>   		status = ifcvf_get_status(vf);
> @@ -128,7 +131,7 @@ static int ifcvf_stop_datapath(void *private)
>   	struct ifcvf_hw *vf = ifcvf_private_to_vf(private);
>   	int i;
>   
> -	for (i = 0; i < IFCVF_MAX_QUEUE_PAIRS * 2; i++)
> +	for (i = 0; i < vf->nr_vring; i++)
>   		vf->vring[i].cb.callback = NULL;
>   
>   	ifcvf_stop_hw(vf);
> @@ -141,7 +144,7 @@ static void ifcvf_reset_vring(struct ifcvf_adapter *adapter)
>   	struct ifcvf_hw *vf = ifcvf_private_to_vf(adapter);
>   	int i;
>   
> -	for (i = 0; i < IFCVF_MAX_QUEUE_PAIRS * 2; i++) {
> +	for (i = 0; i < vf->nr_vring; i++) {
>   		vf->vring[i].last_avail_idx = 0;
>   		vf->vring[i].desc = 0;
>   		vf->vring[i].avail = 0;
> @@ -227,7 +230,7 @@ static void ifcvf_vdpa_set_status(struct vdpa_device *vdpa_dev, u8 status)
>   	if ((status_old & VIRTIO_CONFIG_S_DRIVER_OK) &&
>   	    !(status & VIRTIO_CONFIG_S_DRIVER_OK)) {
>   		ifcvf_stop_datapath(adapter);
> -		ifcvf_free_irq(adapter, IFCVF_MAX_QUEUE_PAIRS * 2);
> +		ifcvf_free_irq(adapter, vf->nr_vring);
>   	}
>   
>   	if (status == 0) {
> @@ -526,13 +529,13 @@ static int ifcvf_vdpa_dev_add(struct vdpa_mgmt_dev *mdev, const char *name)
>   		goto err;
>   	}
>   
> -	for (i = 0; i < IFCVF_MAX_QUEUE_PAIRS * 2; i++)
> +	for (i = 0; i < vf->nr_vring; i++)
>   		vf->vring[i].irq = -EINVAL;
>   
>   	vf->hw_features = ifcvf_get_hw_features(vf);
>   
>   	adapter->vdpa.mdev = &ifcvf_mgmt_dev->mdev;
> -	ret = _vdpa_register_device(&adapter->vdpa, IFCVF_MAX_QUEUE_PAIRS * 2);
> +	ret = _vdpa_register_device(&adapter->vdpa, vf->nr_vring);
>   	if (ret) {
>   		IFCVF_ERR(pdev, "Failed to register to vDPA bus");
>   		goto err;



* Re: [PATCH 2/2] vDPA/ifcvf: enable multiqueue and control vq
  2021-08-18  9:57 ` [PATCH 2/2] vDPA/ifcvf: enable multiqueue and control vq Zhu Lingshan
@ 2021-08-19  4:09   ` Jason Wang
  0 siblings, 0 replies; 8+ messages in thread
From: Jason Wang @ 2021-08-19  4:09 UTC (permalink / raw)
  To: Zhu Lingshan, mst; +Cc: virtualization, netdev, kvm


On 2021/8/18 5:57 PM, Zhu Lingshan wrote:
> This commit enables the multi-queue and control vq
> features for ifcvf.
>
> Signed-off-by: Zhu Lingshan <lingshan.zhu@intel.com>


Acked-by: Jason Wang <jasowang@redhat.com>


> ---
>   drivers/vdpa/ifcvf/ifcvf_base.h |  9 ---------
>   drivers/vdpa/ifcvf/ifcvf_main.c | 11 +++--------
>   2 files changed, 3 insertions(+), 17 deletions(-)
>
> diff --git a/drivers/vdpa/ifcvf/ifcvf_base.h b/drivers/vdpa/ifcvf/ifcvf_base.h
> index 97d9019a3ec0..09918af3ecf8 100644
> --- a/drivers/vdpa/ifcvf/ifcvf_base.h
> +++ b/drivers/vdpa/ifcvf/ifcvf_base.h
> @@ -22,15 +22,6 @@
>   #define N3000_DEVICE_ID		0x1041
>   #define N3000_SUBSYS_DEVICE_ID	0x001A
>   
> -#define IFCVF_NET_SUPPORTED_FEATURES \
> -		((1ULL << VIRTIO_NET_F_MAC)			| \
> -		 (1ULL << VIRTIO_F_ANY_LAYOUT)			| \
> -		 (1ULL << VIRTIO_F_VERSION_1)			| \
> -		 (1ULL << VIRTIO_NET_F_STATUS)			| \
> -		 (1ULL << VIRTIO_F_ORDER_PLATFORM)		| \
> -		 (1ULL << VIRTIO_F_ACCESS_PLATFORM)		| \
> -		 (1ULL << VIRTIO_NET_F_MRG_RXBUF))
> -
>   /* Max 8 data queue pairs(16 queues) and one control vq for now. */
>   #define IFCVF_MAX_QUEUES	17
>   
> diff --git a/drivers/vdpa/ifcvf/ifcvf_main.c b/drivers/vdpa/ifcvf/ifcvf_main.c
> index e34c2ec2b69b..b99283a98177 100644
> --- a/drivers/vdpa/ifcvf/ifcvf_main.c
> +++ b/drivers/vdpa/ifcvf/ifcvf_main.c
> @@ -174,17 +174,12 @@ static u64 ifcvf_vdpa_get_features(struct vdpa_device *vdpa_dev)
>   	struct ifcvf_adapter *adapter = vdpa_to_adapter(vdpa_dev);
>   	struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);
>   	struct pci_dev *pdev = adapter->pdev;
> -
> +	u32 type = vf->dev_type;
>   	u64 features;
>   
> -	switch (vf->dev_type) {
> -	case VIRTIO_ID_NET:
> -		features = ifcvf_get_features(vf) & IFCVF_NET_SUPPORTED_FEATURES;
> -		break;
> -	case VIRTIO_ID_BLOCK:
> +	if (type == VIRTIO_ID_NET || type == VIRTIO_ID_BLOCK)
>   		features = ifcvf_get_features(vf);
> -		break;
> -	default:
> +	else {
>   		features = 0;
>   		IFCVF_ERR(pdev, "VIRTIO ID %u not supported\n", vf->dev_type);
>   	}



* Re: [PATCH 0/2] vDPA/ifcvf: enable multiqueue and control vq
  2021-08-18  9:57 [PATCH 0/2] vDPA/ifcvf: enable multiqueue and control vq Zhu Lingshan
  2021-08-18  9:57 ` [PATCH 1/2] vDPA/ifcvf: detect and use the onboard number of queues directly Zhu Lingshan
  2021-08-18  9:57 ` [PATCH 2/2] vDPA/ifcvf: enable multiqueue and control vq Zhu Lingshan
@ 2021-08-19  4:11 ` Jason Wang
  2021-08-19  6:49   ` Zhu, Lingshan
  2 siblings, 1 reply; 8+ messages in thread
From: Jason Wang @ 2021-08-19  4:11 UTC (permalink / raw)
  To: Zhu Lingshan, mst; +Cc: virtualization, netdev, kvm


On 2021/8/18 5:57 PM, Zhu Lingshan wrote:
> This series enables multi-queue and control vq features
> for ifcvf.
>
> These patches are based on my previous vDPA/ifcvf management link
> implementation series:
> https://lore.kernel.org/kvm/20210812032454.24486-2-lingshan.zhu@intel.com/T/
>
> Thanks!
>
> Zhu Lingshan (2):
>    vDPA/ifcvf: detect and use the onboard number of queues directly
>    vDPA/ifcvf: enable multiqueue and control vq
>
>   drivers/vdpa/ifcvf/ifcvf_base.c |  8 +++++---
>   drivers/vdpa/ifcvf/ifcvf_base.h | 19 ++++---------------
>   drivers/vdpa/ifcvf/ifcvf_main.c | 32 +++++++++++++++-----------------
>   3 files changed, 24 insertions(+), 35 deletions(-)
>

Patch looks good.

I wonder about compatibility. E.g., does it work on qemu master
without cvq support? (mq=off or not specified)

Thanks



* Re: [PATCH 0/2] vDPA/ifcvf: enable multiqueue and control vq
  2021-08-19  4:11 ` [PATCH 0/2] " Jason Wang
@ 2021-08-19  6:49   ` Zhu, Lingshan
  2021-08-19  7:15     ` Jason Wang
  0 siblings, 1 reply; 8+ messages in thread
From: Zhu, Lingshan @ 2021-08-19  6:49 UTC (permalink / raw)
  To: Jason Wang, mst; +Cc: virtualization, netdev, kvm



On 8/19/2021 12:11 PM, Jason Wang wrote:
>
> On 2021/8/18 5:57 PM, Zhu Lingshan wrote:
>> This series enables multi-queue and control vq features
>> for ifcvf.
>>
>> These patches are based on my previous vDPA/ifcvf management link
>> implementation series:
>> https://lore.kernel.org/kvm/20210812032454.24486-2-lingshan.zhu@intel.com/T/ 
>>
>>
>> Thanks!
>>
>> Zhu Lingshan (2):
>>    vDPA/ifcvf: detect and use the onboard number of queues directly
>>    vDPA/ifcvf: enable multiqueue and control vq
>>
>>   drivers/vdpa/ifcvf/ifcvf_base.c |  8 +++++---
>>   drivers/vdpa/ifcvf/ifcvf_base.h | 19 ++++---------------
>>   drivers/vdpa/ifcvf/ifcvf_main.c | 32 +++++++++++++++-----------------
>>   3 files changed, 24 insertions(+), 35 deletions(-)
>>
>
> Patch looks good.
>
> I wonder about compatibility. E.g., does it work on qemu master
> without cvq support? (mq=off or not specified)
Hi Jason,

Yes, it works with qemu master. When there is no cvq/mq support, only
one queue pair is shown.
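
(For reference, a hedged example of the kind of qemu command line
involved; the vhost-vdpa character device path depends on the system
and is an assumption here:)

  qemu-system-x86_64 ... \
      -netdev type=vhost-vdpa,vhostdev=/dev/vhost-vdpa-0,id=vdpa0 \
      -device virtio-net-pci,netdev=vdpa0,mq=on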

Thanks,
Zhu Lingshan
>
> Thanks
>



* Re: [PATCH 0/2] vDPA/ifcvf: enable multiqueue and control vq
  2021-08-19  6:49   ` Zhu, Lingshan
@ 2021-08-19  7:15     ` Jason Wang
  0 siblings, 0 replies; 8+ messages in thread
From: Jason Wang @ 2021-08-19  7:15 UTC (permalink / raw)
  To: Zhu, Lingshan; +Cc: mst, virtualization, netdev, kvm

On Thu, Aug 19, 2021 at 2:50 PM Zhu, Lingshan <lingshan.zhu@intel.com> wrote:
>
>
>
> On 8/19/2021 12:11 PM, Jason Wang wrote:
> >
> > On 2021/8/18 5:57 PM, Zhu Lingshan wrote:
> >> This series enables multi-queue and control vq features
> >> for ifcvf.
> >>
> >> These patches are based on my previous vDPA/ifcvf management link
> >> implementation series:
> >> https://lore.kernel.org/kvm/20210812032454.24486-2-lingshan.zhu@intel.com/T/
> >>
> >>
> >> Thanks!
> >>
> >> Zhu Lingshan (2):
> >>    vDPA/ifcvf: detect and use the onboard number of queues directly
> >>    vDPA/ifcvf: enable multiqueue and control vq
> >>
> >>   drivers/vdpa/ifcvf/ifcvf_base.c |  8 +++++---
> >>   drivers/vdpa/ifcvf/ifcvf_base.h | 19 ++++---------------
> >>   drivers/vdpa/ifcvf/ifcvf_main.c | 32 +++++++++++++++-----------------
> >>   3 files changed, 24 insertions(+), 35 deletions(-)
> >>
> >
> > Patch looks good.
> >
> > I wonder about compatibility. E.g., does it work on qemu master
> > without cvq support? (mq=off or not specified)
> Hi Jason,
>
> Yes, it works with qemu master. When there is no cvq/mq support, only
> one queue pair is shown.

Good to know this.

Thanks

>
> Thanks,
> Zhu Lingshan
> >
> > Thanks
> >
>


