All of lore.kernel.org
 help / color / mirror / Atom feed
* [PATCH] vDPA/ifcvf: allow userspace to suspend a queue
@ 2022-04-11  3:10 Zhu Lingshan
  2022-04-12  6:55 ` Zhou Furong
  2022-04-13  8:25   ` Jason Wang
  0 siblings, 2 replies; 10+ messages in thread
From: Zhu Lingshan @ 2022-04-11  3:10 UTC (permalink / raw)
  To: jasowang, mst; +Cc: virtualization, netdev, Zhu Lingshan

Formerly, ifcvf driver has implemented a lazy-initialization mechanism
for the virtqueues, it would store all virtqueue config fields that
passed down from the userspace, then load them to the virtqueues and
enable the queues upon DRIVER_OK.

To allow the userspace to suspend a virtqueue,
this commit passes queue_enable to the virtqueue directly through
set_vq_ready().

This feature requires, and this commit implements, all virtqueue
ops (set_vq_addr, set_vq_num and set_vq_ready) taking immediate
actions rather than lazy-initialization, so ifcvf_hw_enable() is retired.

To avoid losing virtqueue configurations caused by multiple
rounds of reset(), this commit also refactors the device reset
routine: now it simply resets the config handler and the virtqueues,
and performs the device-reset() only once.

Signed-off-by: Zhu Lingshan <lingshan.zhu@intel.com>
---
 drivers/vdpa/ifcvf/ifcvf_base.c | 94 ++++++++++++++++++++-------------
 drivers/vdpa/ifcvf/ifcvf_base.h | 11 ++--
 drivers/vdpa/ifcvf/ifcvf_main.c | 57 +++++---------------
 3 files changed, 75 insertions(+), 87 deletions(-)

diff --git a/drivers/vdpa/ifcvf/ifcvf_base.c b/drivers/vdpa/ifcvf/ifcvf_base.c
index 48c4dadb0c7c..19eb0dcac123 100644
--- a/drivers/vdpa/ifcvf/ifcvf_base.c
+++ b/drivers/vdpa/ifcvf/ifcvf_base.c
@@ -175,16 +175,12 @@ u8 ifcvf_get_status(struct ifcvf_hw *hw)
 void ifcvf_set_status(struct ifcvf_hw *hw, u8 status)
 {
 	vp_iowrite8(status, &hw->common_cfg->device_status);
+	vp_ioread8(&hw->common_cfg->device_status);
 }
 
 void ifcvf_reset(struct ifcvf_hw *hw)
 {
-	hw->config_cb.callback = NULL;
-	hw->config_cb.private = NULL;
-
 	ifcvf_set_status(hw, 0);
-	/* flush set_status, make sure VF is stopped, reset */
-	ifcvf_get_status(hw);
 }
 
 static void ifcvf_add_status(struct ifcvf_hw *hw, u8 status)
@@ -331,68 +327,94 @@ int ifcvf_set_vq_state(struct ifcvf_hw *hw, u16 qid, u16 num)
 	ifcvf_lm = (struct ifcvf_lm_cfg __iomem *)hw->lm_cfg;
 	q_pair_id = qid / hw->nr_vring;
 	avail_idx_addr = &ifcvf_lm->vring_lm_cfg[q_pair_id].idx_addr[qid % 2];
-	hw->vring[qid].last_avail_idx = num;
 	vp_iowrite16(num, avail_idx_addr);
+	vp_ioread16(avail_idx_addr);
 
 	return 0;
 }
 
-static int ifcvf_hw_enable(struct ifcvf_hw *hw)
+void ifcvf_set_vq_num(struct ifcvf_hw *hw, u16 qid, u32 num)
 {
-	struct virtio_pci_common_cfg __iomem *cfg;
-	u32 i;
+	struct virtio_pci_common_cfg __iomem *cfg = hw->common_cfg;
 
-	cfg = hw->common_cfg;
-	for (i = 0; i < hw->nr_vring; i++) {
-		if (!hw->vring[i].ready)
-			break;
+	vp_iowrite16(qid, &cfg->queue_select);
+	vp_iowrite16(num, &cfg->queue_size);
+	vp_ioread16(&cfg->queue_size);
+}
 
-		vp_iowrite16(i, &cfg->queue_select);
-		vp_iowrite64_twopart(hw->vring[i].desc, &cfg->queue_desc_lo,
-				     &cfg->queue_desc_hi);
-		vp_iowrite64_twopart(hw->vring[i].avail, &cfg->queue_avail_lo,
-				      &cfg->queue_avail_hi);
-		vp_iowrite64_twopart(hw->vring[i].used, &cfg->queue_used_lo,
-				     &cfg->queue_used_hi);
-		vp_iowrite16(hw->vring[i].size, &cfg->queue_size);
-		ifcvf_set_vq_state(hw, i, hw->vring[i].last_avail_idx);
-		vp_iowrite16(1, &cfg->queue_enable);
-	}
+int ifcvf_set_vq_address(struct ifcvf_hw *hw, u16 qid, u64 desc_area,
+			 u64 driver_area, u64 device_area)
+{
+	struct virtio_pci_common_cfg __iomem *cfg = hw->common_cfg;
+
+	vp_iowrite16(qid, &cfg->queue_select);
+	vp_iowrite64_twopart(desc_area, &cfg->queue_desc_lo,
+			     &cfg->queue_desc_hi);
+	vp_iowrite64_twopart(driver_area, &cfg->queue_avail_lo,
+			     &cfg->queue_avail_hi);
+	vp_iowrite64_twopart(device_area, &cfg->queue_used_lo,
+			     &cfg->queue_used_hi);
+	/* to flush IO */
+	vp_ioread16(&cfg->queue_select);
 
 	return 0;
 }
 
-static void ifcvf_hw_disable(struct ifcvf_hw *hw)
+void ifcvf_set_vq_ready(struct ifcvf_hw *hw, u16 qid, bool ready)
 {
-	u32 i;
+	struct virtio_pci_common_cfg __iomem *cfg = hw->common_cfg;
 
-	ifcvf_set_config_vector(hw, VIRTIO_MSI_NO_VECTOR);
-	for (i = 0; i < hw->nr_vring; i++) {
-		ifcvf_set_vq_vector(hw, i, VIRTIO_MSI_NO_VECTOR);
-	}
+	vp_iowrite16(qid, &cfg->queue_select);
+	vp_iowrite16(ready, &cfg->queue_enable);
+	vp_ioread16(&cfg->queue_enable);
+}
+
+bool ifcvf_get_vq_ready(struct ifcvf_hw *hw, u16 qid)
+{
+	struct virtio_pci_common_cfg __iomem *cfg = hw->common_cfg;
+	bool queue_enable;
+
+	vp_iowrite16(qid, &cfg->queue_select);
+	queue_enable = vp_ioread16(&cfg->queue_enable);
+
+	return (bool)queue_enable;
 }
 
 int ifcvf_start_hw(struct ifcvf_hw *hw)
 {
-	ifcvf_reset(hw);
 	ifcvf_add_status(hw, VIRTIO_CONFIG_S_ACKNOWLEDGE);
 	ifcvf_add_status(hw, VIRTIO_CONFIG_S_DRIVER);
 
 	if (ifcvf_config_features(hw) < 0)
 		return -EINVAL;
 
-	if (ifcvf_hw_enable(hw) < 0)
-		return -EINVAL;
-
 	ifcvf_add_status(hw, VIRTIO_CONFIG_S_DRIVER_OK);
 
 	return 0;
 }
 
+static void ifcvf_reset_vring(struct ifcvf_hw *hw)
+{
+	int i;
+
+	for (i = 0; i < hw->nr_vring; i++) {
+		hw->vring[i].cb.callback = NULL;
+		hw->vring[i].cb.private = NULL;
+		ifcvf_set_vq_vector(hw, i, VIRTIO_MSI_NO_VECTOR);
+	}
+}
+
+static void ifcvf_reset_config_handler(struct ifcvf_hw *hw)
+{
+	hw->config_cb.callback = NULL;
+	hw->config_cb.private = NULL;
+	ifcvf_set_config_vector(hw, VIRTIO_MSI_NO_VECTOR);
+}
+
 void ifcvf_stop_hw(struct ifcvf_hw *hw)
 {
-	ifcvf_hw_disable(hw);
-	ifcvf_reset(hw);
+	ifcvf_reset_vring(hw);
+	ifcvf_reset_config_handler(hw);
 }
 
 void ifcvf_notify_queue(struct ifcvf_hw *hw, u16 qid)
diff --git a/drivers/vdpa/ifcvf/ifcvf_base.h b/drivers/vdpa/ifcvf/ifcvf_base.h
index 115b61f4924b..41d86985361f 100644
--- a/drivers/vdpa/ifcvf/ifcvf_base.h
+++ b/drivers/vdpa/ifcvf/ifcvf_base.h
@@ -49,12 +49,6 @@
 #define MSIX_VECTOR_DEV_SHARED			3
 
 struct vring_info {
-	u64 desc;
-	u64 avail;
-	u64 used;
-	u16 size;
-	u16 last_avail_idx;
-	bool ready;
 	void __iomem *notify_addr;
 	phys_addr_t notify_pa;
 	u32 irq;
@@ -131,6 +125,11 @@ int ifcvf_set_vq_state(struct ifcvf_hw *hw, u16 qid, u16 num);
 struct ifcvf_adapter *vf_to_adapter(struct ifcvf_hw *hw);
 int ifcvf_probed_virtio_net(struct ifcvf_hw *hw);
 u32 ifcvf_get_config_size(struct ifcvf_hw *hw);
+int ifcvf_set_vq_address(struct ifcvf_hw *hw, u16 qid, u64 desc_area,
+			 u64 driver_area, u64 device_area);
 u16 ifcvf_set_vq_vector(struct ifcvf_hw *hw, u16 qid, int vector);
 u16 ifcvf_set_config_vector(struct ifcvf_hw *hw, int vector);
+void ifcvf_set_vq_num(struct ifcvf_hw *hw, u16 qid, u32 num);
+void ifcvf_set_vq_ready(struct ifcvf_hw *hw, u16 qid, bool ready);
+bool ifcvf_get_vq_ready(struct ifcvf_hw *hw, u16 qid);
 #endif /* _IFCVF_H_ */
diff --git a/drivers/vdpa/ifcvf/ifcvf_main.c b/drivers/vdpa/ifcvf/ifcvf_main.c
index 4366320fb68d..e442aa11333e 100644
--- a/drivers/vdpa/ifcvf/ifcvf_main.c
+++ b/drivers/vdpa/ifcvf/ifcvf_main.c
@@ -374,37 +374,6 @@ static int ifcvf_start_datapath(void *private)
 	return ret;
 }
 
-static int ifcvf_stop_datapath(void *private)
-{
-	struct ifcvf_hw *vf = ifcvf_private_to_vf(private);
-	int i;
-
-	for (i = 0; i < vf->nr_vring; i++)
-		vf->vring[i].cb.callback = NULL;
-
-	ifcvf_stop_hw(vf);
-
-	return 0;
-}
-
-static void ifcvf_reset_vring(struct ifcvf_adapter *adapter)
-{
-	struct ifcvf_hw *vf = ifcvf_private_to_vf(adapter);
-	int i;
-
-	for (i = 0; i < vf->nr_vring; i++) {
-		vf->vring[i].last_avail_idx = 0;
-		vf->vring[i].desc = 0;
-		vf->vring[i].avail = 0;
-		vf->vring[i].used = 0;
-		vf->vring[i].ready = 0;
-		vf->vring[i].cb.callback = NULL;
-		vf->vring[i].cb.private = NULL;
-	}
-
-	ifcvf_reset(vf);
-}
-
 static struct ifcvf_adapter *vdpa_to_adapter(struct vdpa_device *vdpa_dev)
 {
 	return container_of(vdpa_dev, struct ifcvf_adapter, vdpa);
@@ -477,6 +446,8 @@ static void ifcvf_vdpa_set_status(struct vdpa_device *vdpa_dev, u8 status)
 	if (status_old == status)
 		return;
 
+	ifcvf_set_status(vf, status);
+
 	if ((status & VIRTIO_CONFIG_S_DRIVER_OK) &&
 	    !(status_old & VIRTIO_CONFIG_S_DRIVER_OK)) {
 		ret = ifcvf_request_irq(adapter);
@@ -493,7 +464,6 @@ static void ifcvf_vdpa_set_status(struct vdpa_device *vdpa_dev, u8 status)
 				  status);
 	}
 
-	ifcvf_set_status(vf, status);
 }
 
 static int ifcvf_vdpa_reset(struct vdpa_device *vdpa_dev)
@@ -509,12 +479,10 @@ static int ifcvf_vdpa_reset(struct vdpa_device *vdpa_dev)
 	if (status_old == 0)
 		return 0;
 
-	if (status_old & VIRTIO_CONFIG_S_DRIVER_OK) {
-		ifcvf_stop_datapath(adapter);
-		ifcvf_free_irq(adapter);
-	}
+	ifcvf_stop_hw(vf);
+	ifcvf_free_irq(adapter);
 
-	ifcvf_reset_vring(adapter);
+	ifcvf_reset(vf);
 
 	return 0;
 }
@@ -554,14 +522,17 @@ static void ifcvf_vdpa_set_vq_ready(struct vdpa_device *vdpa_dev,
 {
 	struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);
 
-	vf->vring[qid].ready = ready;
+	ifcvf_set_vq_ready(vf, qid, ready);
 }
 
 static bool ifcvf_vdpa_get_vq_ready(struct vdpa_device *vdpa_dev, u16 qid)
 {
 	struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);
+	bool ready;
 
-	return vf->vring[qid].ready;
+	ready = ifcvf_get_vq_ready(vf, qid);
+
+	return ready;
 }
 
 static void ifcvf_vdpa_set_vq_num(struct vdpa_device *vdpa_dev, u16 qid,
@@ -569,7 +540,7 @@ static void ifcvf_vdpa_set_vq_num(struct vdpa_device *vdpa_dev, u16 qid,
 {
 	struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);
 
-	vf->vring[qid].size = num;
+	ifcvf_set_vq_num(vf, qid, num);
 }
 
 static int ifcvf_vdpa_set_vq_address(struct vdpa_device *vdpa_dev, u16 qid,
@@ -578,11 +549,7 @@ static int ifcvf_vdpa_set_vq_address(struct vdpa_device *vdpa_dev, u16 qid,
 {
 	struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);
 
-	vf->vring[qid].desc = desc_area;
-	vf->vring[qid].avail = driver_area;
-	vf->vring[qid].used = device_area;
-
-	return 0;
+	return ifcvf_set_vq_address(vf, qid, desc_area, driver_area, device_area);
 }
 
 static void ifcvf_vdpa_kick_vq(struct vdpa_device *vdpa_dev, u16 qid)
-- 
2.31.1


^ permalink raw reply related	[flat|nested] 10+ messages in thread

* Re: [PATCH] vDPA/ifcvf: allow userspace to suspend a queue
  2022-04-11  3:10 [PATCH] vDPA/ifcvf: allow userspace to suspend a queue Zhu Lingshan
@ 2022-04-12  6:55 ` Zhou Furong
  2022-04-21 11:05   ` Zhu, Lingshan
  2022-04-13  8:25   ` Jason Wang
  1 sibling, 1 reply; 10+ messages in thread
From: Zhou Furong @ 2022-04-12  6:55 UTC (permalink / raw)
  To: Zhu Lingshan, jasowang, mst; +Cc: virtualization, netdev

Hi,

> +bool ifcvf_get_vq_ready(struct ifcvf_hw *hw, u16 qid)
> +{
> +	struct virtio_pci_common_cfg __iomem *cfg = hw->common_cfg;
> +	bool queue_enable;
> +
> +	vp_iowrite16(qid, &cfg->queue_select);
> +	queue_enable = vp_ioread16(&cfg->queue_enable);
> +
> +	return (bool)queue_enable;
queue_enable is bool, why cast? looks like remove the variable is better.
return vp_ioread16(&cfg->queue_enable);

>   static bool ifcvf_vdpa_get_vq_ready(struct vdpa_device *vdpa_dev, u16 qid)
>   {
>   	struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);
> +	bool ready;
>   
> -	return vf->vring[qid].ready;
> +	ready = ifcvf_get_vq_ready(vf, qid);
> +
> +	return ready;
remove ready looks better
return ifcvf_get_vq_ready(vf, qid);


Best regards,
Furong

^ permalink raw reply	[flat|nested] 10+ messages in thread

* Re: [PATCH] vDPA/ifcvf: allow userspace to suspend a queue
  2022-04-11  3:10 [PATCH] vDPA/ifcvf: allow userspace to suspend a queue Zhu Lingshan
@ 2022-04-13  8:25   ` Jason Wang
  2022-04-13  8:25   ` Jason Wang
  1 sibling, 0 replies; 10+ messages in thread
From: Jason Wang @ 2022-04-13  8:25 UTC (permalink / raw)
  To: Zhu Lingshan; +Cc: mst, virtualization, netdev

On Mon, Apr 11, 2022 at 11:18 AM Zhu Lingshan <lingshan.zhu@intel.com> wrote:
>
> Formerly, ifcvf driver has implemented a lazy-initialization mechanism
> for the virtqueues, it would store all virtqueue config fields that
> passed down from the userspace, then load them to the virtqueues and
> enable the queues upon DRIVER_OK.
>
> To allow the userspace to suspend a virtqueue,
> this commit passes queue_enable to the virtqueue directly through
> set_vq_ready().
>
> This feature requires and this commits implementing all virtqueue
> ops(set_vq_addr, set_vq_num and set_vq_ready) to take immediate
> actions than lazy-initialization, so ifcvf_hw_enable() is retired.
>
> To avoid losing virtqueue configurations caused by multiple
> rounds of reset(), this commit also refactors thed evice reset
> routine, now it simply reset the config handler and the virtqueues,
> and only once device-reset().
>
> Signed-off-by: Zhu Lingshan <lingshan.zhu@intel.com>
> ---
>  drivers/vdpa/ifcvf/ifcvf_base.c | 94 ++++++++++++++++++++-------------
>  drivers/vdpa/ifcvf/ifcvf_base.h | 11 ++--
>  drivers/vdpa/ifcvf/ifcvf_main.c | 57 +++++---------------
>  3 files changed, 75 insertions(+), 87 deletions(-)
>
> diff --git a/drivers/vdpa/ifcvf/ifcvf_base.c b/drivers/vdpa/ifcvf/ifcvf_base.c
> index 48c4dadb0c7c..19eb0dcac123 100644
> --- a/drivers/vdpa/ifcvf/ifcvf_base.c
> +++ b/drivers/vdpa/ifcvf/ifcvf_base.c
> @@ -175,16 +175,12 @@ u8 ifcvf_get_status(struct ifcvf_hw *hw)
>  void ifcvf_set_status(struct ifcvf_hw *hw, u8 status)
>  {
>         vp_iowrite8(status, &hw->common_cfg->device_status);
> +       vp_ioread8(&hw->common_cfg->device_status);

This looks confusing, the name of the function is to set the status
but what actually implemented here is to get the status.

>  }
>
>  void ifcvf_reset(struct ifcvf_hw *hw)
>  {
> -       hw->config_cb.callback = NULL;
> -       hw->config_cb.private = NULL;
> -
>         ifcvf_set_status(hw, 0);
> -       /* flush set_status, make sure VF is stopped, reset */
> -       ifcvf_get_status(hw);
>  }
>
>  static void ifcvf_add_status(struct ifcvf_hw *hw, u8 status)
> @@ -331,68 +327,94 @@ int ifcvf_set_vq_state(struct ifcvf_hw *hw, u16 qid, u16 num)
>         ifcvf_lm = (struct ifcvf_lm_cfg __iomem *)hw->lm_cfg;
>         q_pair_id = qid / hw->nr_vring;
>         avail_idx_addr = &ifcvf_lm->vring_lm_cfg[q_pair_id].idx_addr[qid % 2];
> -       hw->vring[qid].last_avail_idx = num;
>         vp_iowrite16(num, avail_idx_addr);
> +       vp_ioread16(avail_idx_addr);

This looks like a bug fix.

>
>         return 0;
>  }
>
> -static int ifcvf_hw_enable(struct ifcvf_hw *hw)
> +void ifcvf_set_vq_num(struct ifcvf_hw *hw, u16 qid, u32 num)
>  {
> -       struct virtio_pci_common_cfg __iomem *cfg;
> -       u32 i;
> +       struct virtio_pci_common_cfg __iomem *cfg = hw->common_cfg;
>
> -       cfg = hw->common_cfg;
> -       for (i = 0; i < hw->nr_vring; i++) {
> -               if (!hw->vring[i].ready)
> -                       break;
> +       vp_iowrite16(qid, &cfg->queue_select);
> +       vp_iowrite16(num, &cfg->queue_size);
> +       vp_ioread16(&cfg->queue_size);
> +}
>
> -               vp_iowrite16(i, &cfg->queue_select);
> -               vp_iowrite64_twopart(hw->vring[i].desc, &cfg->queue_desc_lo,
> -                                    &cfg->queue_desc_hi);
> -               vp_iowrite64_twopart(hw->vring[i].avail, &cfg->queue_avail_lo,
> -                                     &cfg->queue_avail_hi);
> -               vp_iowrite64_twopart(hw->vring[i].used, &cfg->queue_used_lo,
> -                                    &cfg->queue_used_hi);
> -               vp_iowrite16(hw->vring[i].size, &cfg->queue_size);
> -               ifcvf_set_vq_state(hw, i, hw->vring[i].last_avail_idx);
> -               vp_iowrite16(1, &cfg->queue_enable);
> -       }
> +int ifcvf_set_vq_address(struct ifcvf_hw *hw, u16 qid, u64 desc_area,
> +                        u64 driver_area, u64 device_area)
> +{
> +       struct virtio_pci_common_cfg __iomem *cfg = hw->common_cfg;
> +
> +       vp_iowrite16(qid, &cfg->queue_select);
> +       vp_iowrite64_twopart(desc_area, &cfg->queue_desc_lo,
> +                            &cfg->queue_desc_hi);
> +       vp_iowrite64_twopart(driver_area, &cfg->queue_avail_lo,
> +                            &cfg->queue_avail_hi);
> +       vp_iowrite64_twopart(device_area, &cfg->queue_used_lo,
> +                            &cfg->queue_used_hi);
> +       /* to flush IO */
> +       vp_ioread16(&cfg->queue_select);

Why do we need to flush I/O here?

>
>         return 0;
>  }
>
> -static void ifcvf_hw_disable(struct ifcvf_hw *hw)
> +void ifcvf_set_vq_ready(struct ifcvf_hw *hw, u16 qid, bool ready)
>  {
> -       u32 i;
> +       struct virtio_pci_common_cfg __iomem *cfg = hw->common_cfg;
>
> -       ifcvf_set_config_vector(hw, VIRTIO_MSI_NO_VECTOR);
> -       for (i = 0; i < hw->nr_vring; i++) {
> -               ifcvf_set_vq_vector(hw, i, VIRTIO_MSI_NO_VECTOR);
> -       }
> +       vp_iowrite16(qid, &cfg->queue_select);
> +       vp_iowrite16(ready, &cfg->queue_enable);

I think we need a comment to explain that IFCVF can support write to
queue_enable since it's not allowed by the virtio spec.

> +       vp_ioread16(&cfg->queue_enable);
> +}
> +
> +bool ifcvf_get_vq_ready(struct ifcvf_hw *hw, u16 qid)
> +{
> +       struct virtio_pci_common_cfg __iomem *cfg = hw->common_cfg;
> +       bool queue_enable;
> +
> +       vp_iowrite16(qid, &cfg->queue_select);
> +       queue_enable = vp_ioread16(&cfg->queue_enable);
> +
> +       return (bool)queue_enable;
>  }
>
>  int ifcvf_start_hw(struct ifcvf_hw *hw)
>  {
> -       ifcvf_reset(hw);
>         ifcvf_add_status(hw, VIRTIO_CONFIG_S_ACKNOWLEDGE);
>         ifcvf_add_status(hw, VIRTIO_CONFIG_S_DRIVER);
>
>         if (ifcvf_config_features(hw) < 0)
>                 return -EINVAL;
>
> -       if (ifcvf_hw_enable(hw) < 0)
> -               return -EINVAL;
> -
>         ifcvf_add_status(hw, VIRTIO_CONFIG_S_DRIVER_OK);
>
>         return 0;
>  }
>
> +static void ifcvf_reset_vring(struct ifcvf_hw *hw)
> +{
> +       int i;
> +
> +       for (i = 0; i < hw->nr_vring; i++) {
> +               hw->vring[i].cb.callback = NULL;
> +               hw->vring[i].cb.private = NULL;
> +               ifcvf_set_vq_vector(hw, i, VIRTIO_MSI_NO_VECTOR);
> +       }
> +}
> +
> +static void ifcvf_reset_config_handler(struct ifcvf_hw *hw)
> +{
> +       hw->config_cb.callback = NULL;
> +       hw->config_cb.private = NULL;
> +       ifcvf_set_config_vector(hw, VIRTIO_MSI_NO_VECTOR);

Do we need to synchronize with the IRQ here?

> +}
> +
>  void ifcvf_stop_hw(struct ifcvf_hw *hw)
>  {
> -       ifcvf_hw_disable(hw);
> -       ifcvf_reset(hw);
> +       ifcvf_reset_vring(hw);
> +       ifcvf_reset_config_handler(hw);
>  }
>
>  void ifcvf_notify_queue(struct ifcvf_hw *hw, u16 qid)
> diff --git a/drivers/vdpa/ifcvf/ifcvf_base.h b/drivers/vdpa/ifcvf/ifcvf_base.h
> index 115b61f4924b..41d86985361f 100644
> --- a/drivers/vdpa/ifcvf/ifcvf_base.h
> +++ b/drivers/vdpa/ifcvf/ifcvf_base.h
> @@ -49,12 +49,6 @@
>  #define MSIX_VECTOR_DEV_SHARED                 3
>
>  struct vring_info {
> -       u64 desc;
> -       u64 avail;
> -       u64 used;
> -       u16 size;
> -       u16 last_avail_idx;
> -       bool ready;
>         void __iomem *notify_addr;
>         phys_addr_t notify_pa;
>         u32 irq;
> @@ -131,6 +125,11 @@ int ifcvf_set_vq_state(struct ifcvf_hw *hw, u16 qid, u16 num);
>  struct ifcvf_adapter *vf_to_adapter(struct ifcvf_hw *hw);
>  int ifcvf_probed_virtio_net(struct ifcvf_hw *hw);
>  u32 ifcvf_get_config_size(struct ifcvf_hw *hw);
> +int ifcvf_set_vq_address(struct ifcvf_hw *hw, u16 qid, u64 desc_area,
> +                        u64 driver_area, u64 device_area);
>  u16 ifcvf_set_vq_vector(struct ifcvf_hw *hw, u16 qid, int vector);
>  u16 ifcvf_set_config_vector(struct ifcvf_hw *hw, int vector);
> +void ifcvf_set_vq_num(struct ifcvf_hw *hw, u16 qid, u32 num);
> +void ifcvf_set_vq_ready(struct ifcvf_hw *hw, u16 qid, bool ready);
> +bool ifcvf_get_vq_ready(struct ifcvf_hw *hw, u16 qid);
>  #endif /* _IFCVF_H_ */
> diff --git a/drivers/vdpa/ifcvf/ifcvf_main.c b/drivers/vdpa/ifcvf/ifcvf_main.c
> index 4366320fb68d..e442aa11333e 100644
> --- a/drivers/vdpa/ifcvf/ifcvf_main.c
> +++ b/drivers/vdpa/ifcvf/ifcvf_main.c
> @@ -374,37 +374,6 @@ static int ifcvf_start_datapath(void *private)
>         return ret;
>  }
>
> -static int ifcvf_stop_datapath(void *private)
> -{
> -       struct ifcvf_hw *vf = ifcvf_private_to_vf(private);
> -       int i;
> -
> -       for (i = 0; i < vf->nr_vring; i++)
> -               vf->vring[i].cb.callback = NULL;
> -
> -       ifcvf_stop_hw(vf);
> -
> -       return 0;
> -}
> -
> -static void ifcvf_reset_vring(struct ifcvf_adapter *adapter)
> -{
> -       struct ifcvf_hw *vf = ifcvf_private_to_vf(adapter);
> -       int i;
> -
> -       for (i = 0; i < vf->nr_vring; i++) {
> -               vf->vring[i].last_avail_idx = 0;
> -               vf->vring[i].desc = 0;
> -               vf->vring[i].avail = 0;
> -               vf->vring[i].used = 0;
> -               vf->vring[i].ready = 0;
> -               vf->vring[i].cb.callback = NULL;
> -               vf->vring[i].cb.private = NULL;
> -       }
> -
> -       ifcvf_reset(vf);
> -}
> -
>  static struct ifcvf_adapter *vdpa_to_adapter(struct vdpa_device *vdpa_dev)
>  {
>         return container_of(vdpa_dev, struct ifcvf_adapter, vdpa);
> @@ -477,6 +446,8 @@ static void ifcvf_vdpa_set_status(struct vdpa_device *vdpa_dev, u8 status)
>         if (status_old == status)
>                 return;
>
> +       ifcvf_set_status(vf, status);
> +
>         if ((status & VIRTIO_CONFIG_S_DRIVER_OK) &&
>             !(status_old & VIRTIO_CONFIG_S_DRIVER_OK)) {
>                 ret = ifcvf_request_irq(adapter);

Does this mean e.g for DRIVER_OK the device may work before the
interrupt handler is requested?

Looks racy.

Thanks

> @@ -493,7 +464,6 @@ static void ifcvf_vdpa_set_status(struct vdpa_device *vdpa_dev, u8 status)
>                                   status);
>         }
>
> -       ifcvf_set_status(vf, status);
>  }
>
>  static int ifcvf_vdpa_reset(struct vdpa_device *vdpa_dev)
> @@ -509,12 +479,10 @@ static int ifcvf_vdpa_reset(struct vdpa_device *vdpa_dev)
>         if (status_old == 0)
>                 return 0;
>
> -       if (status_old & VIRTIO_CONFIG_S_DRIVER_OK) {
> -               ifcvf_stop_datapath(adapter);
> -               ifcvf_free_irq(adapter);
> -       }
> +       ifcvf_stop_hw(vf);
> +       ifcvf_free_irq(adapter);
>
> -       ifcvf_reset_vring(adapter);
> +       ifcvf_reset(vf);
>
>         return 0;
>  }
> @@ -554,14 +522,17 @@ static void ifcvf_vdpa_set_vq_ready(struct vdpa_device *vdpa_dev,
>  {
>         struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);
>
> -       vf->vring[qid].ready = ready;
> +       ifcvf_set_vq_ready(vf, qid, ready);
>  }
>
>  static bool ifcvf_vdpa_get_vq_ready(struct vdpa_device *vdpa_dev, u16 qid)
>  {
>         struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);
> +       bool ready;
>
> -       return vf->vring[qid].ready;
> +       ready = ifcvf_get_vq_ready(vf, qid);
> +
> +       return ready;
>  }
>
>  static void ifcvf_vdpa_set_vq_num(struct vdpa_device *vdpa_dev, u16 qid,
> @@ -569,7 +540,7 @@ static void ifcvf_vdpa_set_vq_num(struct vdpa_device *vdpa_dev, u16 qid,
>  {
>         struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);
>
> -       vf->vring[qid].size = num;
> +       ifcvf_set_vq_num(vf, qid, num);
>  }
>
>  static int ifcvf_vdpa_set_vq_address(struct vdpa_device *vdpa_dev, u16 qid,
> @@ -578,11 +549,7 @@ static int ifcvf_vdpa_set_vq_address(struct vdpa_device *vdpa_dev, u16 qid,
>  {
>         struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);
>
> -       vf->vring[qid].desc = desc_area;
> -       vf->vring[qid].avail = driver_area;
> -       vf->vring[qid].used = device_area;
> -
> -       return 0;
> +       return ifcvf_set_vq_address(vf, qid, desc_area, driver_area, device_area);
>  }
>
>  static void ifcvf_vdpa_kick_vq(struct vdpa_device *vdpa_dev, u16 qid)
> --
> 2.31.1
>


^ permalink raw reply	[flat|nested] 10+ messages in thread

* Re: [PATCH] vDPA/ifcvf: allow userspace to suspend a queue
@ 2022-04-13  8:25   ` Jason Wang
  0 siblings, 0 replies; 10+ messages in thread
From: Jason Wang @ 2022-04-13  8:25 UTC (permalink / raw)
  To: Zhu Lingshan; +Cc: netdev, virtualization, mst

On Mon, Apr 11, 2022 at 11:18 AM Zhu Lingshan <lingshan.zhu@intel.com> wrote:
>
> Formerly, ifcvf driver has implemented a lazy-initialization mechanism
> for the virtqueues, it would store all virtqueue config fields that
> passed down from the userspace, then load them to the virtqueues and
> enable the queues upon DRIVER_OK.
>
> To allow the userspace to suspend a virtqueue,
> this commit passes queue_enable to the virtqueue directly through
> set_vq_ready().
>
> This feature requires and this commits implementing all virtqueue
> ops(set_vq_addr, set_vq_num and set_vq_ready) to take immediate
> actions than lazy-initialization, so ifcvf_hw_enable() is retired.
>
> To avoid losing virtqueue configurations caused by multiple
> rounds of reset(), this commit also refactors thed evice reset
> routine, now it simply reset the config handler and the virtqueues,
> and only once device-reset().
>
> Signed-off-by: Zhu Lingshan <lingshan.zhu@intel.com>
> ---
>  drivers/vdpa/ifcvf/ifcvf_base.c | 94 ++++++++++++++++++++-------------
>  drivers/vdpa/ifcvf/ifcvf_base.h | 11 ++--
>  drivers/vdpa/ifcvf/ifcvf_main.c | 57 +++++---------------
>  3 files changed, 75 insertions(+), 87 deletions(-)
>
> diff --git a/drivers/vdpa/ifcvf/ifcvf_base.c b/drivers/vdpa/ifcvf/ifcvf_base.c
> index 48c4dadb0c7c..19eb0dcac123 100644
> --- a/drivers/vdpa/ifcvf/ifcvf_base.c
> +++ b/drivers/vdpa/ifcvf/ifcvf_base.c
> @@ -175,16 +175,12 @@ u8 ifcvf_get_status(struct ifcvf_hw *hw)
>  void ifcvf_set_status(struct ifcvf_hw *hw, u8 status)
>  {
>         vp_iowrite8(status, &hw->common_cfg->device_status);
> +       vp_ioread8(&hw->common_cfg->device_status);

This looks confusing, the name of the function is to set the status
but what actually implemented here is to get the status.

>  }
>
>  void ifcvf_reset(struct ifcvf_hw *hw)
>  {
> -       hw->config_cb.callback = NULL;
> -       hw->config_cb.private = NULL;
> -
>         ifcvf_set_status(hw, 0);
> -       /* flush set_status, make sure VF is stopped, reset */
> -       ifcvf_get_status(hw);
>  }
>
>  static void ifcvf_add_status(struct ifcvf_hw *hw, u8 status)
> @@ -331,68 +327,94 @@ int ifcvf_set_vq_state(struct ifcvf_hw *hw, u16 qid, u16 num)
>         ifcvf_lm = (struct ifcvf_lm_cfg __iomem *)hw->lm_cfg;
>         q_pair_id = qid / hw->nr_vring;
>         avail_idx_addr = &ifcvf_lm->vring_lm_cfg[q_pair_id].idx_addr[qid % 2];
> -       hw->vring[qid].last_avail_idx = num;
>         vp_iowrite16(num, avail_idx_addr);
> +       vp_ioread16(avail_idx_addr);

This looks like a bug fix.

>
>         return 0;
>  }
>
> -static int ifcvf_hw_enable(struct ifcvf_hw *hw)
> +void ifcvf_set_vq_num(struct ifcvf_hw *hw, u16 qid, u32 num)
>  {
> -       struct virtio_pci_common_cfg __iomem *cfg;
> -       u32 i;
> +       struct virtio_pci_common_cfg __iomem *cfg = hw->common_cfg;
>
> -       cfg = hw->common_cfg;
> -       for (i = 0; i < hw->nr_vring; i++) {
> -               if (!hw->vring[i].ready)
> -                       break;
> +       vp_iowrite16(qid, &cfg->queue_select);
> +       vp_iowrite16(num, &cfg->queue_size);
> +       vp_ioread16(&cfg->queue_size);
> +}
>
> -               vp_iowrite16(i, &cfg->queue_select);
> -               vp_iowrite64_twopart(hw->vring[i].desc, &cfg->queue_desc_lo,
> -                                    &cfg->queue_desc_hi);
> -               vp_iowrite64_twopart(hw->vring[i].avail, &cfg->queue_avail_lo,
> -                                     &cfg->queue_avail_hi);
> -               vp_iowrite64_twopart(hw->vring[i].used, &cfg->queue_used_lo,
> -                                    &cfg->queue_used_hi);
> -               vp_iowrite16(hw->vring[i].size, &cfg->queue_size);
> -               ifcvf_set_vq_state(hw, i, hw->vring[i].last_avail_idx);
> -               vp_iowrite16(1, &cfg->queue_enable);
> -       }
> +int ifcvf_set_vq_address(struct ifcvf_hw *hw, u16 qid, u64 desc_area,
> +                        u64 driver_area, u64 device_area)
> +{
> +       struct virtio_pci_common_cfg __iomem *cfg = hw->common_cfg;
> +
> +       vp_iowrite16(qid, &cfg->queue_select);
> +       vp_iowrite64_twopart(desc_area, &cfg->queue_desc_lo,
> +                            &cfg->queue_desc_hi);
> +       vp_iowrite64_twopart(driver_area, &cfg->queue_avail_lo,
> +                            &cfg->queue_avail_hi);
> +       vp_iowrite64_twopart(device_area, &cfg->queue_used_lo,
> +                            &cfg->queue_used_hi);
> +       /* to flush IO */
> +       vp_ioread16(&cfg->queue_select);

Why do we need to flush I/O here?

>
>         return 0;
>  }
>
> -static void ifcvf_hw_disable(struct ifcvf_hw *hw)
> +void ifcvf_set_vq_ready(struct ifcvf_hw *hw, u16 qid, bool ready)
>  {
> -       u32 i;
> +       struct virtio_pci_common_cfg __iomem *cfg = hw->common_cfg;
>
> -       ifcvf_set_config_vector(hw, VIRTIO_MSI_NO_VECTOR);
> -       for (i = 0; i < hw->nr_vring; i++) {
> -               ifcvf_set_vq_vector(hw, i, VIRTIO_MSI_NO_VECTOR);
> -       }
> +       vp_iowrite16(qid, &cfg->queue_select);
> +       vp_iowrite16(ready, &cfg->queue_enable);

I think we need a comment to explain that IFCVF can support write to
queue_enable since it's not allowed by the virtio spec.

> +       vp_ioread16(&cfg->queue_enable);
> +}
> +
> +bool ifcvf_get_vq_ready(struct ifcvf_hw *hw, u16 qid)
> +{
> +       struct virtio_pci_common_cfg __iomem *cfg = hw->common_cfg;
> +       bool queue_enable;
> +
> +       vp_iowrite16(qid, &cfg->queue_select);
> +       queue_enable = vp_ioread16(&cfg->queue_enable);
> +
> +       return (bool)queue_enable;
>  }
>
>  int ifcvf_start_hw(struct ifcvf_hw *hw)
>  {
> -       ifcvf_reset(hw);
>         ifcvf_add_status(hw, VIRTIO_CONFIG_S_ACKNOWLEDGE);
>         ifcvf_add_status(hw, VIRTIO_CONFIG_S_DRIVER);
>
>         if (ifcvf_config_features(hw) < 0)
>                 return -EINVAL;
>
> -       if (ifcvf_hw_enable(hw) < 0)
> -               return -EINVAL;
> -
>         ifcvf_add_status(hw, VIRTIO_CONFIG_S_DRIVER_OK);
>
>         return 0;
>  }
>
> +static void ifcvf_reset_vring(struct ifcvf_hw *hw)
> +{
> +       int i;
> +
> +       for (i = 0; i < hw->nr_vring; i++) {
> +               hw->vring[i].cb.callback = NULL;
> +               hw->vring[i].cb.private = NULL;
> +               ifcvf_set_vq_vector(hw, i, VIRTIO_MSI_NO_VECTOR);
> +       }
> +}
> +
> +static void ifcvf_reset_config_handler(struct ifcvf_hw *hw)
> +{
> +       hw->config_cb.callback = NULL;
> +       hw->config_cb.private = NULL;
> +       ifcvf_set_config_vector(hw, VIRTIO_MSI_NO_VECTOR);

Do we need to synchronize with the IRQ here?

> +}
> +
>  void ifcvf_stop_hw(struct ifcvf_hw *hw)
>  {
> -       ifcvf_hw_disable(hw);
> -       ifcvf_reset(hw);
> +       ifcvf_reset_vring(hw);
> +       ifcvf_reset_config_handler(hw);
>  }
>
>  void ifcvf_notify_queue(struct ifcvf_hw *hw, u16 qid)
> diff --git a/drivers/vdpa/ifcvf/ifcvf_base.h b/drivers/vdpa/ifcvf/ifcvf_base.h
> index 115b61f4924b..41d86985361f 100644
> --- a/drivers/vdpa/ifcvf/ifcvf_base.h
> +++ b/drivers/vdpa/ifcvf/ifcvf_base.h
> @@ -49,12 +49,6 @@
>  #define MSIX_VECTOR_DEV_SHARED                 3
>
>  struct vring_info {
> -       u64 desc;
> -       u64 avail;
> -       u64 used;
> -       u16 size;
> -       u16 last_avail_idx;
> -       bool ready;
>         void __iomem *notify_addr;
>         phys_addr_t notify_pa;
>         u32 irq;
> @@ -131,6 +125,11 @@ int ifcvf_set_vq_state(struct ifcvf_hw *hw, u16 qid, u16 num);
>  struct ifcvf_adapter *vf_to_adapter(struct ifcvf_hw *hw);
>  int ifcvf_probed_virtio_net(struct ifcvf_hw *hw);
>  u32 ifcvf_get_config_size(struct ifcvf_hw *hw);
> +int ifcvf_set_vq_address(struct ifcvf_hw *hw, u16 qid, u64 desc_area,
> +                        u64 driver_area, u64 device_area);
>  u16 ifcvf_set_vq_vector(struct ifcvf_hw *hw, u16 qid, int vector);
>  u16 ifcvf_set_config_vector(struct ifcvf_hw *hw, int vector);
> +void ifcvf_set_vq_num(struct ifcvf_hw *hw, u16 qid, u32 num);
> +void ifcvf_set_vq_ready(struct ifcvf_hw *hw, u16 qid, bool ready);
> +bool ifcvf_get_vq_ready(struct ifcvf_hw *hw, u16 qid);
>  #endif /* _IFCVF_H_ */
> diff --git a/drivers/vdpa/ifcvf/ifcvf_main.c b/drivers/vdpa/ifcvf/ifcvf_main.c
> index 4366320fb68d..e442aa11333e 100644
> --- a/drivers/vdpa/ifcvf/ifcvf_main.c
> +++ b/drivers/vdpa/ifcvf/ifcvf_main.c
> @@ -374,37 +374,6 @@ static int ifcvf_start_datapath(void *private)
>         return ret;
>  }
>
> -static int ifcvf_stop_datapath(void *private)
> -{
> -       struct ifcvf_hw *vf = ifcvf_private_to_vf(private);
> -       int i;
> -
> -       for (i = 0; i < vf->nr_vring; i++)
> -               vf->vring[i].cb.callback = NULL;
> -
> -       ifcvf_stop_hw(vf);
> -
> -       return 0;
> -}
> -
> -static void ifcvf_reset_vring(struct ifcvf_adapter *adapter)
> -{
> -       struct ifcvf_hw *vf = ifcvf_private_to_vf(adapter);
> -       int i;
> -
> -       for (i = 0; i < vf->nr_vring; i++) {
> -               vf->vring[i].last_avail_idx = 0;
> -               vf->vring[i].desc = 0;
> -               vf->vring[i].avail = 0;
> -               vf->vring[i].used = 0;
> -               vf->vring[i].ready = 0;
> -               vf->vring[i].cb.callback = NULL;
> -               vf->vring[i].cb.private = NULL;
> -       }
> -
> -       ifcvf_reset(vf);
> -}
> -
>  static struct ifcvf_adapter *vdpa_to_adapter(struct vdpa_device *vdpa_dev)
>  {
>         return container_of(vdpa_dev, struct ifcvf_adapter, vdpa);
> @@ -477,6 +446,8 @@ static void ifcvf_vdpa_set_status(struct vdpa_device *vdpa_dev, u8 status)
>         if (status_old == status)
>                 return;
>
> +       ifcvf_set_status(vf, status);
> +
>         if ((status & VIRTIO_CONFIG_S_DRIVER_OK) &&
>             !(status_old & VIRTIO_CONFIG_S_DRIVER_OK)) {
>                 ret = ifcvf_request_irq(adapter);

Does this mean e.g for DRIVER_OK the device may work before the
interrupt handler is requested?

Looks racy.

Thanks

> @@ -493,7 +464,6 @@ static void ifcvf_vdpa_set_status(struct vdpa_device *vdpa_dev, u8 status)
>                                   status);
>         }
>
> -       ifcvf_set_status(vf, status);
>  }
>
>  static int ifcvf_vdpa_reset(struct vdpa_device *vdpa_dev)
> @@ -509,12 +479,10 @@ static int ifcvf_vdpa_reset(struct vdpa_device *vdpa_dev)
>         if (status_old == 0)
>                 return 0;
>
> -       if (status_old & VIRTIO_CONFIG_S_DRIVER_OK) {
> -               ifcvf_stop_datapath(adapter);
> -               ifcvf_free_irq(adapter);
> -       }
> +       ifcvf_stop_hw(vf);
> +       ifcvf_free_irq(adapter);
>
> -       ifcvf_reset_vring(adapter);
> +       ifcvf_reset(vf);
>
>         return 0;
>  }
> @@ -554,14 +522,17 @@ static void ifcvf_vdpa_set_vq_ready(struct vdpa_device *vdpa_dev,
>  {
>         struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);
>
> -       vf->vring[qid].ready = ready;
> +       ifcvf_set_vq_ready(vf, qid, ready);
>  }
>
>  static bool ifcvf_vdpa_get_vq_ready(struct vdpa_device *vdpa_dev, u16 qid)
>  {
>         struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);
> +       bool ready;
>
> -       return vf->vring[qid].ready;
> +       ready = ifcvf_get_vq_ready(vf, qid);
> +
> +       return ready;
>  }
>
>  static void ifcvf_vdpa_set_vq_num(struct vdpa_device *vdpa_dev, u16 qid,
> @@ -569,7 +540,7 @@ static void ifcvf_vdpa_set_vq_num(struct vdpa_device *vdpa_dev, u16 qid,
>  {
>         struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);
>
> -       vf->vring[qid].size = num;
> +       ifcvf_set_vq_num(vf, qid, num);
>  }
>
>  static int ifcvf_vdpa_set_vq_address(struct vdpa_device *vdpa_dev, u16 qid,
> @@ -578,11 +549,7 @@ static int ifcvf_vdpa_set_vq_address(struct vdpa_device *vdpa_dev, u16 qid,
>  {
>         struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);
>
> -       vf->vring[qid].desc = desc_area;
> -       vf->vring[qid].avail = driver_area;
> -       vf->vring[qid].used = device_area;
> -
> -       return 0;
> +       return ifcvf_set_vq_address(vf, qid, desc_area, driver_area, device_area);
>  }
>
>  static void ifcvf_vdpa_kick_vq(struct vdpa_device *vdpa_dev, u16 qid)
> --
> 2.31.1
>

_______________________________________________
Virtualization mailing list
Virtualization@lists.linux-foundation.org
https://lists.linuxfoundation.org/mailman/listinfo/virtualization

^ permalink raw reply	[flat|nested] 10+ messages in thread

* Re: [PATCH] vDPA/ifcvf: allow userspace to suspend a queue
  2022-04-13  8:25   ` Jason Wang
@ 2022-04-13  8:54     ` Michael S. Tsirkin
  -1 siblings, 0 replies; 10+ messages in thread
From: Michael S. Tsirkin @ 2022-04-13  8:54 UTC (permalink / raw)
  To: Jason Wang; +Cc: Zhu Lingshan, virtualization, netdev

On Wed, Apr 13, 2022 at 04:25:22PM +0800, Jason Wang wrote:
> On Mon, Apr 11, 2022 at 11:18 AM Zhu Lingshan <lingshan.zhu@intel.com> wrote:
> >
> > Formerly, ifcvf driver has implemented a lazy-initialization mechanism
> > for the virtqueues, it would store all virtqueue config fields that
> > passed down from the userspace, then load them to the virtqueues and
> > enable the queues upon DRIVER_OK.
> >
> > To allow the userspace to suspend a virtqueue,
> > this commit passes queue_enable to the virtqueue directly through
> > set_vq_ready().
> >
> > This feature requires, and this commit implements, all virtqueue
> > ops (set_vq_addr, set_vq_num and set_vq_ready) to take immediate
> > actions rather than lazy-initialization, so ifcvf_hw_enable() is retired.
> >
> > To avoid losing virtqueue configurations caused by multiple
> > rounds of reset(), this commit also refactors the device reset
> > routine; now it simply resets the config handler and the virtqueues,
> > and performs the device-reset() only once.
> >
> > Signed-off-by: Zhu Lingshan <lingshan.zhu@intel.com>
> > ---
> >  drivers/vdpa/ifcvf/ifcvf_base.c | 94 ++++++++++++++++++++-------------
> >  drivers/vdpa/ifcvf/ifcvf_base.h | 11 ++--
> >  drivers/vdpa/ifcvf/ifcvf_main.c | 57 +++++---------------
> >  3 files changed, 75 insertions(+), 87 deletions(-)
> >
> > diff --git a/drivers/vdpa/ifcvf/ifcvf_base.c b/drivers/vdpa/ifcvf/ifcvf_base.c
> > index 48c4dadb0c7c..19eb0dcac123 100644
> > --- a/drivers/vdpa/ifcvf/ifcvf_base.c
> > +++ b/drivers/vdpa/ifcvf/ifcvf_base.c
> > @@ -175,16 +175,12 @@ u8 ifcvf_get_status(struct ifcvf_hw *hw)
> >  void ifcvf_set_status(struct ifcvf_hw *hw, u8 status)
> >  {
> >         vp_iowrite8(status, &hw->common_cfg->device_status);
> > +       vp_ioread8(&hw->common_cfg->device_status);
> 
> This looks confusing, the name of the function is to set the status
> but what actually implemented here is to get the status.
> 
> >  }
> >
> >  void ifcvf_reset(struct ifcvf_hw *hw)
> >  {
> > -       hw->config_cb.callback = NULL;
> > -       hw->config_cb.private = NULL;
> > -
> >         ifcvf_set_status(hw, 0);
> > -       /* flush set_status, make sure VF is stopped, reset */
> > -       ifcvf_get_status(hw);
> >  }
> >
> >  static void ifcvf_add_status(struct ifcvf_hw *hw, u8 status)
> > @@ -331,68 +327,94 @@ int ifcvf_set_vq_state(struct ifcvf_hw *hw, u16 qid, u16 num)
> >         ifcvf_lm = (struct ifcvf_lm_cfg __iomem *)hw->lm_cfg;
> >         q_pair_id = qid / hw->nr_vring;
> >         avail_idx_addr = &ifcvf_lm->vring_lm_cfg[q_pair_id].idx_addr[qid % 2];
> > -       hw->vring[qid].last_avail_idx = num;
> >         vp_iowrite16(num, avail_idx_addr);
> > +       vp_ioread16(avail_idx_addr);
> 
> This looks like a bug fix.

is this to flush out the status write?  pls add a comment
explaining when and why it's needed.

> >
> >         return 0;
> >  }
> >
> > -static int ifcvf_hw_enable(struct ifcvf_hw *hw)
> > +void ifcvf_set_vq_num(struct ifcvf_hw *hw, u16 qid, u32 num)
> >  {
> > -       struct virtio_pci_common_cfg __iomem *cfg;
> > -       u32 i;
> > +       struct virtio_pci_common_cfg __iomem *cfg = hw->common_cfg;
> >
> > -       cfg = hw->common_cfg;
> > -       for (i = 0; i < hw->nr_vring; i++) {
> > -               if (!hw->vring[i].ready)
> > -                       break;
> > +       vp_iowrite16(qid, &cfg->queue_select);
> > +       vp_iowrite16(num, &cfg->queue_size);
> > +       vp_ioread16(&cfg->queue_size);
> > +}
> >
> > -               vp_iowrite16(i, &cfg->queue_select);
> > -               vp_iowrite64_twopart(hw->vring[i].desc, &cfg->queue_desc_lo,
> > -                                    &cfg->queue_desc_hi);
> > -               vp_iowrite64_twopart(hw->vring[i].avail, &cfg->queue_avail_lo,
> > -                                     &cfg->queue_avail_hi);
> > -               vp_iowrite64_twopart(hw->vring[i].used, &cfg->queue_used_lo,
> > -                                    &cfg->queue_used_hi);
> > -               vp_iowrite16(hw->vring[i].size, &cfg->queue_size);
> > -               ifcvf_set_vq_state(hw, i, hw->vring[i].last_avail_idx);
> > -               vp_iowrite16(1, &cfg->queue_enable);
> > -       }
> > +int ifcvf_set_vq_address(struct ifcvf_hw *hw, u16 qid, u64 desc_area,
> > +                        u64 driver_area, u64 device_area)
> > +{
> > +       struct virtio_pci_common_cfg __iomem *cfg = hw->common_cfg;
> > +
> > +       vp_iowrite16(qid, &cfg->queue_select);
> > +       vp_iowrite64_twopart(desc_area, &cfg->queue_desc_lo,
> > +                            &cfg->queue_desc_hi);
> > +       vp_iowrite64_twopart(driver_area, &cfg->queue_avail_lo,
> > +                            &cfg->queue_avail_hi);
> > +       vp_iowrite64_twopart(device_area, &cfg->queue_used_lo,
> > +                            &cfg->queue_used_hi);
> > +       /* to flush IO */
> > +       vp_ioread16(&cfg->queue_select);
> 
> Why do we need to flush I/O here?
> 
> >
> >         return 0;
> >  }
> >
> > -static void ifcvf_hw_disable(struct ifcvf_hw *hw)
> > +void ifcvf_set_vq_ready(struct ifcvf_hw *hw, u16 qid, bool ready)
> >  {
> > -       u32 i;
> > +       struct virtio_pci_common_cfg __iomem *cfg = hw->common_cfg;
> >
> > -       ifcvf_set_config_vector(hw, VIRTIO_MSI_NO_VECTOR);
> > -       for (i = 0; i < hw->nr_vring; i++) {
> > -               ifcvf_set_vq_vector(hw, i, VIRTIO_MSI_NO_VECTOR);
> > -       }
> > +       vp_iowrite16(qid, &cfg->queue_select);
> > +       vp_iowrite16(ready, &cfg->queue_enable);
> 
> I think we need a comment to explain that IFCVF can support write to
> queue_enable since it's not allowed by the virtio spec.


I think you mean writing 0 there. writing 1 is allowed.


> 
> > +       vp_ioread16(&cfg->queue_enable);
> > +}
> > +
> > +bool ifcvf_get_vq_ready(struct ifcvf_hw *hw, u16 qid)
> > +{
> > +       struct virtio_pci_common_cfg __iomem *cfg = hw->common_cfg;
> > +       bool queue_enable;
> > +
> > +       vp_iowrite16(qid, &cfg->queue_select);
> > +       queue_enable = vp_ioread16(&cfg->queue_enable);
> > +
> > +       return (bool)queue_enable;
> >  }
> >
> >  int ifcvf_start_hw(struct ifcvf_hw *hw)
> >  {
> > -       ifcvf_reset(hw);
> >         ifcvf_add_status(hw, VIRTIO_CONFIG_S_ACKNOWLEDGE);
> >         ifcvf_add_status(hw, VIRTIO_CONFIG_S_DRIVER);
> >
> >         if (ifcvf_config_features(hw) < 0)
> >                 return -EINVAL;
> >
> > -       if (ifcvf_hw_enable(hw) < 0)
> > -               return -EINVAL;
> > -
> >         ifcvf_add_status(hw, VIRTIO_CONFIG_S_DRIVER_OK);
> >
> >         return 0;
> >  }
> >
> > +static void ifcvf_reset_vring(struct ifcvf_hw *hw)
> > +{
> > +       int i;
> > +
> > +       for (i = 0; i < hw->nr_vring; i++) {
> > +               hw->vring[i].cb.callback = NULL;
> > +               hw->vring[i].cb.private = NULL;
> > +               ifcvf_set_vq_vector(hw, i, VIRTIO_MSI_NO_VECTOR);
> > +       }
> > +}
> > +
> > +static void ifcvf_reset_config_handler(struct ifcvf_hw *hw)
> > +{
> > +       hw->config_cb.callback = NULL;
> > +       hw->config_cb.private = NULL;
> > +       ifcvf_set_config_vector(hw, VIRTIO_MSI_NO_VECTOR);
> 
> Do we need to synchronize with the IRQ here?
> 
> > +}
> > +
> >  void ifcvf_stop_hw(struct ifcvf_hw *hw)
> >  {
> > -       ifcvf_hw_disable(hw);
> > -       ifcvf_reset(hw);
> > +       ifcvf_reset_vring(hw);
> > +       ifcvf_reset_config_handler(hw);
> >  }
> >
> >  void ifcvf_notify_queue(struct ifcvf_hw *hw, u16 qid)
> > diff --git a/drivers/vdpa/ifcvf/ifcvf_base.h b/drivers/vdpa/ifcvf/ifcvf_base.h
> > index 115b61f4924b..41d86985361f 100644
> > --- a/drivers/vdpa/ifcvf/ifcvf_base.h
> > +++ b/drivers/vdpa/ifcvf/ifcvf_base.h
> > @@ -49,12 +49,6 @@
> >  #define MSIX_VECTOR_DEV_SHARED                 3
> >
> >  struct vring_info {
> > -       u64 desc;
> > -       u64 avail;
> > -       u64 used;
> > -       u16 size;
> > -       u16 last_avail_idx;
> > -       bool ready;
> >         void __iomem *notify_addr;
> >         phys_addr_t notify_pa;
> >         u32 irq;
> > @@ -131,6 +125,11 @@ int ifcvf_set_vq_state(struct ifcvf_hw *hw, u16 qid, u16 num);
> >  struct ifcvf_adapter *vf_to_adapter(struct ifcvf_hw *hw);
> >  int ifcvf_probed_virtio_net(struct ifcvf_hw *hw);
> >  u32 ifcvf_get_config_size(struct ifcvf_hw *hw);
> > +int ifcvf_set_vq_address(struct ifcvf_hw *hw, u16 qid, u64 desc_area,
> > +                        u64 driver_area, u64 device_area);
> >  u16 ifcvf_set_vq_vector(struct ifcvf_hw *hw, u16 qid, int vector);
> >  u16 ifcvf_set_config_vector(struct ifcvf_hw *hw, int vector);
> > +void ifcvf_set_vq_num(struct ifcvf_hw *hw, u16 qid, u32 num);
> > +void ifcvf_set_vq_ready(struct ifcvf_hw *hw, u16 qid, bool ready);
> > +bool ifcvf_get_vq_ready(struct ifcvf_hw *hw, u16 qid);
> >  #endif /* _IFCVF_H_ */
> > diff --git a/drivers/vdpa/ifcvf/ifcvf_main.c b/drivers/vdpa/ifcvf/ifcvf_main.c
> > index 4366320fb68d..e442aa11333e 100644
> > --- a/drivers/vdpa/ifcvf/ifcvf_main.c
> > +++ b/drivers/vdpa/ifcvf/ifcvf_main.c
> > @@ -374,37 +374,6 @@ static int ifcvf_start_datapath(void *private)
> >         return ret;
> >  }
> >
> > -static int ifcvf_stop_datapath(void *private)
> > -{
> > -       struct ifcvf_hw *vf = ifcvf_private_to_vf(private);
> > -       int i;
> > -
> > -       for (i = 0; i < vf->nr_vring; i++)
> > -               vf->vring[i].cb.callback = NULL;
> > -
> > -       ifcvf_stop_hw(vf);
> > -
> > -       return 0;
> > -}
> > -
> > -static void ifcvf_reset_vring(struct ifcvf_adapter *adapter)
> > -{
> > -       struct ifcvf_hw *vf = ifcvf_private_to_vf(adapter);
> > -       int i;
> > -
> > -       for (i = 0; i < vf->nr_vring; i++) {
> > -               vf->vring[i].last_avail_idx = 0;
> > -               vf->vring[i].desc = 0;
> > -               vf->vring[i].avail = 0;
> > -               vf->vring[i].used = 0;
> > -               vf->vring[i].ready = 0;
> > -               vf->vring[i].cb.callback = NULL;
> > -               vf->vring[i].cb.private = NULL;
> > -       }
> > -
> > -       ifcvf_reset(vf);
> > -}
> > -
> >  static struct ifcvf_adapter *vdpa_to_adapter(struct vdpa_device *vdpa_dev)
> >  {
> >         return container_of(vdpa_dev, struct ifcvf_adapter, vdpa);
> > @@ -477,6 +446,8 @@ static void ifcvf_vdpa_set_status(struct vdpa_device *vdpa_dev, u8 status)
> >         if (status_old == status)
> >                 return;
> >
> > +       ifcvf_set_status(vf, status);
> > +
> >         if ((status & VIRTIO_CONFIG_S_DRIVER_OK) &&
> >             !(status_old & VIRTIO_CONFIG_S_DRIVER_OK)) {
> >                 ret = ifcvf_request_irq(adapter);
> 
> Does this mean e.g for DRIVER_OK the device may work before the
> interrupt handler is requested?
> 
> Looks racy.
> 
> Thanks
> 
> > @@ -493,7 +464,6 @@ static void ifcvf_vdpa_set_status(struct vdpa_device *vdpa_dev, u8 status)
> >                                   status);
> >         }
> >
> > -       ifcvf_set_status(vf, status);
> >  }
> >
> >  static int ifcvf_vdpa_reset(struct vdpa_device *vdpa_dev)
> > @@ -509,12 +479,10 @@ static int ifcvf_vdpa_reset(struct vdpa_device *vdpa_dev)
> >         if (status_old == 0)
> >                 return 0;
> >
> > -       if (status_old & VIRTIO_CONFIG_S_DRIVER_OK) {
> > -               ifcvf_stop_datapath(adapter);
> > -               ifcvf_free_irq(adapter);
> > -       }
> > +       ifcvf_stop_hw(vf);
> > +       ifcvf_free_irq(adapter);
> >
> > -       ifcvf_reset_vring(adapter);
> > +       ifcvf_reset(vf);
> >
> >         return 0;
> >  }
> > @@ -554,14 +522,17 @@ static void ifcvf_vdpa_set_vq_ready(struct vdpa_device *vdpa_dev,
> >  {
> >         struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);
> >
> > -       vf->vring[qid].ready = ready;
> > +       ifcvf_set_vq_ready(vf, qid, ready);
> >  }
> >
> >  static bool ifcvf_vdpa_get_vq_ready(struct vdpa_device *vdpa_dev, u16 qid)
> >  {
> >         struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);
> > +       bool ready;
> >
> > -       return vf->vring[qid].ready;
> > +       ready = ifcvf_get_vq_ready(vf, qid);
> > +
> > +       return ready;
> >  }
> >
> >  static void ifcvf_vdpa_set_vq_num(struct vdpa_device *vdpa_dev, u16 qid,
> > @@ -569,7 +540,7 @@ static void ifcvf_vdpa_set_vq_num(struct vdpa_device *vdpa_dev, u16 qid,
> >  {
> >         struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);
> >
> > -       vf->vring[qid].size = num;
> > +       ifcvf_set_vq_num(vf, qid, num);
> >  }
> >
> >  static int ifcvf_vdpa_set_vq_address(struct vdpa_device *vdpa_dev, u16 qid,
> > @@ -578,11 +549,7 @@ static int ifcvf_vdpa_set_vq_address(struct vdpa_device *vdpa_dev, u16 qid,
> >  {
> >         struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);
> >
> > -       vf->vring[qid].desc = desc_area;
> > -       vf->vring[qid].avail = driver_area;
> > -       vf->vring[qid].used = device_area;
> > -
> > -       return 0;
> > +       return ifcvf_set_vq_address(vf, qid, desc_area, driver_area, device_area);
> >  }
> >
> >  static void ifcvf_vdpa_kick_vq(struct vdpa_device *vdpa_dev, u16 qid)
> > --
> > 2.31.1
> >


^ permalink raw reply	[flat|nested] 10+ messages in thread

* Re: [PATCH] vDPA/ifcvf: allow userspace to suspend a queue
@ 2022-04-13  8:54     ` Michael S. Tsirkin
  0 siblings, 0 replies; 10+ messages in thread
From: Michael S. Tsirkin @ 2022-04-13  8:54 UTC (permalink / raw)
  To: Jason Wang; +Cc: netdev, Zhu Lingshan, virtualization

On Wed, Apr 13, 2022 at 04:25:22PM +0800, Jason Wang wrote:
> On Mon, Apr 11, 2022 at 11:18 AM Zhu Lingshan <lingshan.zhu@intel.com> wrote:
> >
> > Formerly, ifcvf driver has implemented a lazy-initialization mechanism
> > for the virtqueues, it would store all virtqueue config fields that
> > passed down from the userspace, then load them to the virtqueues and
> > enable the queues upon DRIVER_OK.
> >
> > To allow the userspace to suspend a virtqueue,
> > this commit passes queue_enable to the virtqueue directly through
> > set_vq_ready().
> >
> > This feature requires, and this commit implements, all virtqueue
> > ops (set_vq_addr, set_vq_num and set_vq_ready) to take immediate
> > actions rather than lazy-initialization, so ifcvf_hw_enable() is retired.
> >
> > To avoid losing virtqueue configurations caused by multiple
> > rounds of reset(), this commit also refactors the device reset
> > routine; now it simply resets the config handler and the virtqueues,
> > and performs the device-reset() only once.
> >
> > Signed-off-by: Zhu Lingshan <lingshan.zhu@intel.com>
> > ---
> >  drivers/vdpa/ifcvf/ifcvf_base.c | 94 ++++++++++++++++++++-------------
> >  drivers/vdpa/ifcvf/ifcvf_base.h | 11 ++--
> >  drivers/vdpa/ifcvf/ifcvf_main.c | 57 +++++---------------
> >  3 files changed, 75 insertions(+), 87 deletions(-)
> >
> > diff --git a/drivers/vdpa/ifcvf/ifcvf_base.c b/drivers/vdpa/ifcvf/ifcvf_base.c
> > index 48c4dadb0c7c..19eb0dcac123 100644
> > --- a/drivers/vdpa/ifcvf/ifcvf_base.c
> > +++ b/drivers/vdpa/ifcvf/ifcvf_base.c
> > @@ -175,16 +175,12 @@ u8 ifcvf_get_status(struct ifcvf_hw *hw)
> >  void ifcvf_set_status(struct ifcvf_hw *hw, u8 status)
> >  {
> >         vp_iowrite8(status, &hw->common_cfg->device_status);
> > +       vp_ioread8(&hw->common_cfg->device_status);
> 
> This looks confusing, the name of the function is to set the status
> but what actually implemented here is to get the status.
> 
> >  }
> >
> >  void ifcvf_reset(struct ifcvf_hw *hw)
> >  {
> > -       hw->config_cb.callback = NULL;
> > -       hw->config_cb.private = NULL;
> > -
> >         ifcvf_set_status(hw, 0);
> > -       /* flush set_status, make sure VF is stopped, reset */
> > -       ifcvf_get_status(hw);
> >  }
> >
> >  static void ifcvf_add_status(struct ifcvf_hw *hw, u8 status)
> > @@ -331,68 +327,94 @@ int ifcvf_set_vq_state(struct ifcvf_hw *hw, u16 qid, u16 num)
> >         ifcvf_lm = (struct ifcvf_lm_cfg __iomem *)hw->lm_cfg;
> >         q_pair_id = qid / hw->nr_vring;
> >         avail_idx_addr = &ifcvf_lm->vring_lm_cfg[q_pair_id].idx_addr[qid % 2];
> > -       hw->vring[qid].last_avail_idx = num;
> >         vp_iowrite16(num, avail_idx_addr);
> > +       vp_ioread16(avail_idx_addr);
> 
> This looks like a bug fix.

is this to flush out the status write?  pls add a comment
explaining when and why it's needed.

> >
> >         return 0;
> >  }
> >
> > -static int ifcvf_hw_enable(struct ifcvf_hw *hw)
> > +void ifcvf_set_vq_num(struct ifcvf_hw *hw, u16 qid, u32 num)
> >  {
> > -       struct virtio_pci_common_cfg __iomem *cfg;
> > -       u32 i;
> > +       struct virtio_pci_common_cfg __iomem *cfg = hw->common_cfg;
> >
> > -       cfg = hw->common_cfg;
> > -       for (i = 0; i < hw->nr_vring; i++) {
> > -               if (!hw->vring[i].ready)
> > -                       break;
> > +       vp_iowrite16(qid, &cfg->queue_select);
> > +       vp_iowrite16(num, &cfg->queue_size);
> > +       vp_ioread16(&cfg->queue_size);
> > +}
> >
> > -               vp_iowrite16(i, &cfg->queue_select);
> > -               vp_iowrite64_twopart(hw->vring[i].desc, &cfg->queue_desc_lo,
> > -                                    &cfg->queue_desc_hi);
> > -               vp_iowrite64_twopart(hw->vring[i].avail, &cfg->queue_avail_lo,
> > -                                     &cfg->queue_avail_hi);
> > -               vp_iowrite64_twopart(hw->vring[i].used, &cfg->queue_used_lo,
> > -                                    &cfg->queue_used_hi);
> > -               vp_iowrite16(hw->vring[i].size, &cfg->queue_size);
> > -               ifcvf_set_vq_state(hw, i, hw->vring[i].last_avail_idx);
> > -               vp_iowrite16(1, &cfg->queue_enable);
> > -       }
> > +int ifcvf_set_vq_address(struct ifcvf_hw *hw, u16 qid, u64 desc_area,
> > +                        u64 driver_area, u64 device_area)
> > +{
> > +       struct virtio_pci_common_cfg __iomem *cfg = hw->common_cfg;
> > +
> > +       vp_iowrite16(qid, &cfg->queue_select);
> > +       vp_iowrite64_twopart(desc_area, &cfg->queue_desc_lo,
> > +                            &cfg->queue_desc_hi);
> > +       vp_iowrite64_twopart(driver_area, &cfg->queue_avail_lo,
> > +                            &cfg->queue_avail_hi);
> > +       vp_iowrite64_twopart(device_area, &cfg->queue_used_lo,
> > +                            &cfg->queue_used_hi);
> > +       /* to flush IO */
> > +       vp_ioread16(&cfg->queue_select);
> 
> Why do we need to flush I/O here?
> 
> >
> >         return 0;
> >  }
> >
> > -static void ifcvf_hw_disable(struct ifcvf_hw *hw)
> > +void ifcvf_set_vq_ready(struct ifcvf_hw *hw, u16 qid, bool ready)
> >  {
> > -       u32 i;
> > +       struct virtio_pci_common_cfg __iomem *cfg = hw->common_cfg;
> >
> > -       ifcvf_set_config_vector(hw, VIRTIO_MSI_NO_VECTOR);
> > -       for (i = 0; i < hw->nr_vring; i++) {
> > -               ifcvf_set_vq_vector(hw, i, VIRTIO_MSI_NO_VECTOR);
> > -       }
> > +       vp_iowrite16(qid, &cfg->queue_select);
> > +       vp_iowrite16(ready, &cfg->queue_enable);
> 
> I think we need a comment to explain that IFCVF can support write to
> queue_enable since it's not allowed by the virtio spec.


I think you mean writing 0 there. writing 1 is allowed.


> 
> > +       vp_ioread16(&cfg->queue_enable);
> > +}
> > +
> > +bool ifcvf_get_vq_ready(struct ifcvf_hw *hw, u16 qid)
> > +{
> > +       struct virtio_pci_common_cfg __iomem *cfg = hw->common_cfg;
> > +       bool queue_enable;
> > +
> > +       vp_iowrite16(qid, &cfg->queue_select);
> > +       queue_enable = vp_ioread16(&cfg->queue_enable);
> > +
> > +       return (bool)queue_enable;
> >  }
> >
> >  int ifcvf_start_hw(struct ifcvf_hw *hw)
> >  {
> > -       ifcvf_reset(hw);
> >         ifcvf_add_status(hw, VIRTIO_CONFIG_S_ACKNOWLEDGE);
> >         ifcvf_add_status(hw, VIRTIO_CONFIG_S_DRIVER);
> >
> >         if (ifcvf_config_features(hw) < 0)
> >                 return -EINVAL;
> >
> > -       if (ifcvf_hw_enable(hw) < 0)
> > -               return -EINVAL;
> > -
> >         ifcvf_add_status(hw, VIRTIO_CONFIG_S_DRIVER_OK);
> >
> >         return 0;
> >  }
> >
> > +static void ifcvf_reset_vring(struct ifcvf_hw *hw)
> > +{
> > +       int i;
> > +
> > +       for (i = 0; i < hw->nr_vring; i++) {
> > +               hw->vring[i].cb.callback = NULL;
> > +               hw->vring[i].cb.private = NULL;
> > +               ifcvf_set_vq_vector(hw, i, VIRTIO_MSI_NO_VECTOR);
> > +       }
> > +}
> > +
> > +static void ifcvf_reset_config_handler(struct ifcvf_hw *hw)
> > +{
> > +       hw->config_cb.callback = NULL;
> > +       hw->config_cb.private = NULL;
> > +       ifcvf_set_config_vector(hw, VIRTIO_MSI_NO_VECTOR);
> 
> Do we need to synchronize with the IRQ here?
> 
> > +}
> > +
> >  void ifcvf_stop_hw(struct ifcvf_hw *hw)
> >  {
> > -       ifcvf_hw_disable(hw);
> > -       ifcvf_reset(hw);
> > +       ifcvf_reset_vring(hw);
> > +       ifcvf_reset_config_handler(hw);
> >  }
> >
> >  void ifcvf_notify_queue(struct ifcvf_hw *hw, u16 qid)
> > diff --git a/drivers/vdpa/ifcvf/ifcvf_base.h b/drivers/vdpa/ifcvf/ifcvf_base.h
> > index 115b61f4924b..41d86985361f 100644
> > --- a/drivers/vdpa/ifcvf/ifcvf_base.h
> > +++ b/drivers/vdpa/ifcvf/ifcvf_base.h
> > @@ -49,12 +49,6 @@
> >  #define MSIX_VECTOR_DEV_SHARED                 3
> >
> >  struct vring_info {
> > -       u64 desc;
> > -       u64 avail;
> > -       u64 used;
> > -       u16 size;
> > -       u16 last_avail_idx;
> > -       bool ready;
> >         void __iomem *notify_addr;
> >         phys_addr_t notify_pa;
> >         u32 irq;
> > @@ -131,6 +125,11 @@ int ifcvf_set_vq_state(struct ifcvf_hw *hw, u16 qid, u16 num);
> >  struct ifcvf_adapter *vf_to_adapter(struct ifcvf_hw *hw);
> >  int ifcvf_probed_virtio_net(struct ifcvf_hw *hw);
> >  u32 ifcvf_get_config_size(struct ifcvf_hw *hw);
> > +int ifcvf_set_vq_address(struct ifcvf_hw *hw, u16 qid, u64 desc_area,
> > +                        u64 driver_area, u64 device_area);
> >  u16 ifcvf_set_vq_vector(struct ifcvf_hw *hw, u16 qid, int vector);
> >  u16 ifcvf_set_config_vector(struct ifcvf_hw *hw, int vector);
> > +void ifcvf_set_vq_num(struct ifcvf_hw *hw, u16 qid, u32 num);
> > +void ifcvf_set_vq_ready(struct ifcvf_hw *hw, u16 qid, bool ready);
> > +bool ifcvf_get_vq_ready(struct ifcvf_hw *hw, u16 qid);
> >  #endif /* _IFCVF_H_ */
> > diff --git a/drivers/vdpa/ifcvf/ifcvf_main.c b/drivers/vdpa/ifcvf/ifcvf_main.c
> > index 4366320fb68d..e442aa11333e 100644
> > --- a/drivers/vdpa/ifcvf/ifcvf_main.c
> > +++ b/drivers/vdpa/ifcvf/ifcvf_main.c
> > @@ -374,37 +374,6 @@ static int ifcvf_start_datapath(void *private)
> >         return ret;
> >  }
> >
> > -static int ifcvf_stop_datapath(void *private)
> > -{
> > -       struct ifcvf_hw *vf = ifcvf_private_to_vf(private);
> > -       int i;
> > -
> > -       for (i = 0; i < vf->nr_vring; i++)
> > -               vf->vring[i].cb.callback = NULL;
> > -
> > -       ifcvf_stop_hw(vf);
> > -
> > -       return 0;
> > -}
> > -
> > -static void ifcvf_reset_vring(struct ifcvf_adapter *adapter)
> > -{
> > -       struct ifcvf_hw *vf = ifcvf_private_to_vf(adapter);
> > -       int i;
> > -
> > -       for (i = 0; i < vf->nr_vring; i++) {
> > -               vf->vring[i].last_avail_idx = 0;
> > -               vf->vring[i].desc = 0;
> > -               vf->vring[i].avail = 0;
> > -               vf->vring[i].used = 0;
> > -               vf->vring[i].ready = 0;
> > -               vf->vring[i].cb.callback = NULL;
> > -               vf->vring[i].cb.private = NULL;
> > -       }
> > -
> > -       ifcvf_reset(vf);
> > -}
> > -
> >  static struct ifcvf_adapter *vdpa_to_adapter(struct vdpa_device *vdpa_dev)
> >  {
> >         return container_of(vdpa_dev, struct ifcvf_adapter, vdpa);
> > @@ -477,6 +446,8 @@ static void ifcvf_vdpa_set_status(struct vdpa_device *vdpa_dev, u8 status)
> >         if (status_old == status)
> >                 return;
> >
> > +       ifcvf_set_status(vf, status);
> > +
> >         if ((status & VIRTIO_CONFIG_S_DRIVER_OK) &&
> >             !(status_old & VIRTIO_CONFIG_S_DRIVER_OK)) {
> >                 ret = ifcvf_request_irq(adapter);
> 
> Does this mean e.g for DRIVER_OK the device may work before the
> interrupt handler is requested?
> 
> Looks racy.
> 
> Thanks
> 
> > @@ -493,7 +464,6 @@ static void ifcvf_vdpa_set_status(struct vdpa_device *vdpa_dev, u8 status)
> >                                   status);
> >         }
> >
> > -       ifcvf_set_status(vf, status);
> >  }
> >
> >  static int ifcvf_vdpa_reset(struct vdpa_device *vdpa_dev)
> > @@ -509,12 +479,10 @@ static int ifcvf_vdpa_reset(struct vdpa_device *vdpa_dev)
> >         if (status_old == 0)
> >                 return 0;
> >
> > -       if (status_old & VIRTIO_CONFIG_S_DRIVER_OK) {
> > -               ifcvf_stop_datapath(adapter);
> > -               ifcvf_free_irq(adapter);
> > -       }
> > +       ifcvf_stop_hw(vf);
> > +       ifcvf_free_irq(adapter);
> >
> > -       ifcvf_reset_vring(adapter);
> > +       ifcvf_reset(vf);
> >
> >         return 0;
> >  }
> > @@ -554,14 +522,17 @@ static void ifcvf_vdpa_set_vq_ready(struct vdpa_device *vdpa_dev,
> >  {
> >         struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);
> >
> > -       vf->vring[qid].ready = ready;
> > +       ifcvf_set_vq_ready(vf, qid, ready);
> >  }
> >
> >  static bool ifcvf_vdpa_get_vq_ready(struct vdpa_device *vdpa_dev, u16 qid)
> >  {
> >         struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);
> > +       bool ready;
> >
> > -       return vf->vring[qid].ready;
> > +       ready = ifcvf_get_vq_ready(vf, qid);
> > +
> > +       return ready;
> >  }
> >
> >  static void ifcvf_vdpa_set_vq_num(struct vdpa_device *vdpa_dev, u16 qid,
> > @@ -569,7 +540,7 @@ static void ifcvf_vdpa_set_vq_num(struct vdpa_device *vdpa_dev, u16 qid,
> >  {
> >         struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);
> >
> > -       vf->vring[qid].size = num;
> > +       ifcvf_set_vq_num(vf, qid, num);
> >  }
> >
> >  static int ifcvf_vdpa_set_vq_address(struct vdpa_device *vdpa_dev, u16 qid,
> > @@ -578,11 +549,7 @@ static int ifcvf_vdpa_set_vq_address(struct vdpa_device *vdpa_dev, u16 qid,
> >  {
> >         struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);
> >
> > -       vf->vring[qid].desc = desc_area;
> > -       vf->vring[qid].avail = driver_area;
> > -       vf->vring[qid].used = device_area;
> > -
> > -       return 0;
> > +       return ifcvf_set_vq_address(vf, qid, desc_area, driver_area, device_area);
> >  }
> >
> >  static void ifcvf_vdpa_kick_vq(struct vdpa_device *vdpa_dev, u16 qid)
> > --
> > 2.31.1
> >

_______________________________________________
Virtualization mailing list
Virtualization@lists.linux-foundation.org
https://lists.linuxfoundation.org/mailman/listinfo/virtualization

^ permalink raw reply	[flat|nested] 10+ messages in thread

* Re: [PATCH] vDPA/ifcvf: allow userspace to suspend a queue
  2022-04-12  6:55 ` Zhou Furong
@ 2022-04-21 11:05   ` Zhu, Lingshan
  2022-04-21 14:27     ` Zhou Furong
  0 siblings, 1 reply; 10+ messages in thread
From: Zhu, Lingshan @ 2022-04-21 11:05 UTC (permalink / raw)
  To: Zhou Furong, jasowang, mst; +Cc: virtualization, netdev



On 4/12/2022 2:55 PM, Zhou Furong wrote:
> Hi,
>
>> +bool ifcvf_get_vq_ready(struct ifcvf_hw *hw, u16 qid)
>> +{
>> +    struct virtio_pci_common_cfg __iomem *cfg = hw->common_cfg;
>> +    bool queue_enable;
>> +
>> +    vp_iowrite16(qid, &cfg->queue_select);
>> +    queue_enable = vp_ioread16(&cfg->queue_enable);
>> +
>> +    return (bool)queue_enable;
> queue_enable is bool, why cast? looks like remove the variable is better.
> return vp_ioread16(&cfg->queue_enable);
Hi, thanks for your comments. This cast tries to make some static 
checkers happy.
Please correct me if I misunderstood it:
vp_ioread16() returns a u16, and bool is not a one-bit variable; it is
C99 _Bool.
C99 defines _Bool to be true (expands to int 1) or false (expands to int 0).
I think this implies a bool is actually capable of storing a u16.
so I am afraid some checkers may complain about "queue_enable = 
vp_ioread16(&cfg->queue_enable);",
a cast here can help us silence the checkers.

see http://www.open-std.org/jtc1/sc22/wg14/www/docs/n1256.pdf section 7.16
and https://www.kernel.org/doc/Documentation/process/coding-style.rst 
section 17

Thanks,
Zhu Lingshan
>
>>   static bool ifcvf_vdpa_get_vq_ready(struct vdpa_device *vdpa_dev, 
>> u16 qid)
>>   {
>>       struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);
>> +    bool ready;
>>   -    return vf->vring[qid].ready;
>> +    ready = ifcvf_get_vq_ready(vf, qid);
>> +
>> +    return ready;
> remove ready looks better
> return ifcvf_get_vq_ready(vf, qid);
>
>
> Best regards,
> Furong


^ permalink raw reply	[flat|nested] 10+ messages in thread

* Re: [PATCH] vDPA/ifcvf: allow userspace to suspend a queue
  2022-04-13  8:25   ` Jason Wang
  (?)
  (?)
@ 2022-04-21 11:11   ` Zhu, Lingshan
  -1 siblings, 0 replies; 10+ messages in thread
From: Zhu, Lingshan @ 2022-04-21 11:11 UTC (permalink / raw)
  To: Jason Wang, Michael S. Tsirkin; +Cc: virtualization, netdev



On 4/13/2022 4:25 PM, Jason Wang wrote:
> On Mon, Apr 11, 2022 at 11:18 AM Zhu Lingshan<lingshan.zhu@intel.com>  wrote:
>> Formerly, ifcvf driver has implemented a lazy-initialization mechanism
>> for the virtqueues, it would store all virtqueue config fields that
>> passed down from the userspace, then load them to the virtqueues and
>> enable the queues upon DRIVER_OK.
>>
>> To allow the userspace to suspend a virtqueue,
>> this commit passes queue_enable to the virtqueue directly through
>> set_vq_ready().
>>
>> This feature requires this commit to implement all virtqueue
>> ops (set_vq_addr, set_vq_num and set_vq_ready) so they take immediate
>> actions rather than lazy-initialization, so ifcvf_hw_enable() is retired.
>>
>> To avoid losing virtqueue configurations caused by multiple
>> rounds of reset(), this commit also refactors the device reset
>> routine, now it simply resets the config handler and the virtqueues,
>> and performs device-reset() only once.
>>
>> Signed-off-by: Zhu Lingshan<lingshan.zhu@intel.com>
>> ---
>>   drivers/vdpa/ifcvf/ifcvf_base.c | 94 ++++++++++++++++++++-------------
>>   drivers/vdpa/ifcvf/ifcvf_base.h | 11 ++--
>>   drivers/vdpa/ifcvf/ifcvf_main.c | 57 +++++---------------
>>   3 files changed, 75 insertions(+), 87 deletions(-)
>>
>> diff --git a/drivers/vdpa/ifcvf/ifcvf_base.c b/drivers/vdpa/ifcvf/ifcvf_base.c
>> index 48c4dadb0c7c..19eb0dcac123 100644
>> --- a/drivers/vdpa/ifcvf/ifcvf_base.c
>> +++ b/drivers/vdpa/ifcvf/ifcvf_base.c
>> @@ -175,16 +175,12 @@ u8 ifcvf_get_status(struct ifcvf_hw *hw)
>>   void ifcvf_set_status(struct ifcvf_hw *hw, u8 status)
>>   {
>>          vp_iowrite8(status, &hw->common_cfg->device_status);
>> +       vp_ioread8(&hw->common_cfg->device_status);
> This looks confusing, the name of the function is to set the status
> but what actually implemented here is to get the status.
this read is to flush IO; however, I think we don't need to flush the
IO requests here, since writing DRIVER_OK will make sure they are flushed.
>
>>   }
>>
>>   void ifcvf_reset(struct ifcvf_hw *hw)
>>   {
>> -       hw->config_cb.callback = NULL;
>> -       hw->config_cb.private = NULL;
>> -
>>          ifcvf_set_status(hw, 0);
>> -       /* flush set_status, make sure VF is stopped, reset */
>> -       ifcvf_get_status(hw);
>>   }
>>
>>   static void ifcvf_add_status(struct ifcvf_hw *hw, u8 status)
>> @@ -331,68 +327,94 @@ int ifcvf_set_vq_state(struct ifcvf_hw *hw, u16 qid, u16 num)
>>          ifcvf_lm = (struct ifcvf_lm_cfg __iomem *)hw->lm_cfg;
>>          q_pair_id = qid / hw->nr_vring;
>>          avail_idx_addr = &ifcvf_lm->vring_lm_cfg[q_pair_id].idx_addr[qid % 2];
>> -       hw->vring[qid].last_avail_idx = num;
>>          vp_iowrite16(num, avail_idx_addr);
>> +       vp_ioread16(avail_idx_addr);
> This looks like a bug fix.
same as above, this flush has been removed; writing DRIVER_OK flushes
them once and for all.
>
>>          return 0;
>>   }
>>
>> -static int ifcvf_hw_enable(struct ifcvf_hw *hw)
>> +void ifcvf_set_vq_num(struct ifcvf_hw *hw, u16 qid, u32 num)
>>   {
>> -       struct virtio_pci_common_cfg __iomem *cfg;
>> -       u32 i;
>> +       struct virtio_pci_common_cfg __iomem *cfg = hw->common_cfg;
>>
>> -       cfg = hw->common_cfg;
>> -       for (i = 0; i < hw->nr_vring; i++) {
>> -               if (!hw->vring[i].ready)
>> -                       break;
>> +       vp_iowrite16(qid, &cfg->queue_select);
>> +       vp_iowrite16(num, &cfg->queue_size);
>> +       vp_ioread16(&cfg->queue_size);
>> +}
>>
>> -               vp_iowrite16(i, &cfg->queue_select);
>> -               vp_iowrite64_twopart(hw->vring[i].desc, &cfg->queue_desc_lo,
>> -                                    &cfg->queue_desc_hi);
>> -               vp_iowrite64_twopart(hw->vring[i].avail, &cfg->queue_avail_lo,
>> -                                     &cfg->queue_avail_hi);
>> -               vp_iowrite64_twopart(hw->vring[i].used, &cfg->queue_used_lo,
>> -                                    &cfg->queue_used_hi);
>> -               vp_iowrite16(hw->vring[i].size, &cfg->queue_size);
>> -               ifcvf_set_vq_state(hw, i, hw->vring[i].last_avail_idx);
>> -               vp_iowrite16(1, &cfg->queue_enable);
>> -       }
>> +int ifcvf_set_vq_address(struct ifcvf_hw *hw, u16 qid, u64 desc_area,
>> +                        u64 driver_area, u64 device_area)
>> +{
>> +       struct virtio_pci_common_cfg __iomem *cfg = hw->common_cfg;
>> +
>> +       vp_iowrite16(qid, &cfg->queue_select);
>> +       vp_iowrite64_twopart(desc_area, &cfg->queue_desc_lo,
>> +                            &cfg->queue_desc_hi);
>> +       vp_iowrite64_twopart(driver_area, &cfg->queue_avail_lo,
>> +                            &cfg->queue_avail_hi);
>> +       vp_iowrite64_twopart(device_area, &cfg->queue_used_lo,
>> +                            &cfg->queue_used_hi);
>> +       /* to flush IO */
>> +       vp_ioread16(&cfg->queue_select);
> Why do we need to flush I/O here?
yes, same as above
>>          return 0;
>>   }
>>
>> -static void ifcvf_hw_disable(struct ifcvf_hw *hw)
>> +void ifcvf_set_vq_ready(struct ifcvf_hw *hw, u16 qid, bool ready)
>>   {
>> -       u32 i;
>> +       struct virtio_pci_common_cfg __iomem *cfg = hw->common_cfg;
>>
>> -       ifcvf_set_config_vector(hw, VIRTIO_MSI_NO_VECTOR);
>> -       for (i = 0; i < hw->nr_vring; i++) {
>> -               ifcvf_set_vq_vector(hw, i, VIRTIO_MSI_NO_VECTOR);
>> -       }
>> +       vp_iowrite16(qid, &cfg->queue_select);
>> +       vp_iowrite16(ready, &cfg->queue_enable);
> I think we need a comment to explain that IFCVF can support write to
> queue_enable since it's not allowed by the virtio spec.
sure
>
>> +       vp_ioread16(&cfg->queue_enable);
>> +}
>> +
>> +bool ifcvf_get_vq_ready(struct ifcvf_hw *hw, u16 qid)
>> +{
>> +       struct virtio_pci_common_cfg __iomem *cfg = hw->common_cfg;
>> +       bool queue_enable;
>> +
>> +       vp_iowrite16(qid, &cfg->queue_select);
>> +       queue_enable = vp_ioread16(&cfg->queue_enable);
>> +
>> +       return (bool)queue_enable;
>>   }
>>
>>   int ifcvf_start_hw(struct ifcvf_hw *hw)
>>   {
>> -       ifcvf_reset(hw);
>>          ifcvf_add_status(hw, VIRTIO_CONFIG_S_ACKNOWLEDGE);
>>          ifcvf_add_status(hw, VIRTIO_CONFIG_S_DRIVER);
>>
>>          if (ifcvf_config_features(hw) < 0)
>>                  return -EINVAL;
>>
>> -       if (ifcvf_hw_enable(hw) < 0)
>> -               return -EINVAL;
>> -
>>          ifcvf_add_status(hw, VIRTIO_CONFIG_S_DRIVER_OK);
>>
>>          return 0;
>>   }
>>
>> +static void ifcvf_reset_vring(struct ifcvf_hw *hw)
>> +{
>> +       int i;
>> +
>> +       for (i = 0; i < hw->nr_vring; i++) {
>> +               hw->vring[i].cb.callback = NULL;
>> +               hw->vring[i].cb.private = NULL;
>> +               ifcvf_set_vq_vector(hw, i, VIRTIO_MSI_NO_VECTOR);
>> +       }
>> +}
>> +
>> +static void ifcvf_reset_config_handler(struct ifcvf_hw *hw)
>> +{
>> +       hw->config_cb.callback = NULL;
>> +       hw->config_cb.private = NULL;
>> +       ifcvf_set_config_vector(hw, VIRTIO_MSI_NO_VECTOR);
> Do we need to synchronize with the IRQ here?
added synchronize_irq for both reset_vring() and reset_config_handler()
>> +}
>> +
>>   void ifcvf_stop_hw(struct ifcvf_hw *hw)
>>   {
>> -       ifcvf_hw_disable(hw);
>> -       ifcvf_reset(hw);
>> +       ifcvf_reset_vring(hw);
>> +       ifcvf_reset_config_handler(hw);
>>   }
>>
>>   void ifcvf_notify_queue(struct ifcvf_hw *hw, u16 qid)
>> diff --git a/drivers/vdpa/ifcvf/ifcvf_base.h b/drivers/vdpa/ifcvf/ifcvf_base.h
>> index 115b61f4924b..41d86985361f 100644
>> --- a/drivers/vdpa/ifcvf/ifcvf_base.h
>> +++ b/drivers/vdpa/ifcvf/ifcvf_base.h
>> @@ -49,12 +49,6 @@
>>   #define MSIX_VECTOR_DEV_SHARED                 3
>>
>>   struct vring_info {
>> -       u64 desc;
>> -       u64 avail;
>> -       u64 used;
>> -       u16 size;
>> -       u16 last_avail_idx;
>> -       bool ready;
>>          void __iomem *notify_addr;
>>          phys_addr_t notify_pa;
>>          u32 irq;
>> @@ -131,6 +125,11 @@ int ifcvf_set_vq_state(struct ifcvf_hw *hw, u16 qid, u16 num);
>>   struct ifcvf_adapter *vf_to_adapter(struct ifcvf_hw *hw);
>>   int ifcvf_probed_virtio_net(struct ifcvf_hw *hw);
>>   u32 ifcvf_get_config_size(struct ifcvf_hw *hw);
>> +int ifcvf_set_vq_address(struct ifcvf_hw *hw, u16 qid, u64 desc_area,
>> +                        u64 driver_area, u64 device_area);
>>   u16 ifcvf_set_vq_vector(struct ifcvf_hw *hw, u16 qid, int vector);
>>   u16 ifcvf_set_config_vector(struct ifcvf_hw *hw, int vector);
>> +void ifcvf_set_vq_num(struct ifcvf_hw *hw, u16 qid, u32 num);
>> +void ifcvf_set_vq_ready(struct ifcvf_hw *hw, u16 qid, bool ready);
>> +bool ifcvf_get_vq_ready(struct ifcvf_hw *hw, u16 qid);
>>   #endif /* _IFCVF_H_ */
>> diff --git a/drivers/vdpa/ifcvf/ifcvf_main.c b/drivers/vdpa/ifcvf/ifcvf_main.c
>> index 4366320fb68d..e442aa11333e 100644
>> --- a/drivers/vdpa/ifcvf/ifcvf_main.c
>> +++ b/drivers/vdpa/ifcvf/ifcvf_main.c
>> @@ -374,37 +374,6 @@ static int ifcvf_start_datapath(void *private)
>>          return ret;
>>   }
>>
>> -static int ifcvf_stop_datapath(void *private)
>> -{
>> -       struct ifcvf_hw *vf = ifcvf_private_to_vf(private);
>> -       int i;
>> -
>> -       for (i = 0; i < vf->nr_vring; i++)
>> -               vf->vring[i].cb.callback = NULL;
>> -
>> -       ifcvf_stop_hw(vf);
>> -
>> -       return 0;
>> -}
>> -
>> -static void ifcvf_reset_vring(struct ifcvf_adapter *adapter)
>> -{
>> -       struct ifcvf_hw *vf = ifcvf_private_to_vf(adapter);
>> -       int i;
>> -
>> -       for (i = 0; i < vf->nr_vring; i++) {
>> -               vf->vring[i].last_avail_idx = 0;
>> -               vf->vring[i].desc = 0;
>> -               vf->vring[i].avail = 0;
>> -               vf->vring[i].used = 0;
>> -               vf->vring[i].ready = 0;
>> -               vf->vring[i].cb.callback = NULL;
>> -               vf->vring[i].cb.private = NULL;
>> -       }
>> -
>> -       ifcvf_reset(vf);
>> -}
>> -
>>   static struct ifcvf_adapter *vdpa_to_adapter(struct vdpa_device *vdpa_dev)
>>   {
>>          return container_of(vdpa_dev, struct ifcvf_adapter, vdpa);
>> @@ -477,6 +446,8 @@ static void ifcvf_vdpa_set_status(struct vdpa_device *vdpa_dev, u8 status)
>>          if (status_old == status)
>>                  return;
>>
>> +       ifcvf_set_status(vf, status);
>> +
>>          if ((status & VIRTIO_CONFIG_S_DRIVER_OK) &&
>>              !(status_old & VIRTIO_CONFIG_S_DRIVER_OK)) {
>>                  ret = ifcvf_request_irq(adapter);
> Does this mean e.g for DRIVER_OK the device may work before the
> interrupt handler is requested?
now we call ifcvf_set_status(), which writes the status to the device, at the
end of the function
ifcvf_vdpa_set_status(), so it has a chance to check whether it is
setting DRIVER_OK;
then it can request_irq upon DRIVER_OK, so there are no gaps anymore.
> Looks racy.
>
> Thanks
>
>> @@ -493,7 +464,6 @@ static void ifcvf_vdpa_set_status(struct vdpa_device *vdpa_dev, u8 status)
>>                                    status);
>>          }
>>
>> -       ifcvf_set_status(vf, status);
>>   }
>>
>>   static int ifcvf_vdpa_reset(struct vdpa_device *vdpa_dev)
>> @@ -509,12 +479,10 @@ static int ifcvf_vdpa_reset(struct vdpa_device *vdpa_dev)
>>          if (status_old == 0)
>>                  return 0;
>>
>> -       if (status_old & VIRTIO_CONFIG_S_DRIVER_OK) {
>> -               ifcvf_stop_datapath(adapter);
>> -               ifcvf_free_irq(adapter);
>> -       }
>> +       ifcvf_stop_hw(vf);
>> +       ifcvf_free_irq(adapter);
>>
>> -       ifcvf_reset_vring(adapter);
>> +       ifcvf_reset(vf);
>>
>>          return 0;
>>   }
>> @@ -554,14 +522,17 @@ static void ifcvf_vdpa_set_vq_ready(struct vdpa_device *vdpa_dev,
>>   {
>>          struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);
>>
>> -       vf->vring[qid].ready = ready;
>> +       ifcvf_set_vq_ready(vf, qid, ready);
>>   }
>>
>>   static bool ifcvf_vdpa_get_vq_ready(struct vdpa_device *vdpa_dev, u16 qid)
>>   {
>>          struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);
>> +       bool ready;
>>
>> -       return vf->vring[qid].ready;
>> +       ready = ifcvf_get_vq_ready(vf, qid);
>> +
>> +       return ready;
>>   }
>>
>>   static void ifcvf_vdpa_set_vq_num(struct vdpa_device *vdpa_dev, u16 qid,
>> @@ -569,7 +540,7 @@ static void ifcvf_vdpa_set_vq_num(struct vdpa_device *vdpa_dev, u16 qid,
>>   {
>>          struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);
>>
>> -       vf->vring[qid].size = num;
>> +       ifcvf_set_vq_num(vf, qid, num);
>>   }
>>
>>   static int ifcvf_vdpa_set_vq_address(struct vdpa_device *vdpa_dev, u16 qid,
>> @@ -578,11 +549,7 @@ static int ifcvf_vdpa_set_vq_address(struct vdpa_device *vdpa_dev, u16 qid,
>>   {
>>          struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);
>>
>> -       vf->vring[qid].desc = desc_area;
>> -       vf->vring[qid].avail = driver_area;
>> -       vf->vring[qid].used = device_area;
>> -
>> -       return 0;
>> +       return ifcvf_set_vq_address(vf, qid, desc_area, driver_area, device_area);
>>   }
>>
>>   static void ifcvf_vdpa_kick_vq(struct vdpa_device *vdpa_dev, u16 qid)
>> --
>> 2.31.1
>>


^ permalink raw reply	[flat|nested] 10+ messages in thread

* Re: [PATCH] vDPA/ifcvf: allow userspace to suspend a queue
  2022-04-13  8:54     ` Michael S. Tsirkin
  (?)
@ 2022-04-21 11:13     ` Zhu, Lingshan
  -1 siblings, 0 replies; 10+ messages in thread
From: Zhu, Lingshan @ 2022-04-21 11:13 UTC (permalink / raw)
  To: Michael S. Tsirkin, Jason Wang; +Cc: virtualization, netdev



On 4/13/2022 4:54 PM, Michael S. Tsirkin wrote:
> On Wed, Apr 13, 2022 at 04:25:22PM +0800, Jason Wang wrote:
>> On Mon, Apr 11, 2022 at 11:18 AM Zhu Lingshan <lingshan.zhu@intel.com> wrote:
>>> Formerly, ifcvf driver has implemented a lazy-initialization mechanism
>>> for the virtqueues, it would store all virtqueue config fields that
>>> passed down from the userspace, then load them to the virtqueues and
>>> enable the queues upon DRIVER_OK.
>>>
>>> To allow the userspace to suspend a virtqueue,
>>> this commit passes queue_enable to the virtqueue directly through
>>> set_vq_ready().
>>>
>>> This feature requires this commit to implement all virtqueue
>>> ops (set_vq_addr, set_vq_num and set_vq_ready) so they take immediate
>>> actions rather than lazy-initialization, so ifcvf_hw_enable() is retired.
>>>
>>> To avoid losing virtqueue configurations caused by multiple
>>> rounds of reset(), this commit also refactors the device reset
>>> routine, now it simply resets the config handler and the virtqueues,
>>> and performs device-reset() only once.
>>>
>>> Signed-off-by: Zhu Lingshan <lingshan.zhu@intel.com>
>>> ---
>>>   drivers/vdpa/ifcvf/ifcvf_base.c | 94 ++++++++++++++++++++-------------
>>>   drivers/vdpa/ifcvf/ifcvf_base.h | 11 ++--
>>>   drivers/vdpa/ifcvf/ifcvf_main.c | 57 +++++---------------
>>>   3 files changed, 75 insertions(+), 87 deletions(-)
>>>
>>> diff --git a/drivers/vdpa/ifcvf/ifcvf_base.c b/drivers/vdpa/ifcvf/ifcvf_base.c
>>> index 48c4dadb0c7c..19eb0dcac123 100644
>>> --- a/drivers/vdpa/ifcvf/ifcvf_base.c
>>> +++ b/drivers/vdpa/ifcvf/ifcvf_base.c
>>> @@ -175,16 +175,12 @@ u8 ifcvf_get_status(struct ifcvf_hw *hw)
>>>   void ifcvf_set_status(struct ifcvf_hw *hw, u8 status)
>>>   {
>>>          vp_iowrite8(status, &hw->common_cfg->device_status);
>>> +       vp_ioread8(&hw->common_cfg->device_status);
>> This looks confusing, the name of the function is to set the status
>> but what actually implemented here is to get the status.
>>
>>>   }
>>>
>>>   void ifcvf_reset(struct ifcvf_hw *hw)
>>>   {
>>> -       hw->config_cb.callback = NULL;
>>> -       hw->config_cb.private = NULL;
>>> -
>>>          ifcvf_set_status(hw, 0);
>>> -       /* flush set_status, make sure VF is stopped, reset */
>>> -       ifcvf_get_status(hw);
>>>   }
>>>
>>>   static void ifcvf_add_status(struct ifcvf_hw *hw, u8 status)
>>> @@ -331,68 +327,94 @@ int ifcvf_set_vq_state(struct ifcvf_hw *hw, u16 qid, u16 num)
>>>          ifcvf_lm = (struct ifcvf_lm_cfg __iomem *)hw->lm_cfg;
>>>          q_pair_id = qid / hw->nr_vring;
>>>          avail_idx_addr = &ifcvf_lm->vring_lm_cfg[q_pair_id].idx_addr[qid % 2];
>>> -       hw->vring[qid].last_avail_idx = num;
>>>          vp_iowrite16(num, avail_idx_addr);
>>> +       vp_ioread16(avail_idx_addr);
>> This looks like a bug fix.
> is this to flush out the status write?  pls add a comment
> explaining when and why it's needed.
I think this may not be needed anymore; setting DRIVER_OK will flush them all,
so all the extra flushes are removed.
>
>>>          return 0;
>>>   }
>>>
>>> -static int ifcvf_hw_enable(struct ifcvf_hw *hw)
>>> +void ifcvf_set_vq_num(struct ifcvf_hw *hw, u16 qid, u32 num)
>>>   {
>>> -       struct virtio_pci_common_cfg __iomem *cfg;
>>> -       u32 i;
>>> +       struct virtio_pci_common_cfg __iomem *cfg = hw->common_cfg;
>>>
>>> -       cfg = hw->common_cfg;
>>> -       for (i = 0; i < hw->nr_vring; i++) {
>>> -               if (!hw->vring[i].ready)
>>> -                       break;
>>> +       vp_iowrite16(qid, &cfg->queue_select);
>>> +       vp_iowrite16(num, &cfg->queue_size);
>>> +       vp_ioread16(&cfg->queue_size);
>>> +}
>>>
>>> -               vp_iowrite16(i, &cfg->queue_select);
>>> -               vp_iowrite64_twopart(hw->vring[i].desc, &cfg->queue_desc_lo,
>>> -                                    &cfg->queue_desc_hi);
>>> -               vp_iowrite64_twopart(hw->vring[i].avail, &cfg->queue_avail_lo,
>>> -                                     &cfg->queue_avail_hi);
>>> -               vp_iowrite64_twopart(hw->vring[i].used, &cfg->queue_used_lo,
>>> -                                    &cfg->queue_used_hi);
>>> -               vp_iowrite16(hw->vring[i].size, &cfg->queue_size);
>>> -               ifcvf_set_vq_state(hw, i, hw->vring[i].last_avail_idx);
>>> -               vp_iowrite16(1, &cfg->queue_enable);
>>> -       }
>>> +int ifcvf_set_vq_address(struct ifcvf_hw *hw, u16 qid, u64 desc_area,
>>> +                        u64 driver_area, u64 device_area)
>>> +{
>>> +       struct virtio_pci_common_cfg __iomem *cfg = hw->common_cfg;
>>> +
>>> +       vp_iowrite16(qid, &cfg->queue_select);
>>> +       vp_iowrite64_twopart(desc_area, &cfg->queue_desc_lo,
>>> +                            &cfg->queue_desc_hi);
>>> +       vp_iowrite64_twopart(driver_area, &cfg->queue_avail_lo,
>>> +                            &cfg->queue_avail_hi);
>>> +       vp_iowrite64_twopart(device_area, &cfg->queue_used_lo,
>>> +                            &cfg->queue_used_hi);
>>> +       /* to flush IO */
>>> +       vp_ioread16(&cfg->queue_select);
>> Why do we need to flush I/O here?
>>
>>>          return 0;
>>>   }
>>>
>>> -static void ifcvf_hw_disable(struct ifcvf_hw *hw)
>>> +void ifcvf_set_vq_ready(struct ifcvf_hw *hw, u16 qid, bool ready)
>>>   {
>>> -       u32 i;
>>> +       struct virtio_pci_common_cfg __iomem *cfg = hw->common_cfg;
>>>
>>> -       ifcvf_set_config_vector(hw, VIRTIO_MSI_NO_VECTOR);
>>> -       for (i = 0; i < hw->nr_vring; i++) {
>>> -               ifcvf_set_vq_vector(hw, i, VIRTIO_MSI_NO_VECTOR);
>>> -       }
>>> +       vp_iowrite16(qid, &cfg->queue_select);
>>> +       vp_iowrite16(ready, &cfg->queue_enable);
>> I think we need a comment to explain that IFCVF can support write to
>> queue_enable since it's not allowed by the virtio spec.
>
> I think you mean writing 0 there. writing 1 is allowed.
I have added a comment here says writing 0 would suspend the queue.
>
>
>>> +       vp_ioread16(&cfg->queue_enable);
>>> +}
>>> +
>>> +bool ifcvf_get_vq_ready(struct ifcvf_hw *hw, u16 qid)
>>> +{
>>> +       struct virtio_pci_common_cfg __iomem *cfg = hw->common_cfg;
>>> +       bool queue_enable;
>>> +
>>> +       vp_iowrite16(qid, &cfg->queue_select);
>>> +       queue_enable = vp_ioread16(&cfg->queue_enable);
>>> +
>>> +       return (bool)queue_enable;
>>>   }
>>>
>>>   int ifcvf_start_hw(struct ifcvf_hw *hw)
>>>   {
>>> -       ifcvf_reset(hw);
>>>          ifcvf_add_status(hw, VIRTIO_CONFIG_S_ACKNOWLEDGE);
>>>          ifcvf_add_status(hw, VIRTIO_CONFIG_S_DRIVER);
>>>
>>>          if (ifcvf_config_features(hw) < 0)
>>>                  return -EINVAL;
>>>
>>> -       if (ifcvf_hw_enable(hw) < 0)
>>> -               return -EINVAL;
>>> -
>>>          ifcvf_add_status(hw, VIRTIO_CONFIG_S_DRIVER_OK);
>>>
>>>          return 0;
>>>   }
>>>
>>> +static void ifcvf_reset_vring(struct ifcvf_hw *hw)
>>> +{
>>> +       int i;
>>> +
>>> +       for (i = 0; i < hw->nr_vring; i++) {
>>> +               hw->vring[i].cb.callback = NULL;
>>> +               hw->vring[i].cb.private = NULL;
>>> +               ifcvf_set_vq_vector(hw, i, VIRTIO_MSI_NO_VECTOR);
>>> +       }
>>> +}
>>> +
>>> +static void ifcvf_reset_config_handler(struct ifcvf_hw *hw)
>>> +{
>>> +       hw->config_cb.callback = NULL;
>>> +       hw->config_cb.private = NULL;
>>> +       ifcvf_set_config_vector(hw, VIRTIO_MSI_NO_VECTOR);
>> Do we need to synchronize with the IRQ here?
>>
>>> +}
>>> +
>>>   void ifcvf_stop_hw(struct ifcvf_hw *hw)
>>>   {
>>> -       ifcvf_hw_disable(hw);
>>> -       ifcvf_reset(hw);
>>> +       ifcvf_reset_vring(hw);
>>> +       ifcvf_reset_config_handler(hw);
>>>   }
>>>
>>>   void ifcvf_notify_queue(struct ifcvf_hw *hw, u16 qid)
>>> diff --git a/drivers/vdpa/ifcvf/ifcvf_base.h b/drivers/vdpa/ifcvf/ifcvf_base.h
>>> index 115b61f4924b..41d86985361f 100644
>>> --- a/drivers/vdpa/ifcvf/ifcvf_base.h
>>> +++ b/drivers/vdpa/ifcvf/ifcvf_base.h
>>> @@ -49,12 +49,6 @@
>>>   #define MSIX_VECTOR_DEV_SHARED                 3
>>>
>>>   struct vring_info {
>>> -       u64 desc;
>>> -       u64 avail;
>>> -       u64 used;
>>> -       u16 size;
>>> -       u16 last_avail_idx;
>>> -       bool ready;
>>>          void __iomem *notify_addr;
>>>          phys_addr_t notify_pa;
>>>          u32 irq;
>>> @@ -131,6 +125,11 @@ int ifcvf_set_vq_state(struct ifcvf_hw *hw, u16 qid, u16 num);
>>>   struct ifcvf_adapter *vf_to_adapter(struct ifcvf_hw *hw);
>>>   int ifcvf_probed_virtio_net(struct ifcvf_hw *hw);
>>>   u32 ifcvf_get_config_size(struct ifcvf_hw *hw);
>>> +int ifcvf_set_vq_address(struct ifcvf_hw *hw, u16 qid, u64 desc_area,
>>> +                        u64 driver_area, u64 device_area);
>>>   u16 ifcvf_set_vq_vector(struct ifcvf_hw *hw, u16 qid, int vector);
>>>   u16 ifcvf_set_config_vector(struct ifcvf_hw *hw, int vector);
>>> +void ifcvf_set_vq_num(struct ifcvf_hw *hw, u16 qid, u32 num);
>>> +void ifcvf_set_vq_ready(struct ifcvf_hw *hw, u16 qid, bool ready);
>>> +bool ifcvf_get_vq_ready(struct ifcvf_hw *hw, u16 qid);
>>>   #endif /* _IFCVF_H_ */
>>> diff --git a/drivers/vdpa/ifcvf/ifcvf_main.c b/drivers/vdpa/ifcvf/ifcvf_main.c
>>> index 4366320fb68d..e442aa11333e 100644
>>> --- a/drivers/vdpa/ifcvf/ifcvf_main.c
>>> +++ b/drivers/vdpa/ifcvf/ifcvf_main.c
>>> @@ -374,37 +374,6 @@ static int ifcvf_start_datapath(void *private)
>>>          return ret;
>>>   }
>>>
>>> -static int ifcvf_stop_datapath(void *private)
>>> -{
>>> -       struct ifcvf_hw *vf = ifcvf_private_to_vf(private);
>>> -       int i;
>>> -
>>> -       for (i = 0; i < vf->nr_vring; i++)
>>> -               vf->vring[i].cb.callback = NULL;
>>> -
>>> -       ifcvf_stop_hw(vf);
>>> -
>>> -       return 0;
>>> -}
>>> -
>>> -static void ifcvf_reset_vring(struct ifcvf_adapter *adapter)
>>> -{
>>> -       struct ifcvf_hw *vf = ifcvf_private_to_vf(adapter);
>>> -       int i;
>>> -
>>> -       for (i = 0; i < vf->nr_vring; i++) {
>>> -               vf->vring[i].last_avail_idx = 0;
>>> -               vf->vring[i].desc = 0;
>>> -               vf->vring[i].avail = 0;
>>> -               vf->vring[i].used = 0;
>>> -               vf->vring[i].ready = 0;
>>> -               vf->vring[i].cb.callback = NULL;
>>> -               vf->vring[i].cb.private = NULL;
>>> -       }
>>> -
>>> -       ifcvf_reset(vf);
>>> -}
>>> -
>>>   static struct ifcvf_adapter *vdpa_to_adapter(struct vdpa_device *vdpa_dev)
>>>   {
>>>          return container_of(vdpa_dev, struct ifcvf_adapter, vdpa);
>>> @@ -477,6 +446,8 @@ static void ifcvf_vdpa_set_status(struct vdpa_device *vdpa_dev, u8 status)
>>>          if (status_old == status)
>>>                  return;
>>>
>>> +       ifcvf_set_status(vf, status);
>>> +
>>>          if ((status & VIRTIO_CONFIG_S_DRIVER_OK) &&
>>>              !(status_old & VIRTIO_CONFIG_S_DRIVER_OK)) {
>>>                  ret = ifcvf_request_irq(adapter);
>> Does this mean e.g for DRIVER_OK the device may work before the
>> interrupt handler is requested?
>>
>> Looks racy.
>>
>> Thanks
>>
>>> @@ -493,7 +464,6 @@ static void ifcvf_vdpa_set_status(struct vdpa_device *vdpa_dev, u8 status)
>>>                                    status);
>>>          }
>>>
>>> -       ifcvf_set_status(vf, status);
>>>   }
>>>
>>>   static int ifcvf_vdpa_reset(struct vdpa_device *vdpa_dev)
>>> @@ -509,12 +479,10 @@ static int ifcvf_vdpa_reset(struct vdpa_device *vdpa_dev)
>>>          if (status_old == 0)
>>>                  return 0;
>>>
>>> -       if (status_old & VIRTIO_CONFIG_S_DRIVER_OK) {
>>> -               ifcvf_stop_datapath(adapter);
>>> -               ifcvf_free_irq(adapter);
>>> -       }
>>> +       ifcvf_stop_hw(vf);
>>> +       ifcvf_free_irq(adapter);
>>>
>>> -       ifcvf_reset_vring(adapter);
>>> +       ifcvf_reset(vf);
>>>
>>>          return 0;
>>>   }
>>> @@ -554,14 +522,17 @@ static void ifcvf_vdpa_set_vq_ready(struct vdpa_device *vdpa_dev,
>>>   {
>>>          struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);
>>>
>>> -       vf->vring[qid].ready = ready;
>>> +       ifcvf_set_vq_ready(vf, qid, ready);
>>>   }
>>>
>>>   static bool ifcvf_vdpa_get_vq_ready(struct vdpa_device *vdpa_dev, u16 qid)
>>>   {
>>>          struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);
>>> +       bool ready;
>>>
>>> -       return vf->vring[qid].ready;
>>> +       ready = ifcvf_get_vq_ready(vf, qid);
>>> +
>>> +       return ready;
>>>   }
>>>
>>>   static void ifcvf_vdpa_set_vq_num(struct vdpa_device *vdpa_dev, u16 qid,
>>> @@ -569,7 +540,7 @@ static void ifcvf_vdpa_set_vq_num(struct vdpa_device *vdpa_dev, u16 qid,
>>>   {
>>>          struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);
>>>
>>> -       vf->vring[qid].size = num;
>>> +       ifcvf_set_vq_num(vf, qid, num);
>>>   }
>>>
>>>   static int ifcvf_vdpa_set_vq_address(struct vdpa_device *vdpa_dev, u16 qid,
>>> @@ -578,11 +549,7 @@ static int ifcvf_vdpa_set_vq_address(struct vdpa_device *vdpa_dev, u16 qid,
>>>   {
>>>          struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);
>>>
>>> -       vf->vring[qid].desc = desc_area;
>>> -       vf->vring[qid].avail = driver_area;
>>> -       vf->vring[qid].used = device_area;
>>> -
>>> -       return 0;
>>> +       return ifcvf_set_vq_address(vf, qid, desc_area, driver_area, device_area);
>>>   }
>>>
>>>   static void ifcvf_vdpa_kick_vq(struct vdpa_device *vdpa_dev, u16 qid)
>>> --
>>> 2.31.1
>>>


^ permalink raw reply	[flat|nested] 10+ messages in thread

* Re: [PATCH] vDPA/ifcvf: allow userspace to suspend a queue
  2022-04-21 11:05   ` Zhu, Lingshan
@ 2022-04-21 14:27     ` Zhou Furong
  0 siblings, 0 replies; 10+ messages in thread
From: Zhou Furong @ 2022-04-21 14:27 UTC (permalink / raw)
  To: Zhu, Lingshan, jasowang, mst; +Cc: virtualization, netdev



On 2022/4/21 19:05, Zhu, Lingshan wrote:
> 
> 
> On 4/12/2022 2:55 PM, Zhou Furong wrote:
>> Hi,
>>
>>> +bool ifcvf_get_vq_ready(struct ifcvf_hw *hw, u16 qid)
>>> +{
>>> +    struct virtio_pci_common_cfg __iomem *cfg = hw->common_cfg;
>>> +    bool queue_enable;
>>> +
>>> +    vp_iowrite16(qid, &cfg->queue_select);
>>> +    queue_enable = vp_ioread16(&cfg->queue_enable);
>>> +
>>> +    return (bool)queue_enable;
>> queue_enable is bool, why cast? looks like remove the variable is better.
>> return vp_ioread16(&cfg->queue_enable);
> Hi, thanks for your comments. This cast tries to make some static 
> checkers happy.
> Please correct me if I misunderstood it:
> vp_ioread16() returns an u16, and bool is not an one bit variable, it is 
> C99 _Bool.
> C99 defines _Bool to be true(expends to int 1) or false(expends to int 0).
> I think this implies a bool is actually capable to store an u16.
> so I am afraid some checkers may complain about "queue_enable = 
> vp_ioread16(&cfg->queue_enable);",
> a cast here can help us silence the checkers.
> 
> see http://www.open-std.org/jtc1/sc22/wg14/www/docs/n1256.pdf section 7.16
> and https://www.kernel.org/doc/Documentation/process/coding-style.rst 
> section 17
> 
> Thanks,
> Zhu Lingshan
>>
The bool type holds only 1/0; any number assigned to a boolean
variable will be automatically cast to 1 or 0.

if you want make static checker happy(if there is)
queue_enable = (bool)vp_ioread16(&cfg->queue_enable);
return queue_enable;

or
return (bool)vp_ioread16(&cfg->queue_enable);




>>>   static bool ifcvf_vdpa_get_vq_ready(struct vdpa_device *vdpa_dev, 
>>> u16 qid)
>>>   {
>>>       struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);
>>> +    bool ready;
>>>   -    return vf->vring[qid].ready;
>>> +    ready = ifcvf_get_vq_ready(vf, qid);
>>> +
>>> +    return ready;
>> remove ready looks better
>> return ifcvf_get_vq_ready(vf, qid);
>>
>>
>> Best regards,
>> Furong
> 

^ permalink raw reply	[flat|nested] 10+ messages in thread

end of thread, other threads:[~2022-04-21 14:27 UTC | newest]

Thread overview: 10+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2022-04-11  3:10 [PATCH] vDPA/ifcvf: allow userspace to suspend a queue Zhu Lingshan
2022-04-12  6:55 ` Zhou Furong
2022-04-21 11:05   ` Zhu, Lingshan
2022-04-21 14:27     ` Zhou Furong
2022-04-13  8:25 ` Jason Wang
2022-04-13  8:25   ` Jason Wang
2022-04-13  8:54   ` Michael S. Tsirkin
2022-04-13  8:54     ` Michael S. Tsirkin
2022-04-21 11:13     ` Zhu, Lingshan
2022-04-21 11:11   ` Zhu, Lingshan

This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.