All of lore.kernel.org
 help / color / mirror / Atom feed
* [PATCH V2] vDPA/ifcvf: allow userspace to suspend a queue
@ 2022-04-24 11:33 Zhu Lingshan
  2022-04-27  5:56   ` Jason Wang
  0 siblings, 1 reply; 4+ messages in thread
From: Zhu Lingshan @ 2022-04-24 11:33 UTC (permalink / raw)
  To: jasowang, mst; +Cc: virtualization, netdev, Zhu Lingshan

Formerly, ifcvf driver has implemented a lazy-initialization mechanism
for the virtqueues, it would store all virtqueue config fields that
passed down from the userspace, then load them to the virtqueues and
enable the queues upon DRIVER_OK.

To allow the userspace to suspend a virtqueue,
this commit passes queue_enable to the virtqueue directly through
set_vq_ready().

This feature requires the driver to implement all virtqueue
ops (set_vq_addr, set_vq_num and set_vq_ready) so they take immediate
actions rather than lazy-initialization, so ifcvf_hw_enable() is retired.

set_features() should take immediate actions as well.

ifcvf_add_status() is retired because we should not add
status bits like FEATURES_OK by ifcvf's own decision; this driver should
only set the device status upon vdpa_ops.set_status().

To avoid losing virtqueue configurations caused by multiple
rounds of reset(), this commit also refactors the device reset
routine; now it simply resets the config handler and the virtqueues,
and performs device-reset() only once.

Signed-off-by: Zhu Lingshan <lingshan.zhu@intel.com>
---
 drivers/vdpa/ifcvf/ifcvf_base.c | 150 +++++++++++++++++++-------------
 drivers/vdpa/ifcvf/ifcvf_base.h |  16 ++--
 drivers/vdpa/ifcvf/ifcvf_main.c |  81 +++--------------
 3 files changed, 111 insertions(+), 136 deletions(-)

diff --git a/drivers/vdpa/ifcvf/ifcvf_base.c b/drivers/vdpa/ifcvf/ifcvf_base.c
index 48c4dadb0c7c..bbc9007a6f34 100644
--- a/drivers/vdpa/ifcvf/ifcvf_base.c
+++ b/drivers/vdpa/ifcvf/ifcvf_base.c
@@ -179,20 +179,7 @@ void ifcvf_set_status(struct ifcvf_hw *hw, u8 status)
 
 void ifcvf_reset(struct ifcvf_hw *hw)
 {
-	hw->config_cb.callback = NULL;
-	hw->config_cb.private = NULL;
-
 	ifcvf_set_status(hw, 0);
-	/* flush set_status, make sure VF is stopped, reset */
-	ifcvf_get_status(hw);
-}
-
-static void ifcvf_add_status(struct ifcvf_hw *hw, u8 status)
-{
-	if (status != 0)
-		status |= ifcvf_get_status(hw);
-
-	ifcvf_set_status(hw, status);
 	ifcvf_get_status(hw);
 }
 
@@ -213,7 +200,7 @@ u64 ifcvf_get_hw_features(struct ifcvf_hw *hw)
 	return features;
 }
 
-u64 ifcvf_get_features(struct ifcvf_hw *hw)
+u64 ifcvf_get_device_features(struct ifcvf_hw *hw)
 {
 	return hw->hw_features;
 }
@@ -280,7 +267,7 @@ void ifcvf_write_dev_config(struct ifcvf_hw *hw, u64 offset,
 		vp_iowrite8(*p++, hw->dev_cfg + offset + i);
 }
 
-static void ifcvf_set_features(struct ifcvf_hw *hw, u64 features)
+void ifcvf_set_features(struct ifcvf_hw *hw, u64 features)
 {
 	struct virtio_pci_common_cfg __iomem *cfg = hw->common_cfg;
 
@@ -289,22 +276,22 @@ static void ifcvf_set_features(struct ifcvf_hw *hw, u64 features)
 
 	vp_iowrite32(1, &cfg->guest_feature_select);
 	vp_iowrite32(features >> 32, &cfg->guest_feature);
+
+	vp_ioread32(&cfg->guest_feature);
 }
 
-static int ifcvf_config_features(struct ifcvf_hw *hw)
+u64 ifcvf_get_features(struct ifcvf_hw *hw)
 {
-	struct ifcvf_adapter *ifcvf;
+	struct virtio_pci_common_cfg __iomem *cfg = hw->common_cfg;
+	u64 features;
 
-	ifcvf = vf_to_adapter(hw);
-	ifcvf_set_features(hw, hw->req_features);
-	ifcvf_add_status(hw, VIRTIO_CONFIG_S_FEATURES_OK);
+	vp_iowrite32(0, &cfg->device_feature_select);
+	features = vp_ioread32(&cfg->device_feature);
 
-	if (!(ifcvf_get_status(hw) & VIRTIO_CONFIG_S_FEATURES_OK)) {
-		IFCVF_ERR(ifcvf->pdev, "Failed to set FEATURES_OK status\n");
-		return -EIO;
-	}
+	vp_iowrite32(1, &cfg->device_feature_select);
+	features |= ((u64)vp_ioread32(&cfg->guest_feature) << 32);
 
-	return 0;
+	return features;
 }
 
 u16 ifcvf_get_vq_state(struct ifcvf_hw *hw, u16 qid)
@@ -331,68 +318,111 @@ int ifcvf_set_vq_state(struct ifcvf_hw *hw, u16 qid, u16 num)
 	ifcvf_lm = (struct ifcvf_lm_cfg __iomem *)hw->lm_cfg;
 	q_pair_id = qid / hw->nr_vring;
 	avail_idx_addr = &ifcvf_lm->vring_lm_cfg[q_pair_id].idx_addr[qid % 2];
-	hw->vring[qid].last_avail_idx = num;
 	vp_iowrite16(num, avail_idx_addr);
 
 	return 0;
 }
 
-static int ifcvf_hw_enable(struct ifcvf_hw *hw)
+void ifcvf_set_vq_num(struct ifcvf_hw *hw, u16 qid, u32 num)
 {
-	struct virtio_pci_common_cfg __iomem *cfg;
-	u32 i;
+	struct virtio_pci_common_cfg __iomem *cfg = hw->common_cfg;
 
-	cfg = hw->common_cfg;
-	for (i = 0; i < hw->nr_vring; i++) {
-		if (!hw->vring[i].ready)
-			break;
+	vp_iowrite16(qid, &cfg->queue_select);
+	vp_iowrite16(num, &cfg->queue_size);
+}
 
-		vp_iowrite16(i, &cfg->queue_select);
-		vp_iowrite64_twopart(hw->vring[i].desc, &cfg->queue_desc_lo,
-				     &cfg->queue_desc_hi);
-		vp_iowrite64_twopart(hw->vring[i].avail, &cfg->queue_avail_lo,
-				      &cfg->queue_avail_hi);
-		vp_iowrite64_twopart(hw->vring[i].used, &cfg->queue_used_lo,
-				     &cfg->queue_used_hi);
-		vp_iowrite16(hw->vring[i].size, &cfg->queue_size);
-		ifcvf_set_vq_state(hw, i, hw->vring[i].last_avail_idx);
-		vp_iowrite16(1, &cfg->queue_enable);
-	}
+int ifcvf_set_vq_address(struct ifcvf_hw *hw, u16 qid, u64 desc_area,
+			 u64 driver_area, u64 device_area)
+{
+	struct virtio_pci_common_cfg __iomem *cfg = hw->common_cfg;
+
+	vp_iowrite16(qid, &cfg->queue_select);
+	vp_iowrite64_twopart(desc_area, &cfg->queue_desc_lo,
+			     &cfg->queue_desc_hi);
+	vp_iowrite64_twopart(driver_area, &cfg->queue_avail_lo,
+			     &cfg->queue_avail_hi);
+	vp_iowrite64_twopart(device_area, &cfg->queue_used_lo,
+			     &cfg->queue_used_hi);
 
 	return 0;
 }
 
-static void ifcvf_hw_disable(struct ifcvf_hw *hw)
+void ifcvf_set_vq_ready(struct ifcvf_hw *hw, u16 qid, bool ready)
 {
-	u32 i;
+	struct virtio_pci_common_cfg __iomem *cfg = hw->common_cfg;
+
+	vp_iowrite16(qid, &cfg->queue_select);
+	/* write 0 to queue_enable will suspend a queue*/
+	vp_iowrite16(ready, &cfg->queue_enable);
+}
+
+bool ifcvf_get_vq_ready(struct ifcvf_hw *hw, u16 qid)
+{
+	struct virtio_pci_common_cfg __iomem *cfg = hw->common_cfg;
+	bool queue_enable;
+
+	vp_iowrite16(qid, &cfg->queue_select);
+	queue_enable = vp_ioread16(&cfg->queue_enable);
+
+	return (bool)queue_enable;
+}
+
+static void synchronize_per_vq_irq(struct ifcvf_hw *hw)
+{
+	int i;
 
-	ifcvf_set_config_vector(hw, VIRTIO_MSI_NO_VECTOR);
 	for (i = 0; i < hw->nr_vring; i++) {
-		ifcvf_set_vq_vector(hw, i, VIRTIO_MSI_NO_VECTOR);
+		if (hw->vring[i].irq != -EINVAL)
+			synchronize_irq(hw->vring[i].irq);
 	}
 }
 
-int ifcvf_start_hw(struct ifcvf_hw *hw)
+static void synchronize_vqs_reused_irq(struct ifcvf_hw *hw)
 {
-	ifcvf_reset(hw);
-	ifcvf_add_status(hw, VIRTIO_CONFIG_S_ACKNOWLEDGE);
-	ifcvf_add_status(hw, VIRTIO_CONFIG_S_DRIVER);
+	if (hw->vqs_reused_irq != -EINVAL)
+		synchronize_irq(hw->vqs_reused_irq);
+}
 
-	if (ifcvf_config_features(hw) < 0)
-		return -EINVAL;
+static void synchronize_vq_irq(struct ifcvf_hw *hw)
+{
+	u8 status = hw->msix_vector_status;
 
-	if (ifcvf_hw_enable(hw) < 0)
-		return -EINVAL;
+	if (status == MSIX_VECTOR_PER_VQ_AND_CONFIG)
+		synchronize_per_vq_irq(hw);
+	else
+		synchronize_vqs_reused_irq(hw);
+}
 
-	ifcvf_add_status(hw, VIRTIO_CONFIG_S_DRIVER_OK);
+static void synchronize_config_irq(struct ifcvf_hw *hw)
+{
+	if (hw->config_irq != -EINVAL)
+		synchronize_irq(hw->config_irq);
+}
 
-	return 0;
+static void ifcvf_reset_vring(struct ifcvf_hw *hw)
+{
+	int i;
+
+	for (i = 0; i < hw->nr_vring; i++) {
+		synchronize_vq_irq(hw);
+		hw->vring[i].cb.callback = NULL;
+		hw->vring[i].cb.private = NULL;
+		ifcvf_set_vq_vector(hw, i, VIRTIO_MSI_NO_VECTOR);
+	}
+}
+
+static void ifcvf_reset_config_handler(struct ifcvf_hw *hw)
+{
+	synchronize_config_irq(hw);
+	hw->config_cb.callback = NULL;
+	hw->config_cb.private = NULL;
+	ifcvf_set_config_vector(hw, VIRTIO_MSI_NO_VECTOR);
 }
 
 void ifcvf_stop_hw(struct ifcvf_hw *hw)
 {
-	ifcvf_hw_disable(hw);
-	ifcvf_reset(hw);
+	ifcvf_reset_vring(hw);
+	ifcvf_reset_config_handler(hw);
 }
 
 void ifcvf_notify_queue(struct ifcvf_hw *hw, u16 qid)
diff --git a/drivers/vdpa/ifcvf/ifcvf_base.h b/drivers/vdpa/ifcvf/ifcvf_base.h
index 115b61f4924b..f3dce0d795cb 100644
--- a/drivers/vdpa/ifcvf/ifcvf_base.h
+++ b/drivers/vdpa/ifcvf/ifcvf_base.h
@@ -49,12 +49,6 @@
 #define MSIX_VECTOR_DEV_SHARED			3
 
 struct vring_info {
-	u64 desc;
-	u64 avail;
-	u64 used;
-	u16 size;
-	u16 last_avail_idx;
-	bool ready;
 	void __iomem *notify_addr;
 	phys_addr_t notify_pa;
 	u32 irq;
@@ -76,7 +70,6 @@ struct ifcvf_hw {
 	phys_addr_t notify_base_pa;
 	u32 notify_off_multiplier;
 	u32 dev_type;
-	u64 req_features;
 	u64 hw_features;
 	struct virtio_pci_common_cfg __iomem *common_cfg;
 	void __iomem *dev_cfg;
@@ -123,7 +116,7 @@ u8 ifcvf_get_status(struct ifcvf_hw *hw);
 void ifcvf_set_status(struct ifcvf_hw *hw, u8 status);
 void io_write64_twopart(u64 val, u32 *lo, u32 *hi);
 void ifcvf_reset(struct ifcvf_hw *hw);
-u64 ifcvf_get_features(struct ifcvf_hw *hw);
+u64 ifcvf_get_device_features(struct ifcvf_hw *hw);
 u64 ifcvf_get_hw_features(struct ifcvf_hw *hw);
 int ifcvf_verify_min_features(struct ifcvf_hw *hw, u64 features);
 u16 ifcvf_get_vq_state(struct ifcvf_hw *hw, u16 qid);
@@ -131,6 +124,13 @@ int ifcvf_set_vq_state(struct ifcvf_hw *hw, u16 qid, u16 num);
 struct ifcvf_adapter *vf_to_adapter(struct ifcvf_hw *hw);
 int ifcvf_probed_virtio_net(struct ifcvf_hw *hw);
 u32 ifcvf_get_config_size(struct ifcvf_hw *hw);
+int ifcvf_set_vq_address(struct ifcvf_hw *hw, u16 qid, u64 desc_area,
+			 u64 driver_area, u64 device_area);
 u16 ifcvf_set_vq_vector(struct ifcvf_hw *hw, u16 qid, int vector);
 u16 ifcvf_set_config_vector(struct ifcvf_hw *hw, int vector);
+void ifcvf_set_vq_num(struct ifcvf_hw *hw, u16 qid, u32 num);
+void ifcvf_set_vq_ready(struct ifcvf_hw *hw, u16 qid, bool ready);
+bool ifcvf_get_vq_ready(struct ifcvf_hw *hw, u16 qid);
+void ifcvf_set_features(struct ifcvf_hw *hw, u64 features);
+u64 ifcvf_get_features(struct ifcvf_hw *hw);
 #endif /* _IFCVF_H_ */
diff --git a/drivers/vdpa/ifcvf/ifcvf_main.c b/drivers/vdpa/ifcvf/ifcvf_main.c
index 4366320fb68d..0257ba98cffe 100644
--- a/drivers/vdpa/ifcvf/ifcvf_main.c
+++ b/drivers/vdpa/ifcvf/ifcvf_main.c
@@ -358,53 +358,6 @@ static int ifcvf_request_irq(struct ifcvf_adapter *adapter)
 	return 0;
 }
 
-static int ifcvf_start_datapath(void *private)
-{
-	struct ifcvf_hw *vf = ifcvf_private_to_vf(private);
-	u8 status;
-	int ret;
-
-	ret = ifcvf_start_hw(vf);
-	if (ret < 0) {
-		status = ifcvf_get_status(vf);
-		status |= VIRTIO_CONFIG_S_FAILED;
-		ifcvf_set_status(vf, status);
-	}
-
-	return ret;
-}
-
-static int ifcvf_stop_datapath(void *private)
-{
-	struct ifcvf_hw *vf = ifcvf_private_to_vf(private);
-	int i;
-
-	for (i = 0; i < vf->nr_vring; i++)
-		vf->vring[i].cb.callback = NULL;
-
-	ifcvf_stop_hw(vf);
-
-	return 0;
-}
-
-static void ifcvf_reset_vring(struct ifcvf_adapter *adapter)
-{
-	struct ifcvf_hw *vf = ifcvf_private_to_vf(adapter);
-	int i;
-
-	for (i = 0; i < vf->nr_vring; i++) {
-		vf->vring[i].last_avail_idx = 0;
-		vf->vring[i].desc = 0;
-		vf->vring[i].avail = 0;
-		vf->vring[i].used = 0;
-		vf->vring[i].ready = 0;
-		vf->vring[i].cb.callback = NULL;
-		vf->vring[i].cb.private = NULL;
-	}
-
-	ifcvf_reset(vf);
-}
-
 static struct ifcvf_adapter *vdpa_to_adapter(struct vdpa_device *vdpa_dev)
 {
 	return container_of(vdpa_dev, struct ifcvf_adapter, vdpa);
@@ -426,7 +379,7 @@ static u64 ifcvf_vdpa_get_device_features(struct vdpa_device *vdpa_dev)
 	u64 features;
 
 	if (type == VIRTIO_ID_NET || type == VIRTIO_ID_BLOCK)
-		features = ifcvf_get_features(vf);
+		features = ifcvf_get_device_features(vf);
 	else {
 		features = 0;
 		IFCVF_ERR(pdev, "VIRTIO ID %u not supported\n", vf->dev_type);
@@ -444,7 +397,7 @@ static int ifcvf_vdpa_set_driver_features(struct vdpa_device *vdpa_dev, u64 feat
 	if (ret)
 		return ret;
 
-	vf->req_features = features;
+	ifcvf_set_features(vf, features);
 
 	return 0;
 }
@@ -453,7 +406,7 @@ static u64 ifcvf_vdpa_get_driver_features(struct vdpa_device *vdpa_dev)
 {
 	struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);
 
-	return vf->req_features;
+	return ifcvf_get_features(vf);
 }
 
 static u8 ifcvf_vdpa_get_status(struct vdpa_device *vdpa_dev)
@@ -486,11 +439,6 @@ static void ifcvf_vdpa_set_status(struct vdpa_device *vdpa_dev, u8 status)
 			ifcvf_set_status(vf, status);
 			return;
 		}
-
-		if (ifcvf_start_datapath(adapter) < 0)
-			IFCVF_ERR(adapter->pdev,
-				  "Failed to set ifcvf vdpa  status %u\n",
-				  status);
 	}
 
 	ifcvf_set_status(vf, status);
@@ -509,12 +457,10 @@ static int ifcvf_vdpa_reset(struct vdpa_device *vdpa_dev)
 	if (status_old == 0)
 		return 0;
 
-	if (status_old & VIRTIO_CONFIG_S_DRIVER_OK) {
-		ifcvf_stop_datapath(adapter);
-		ifcvf_free_irq(adapter);
-	}
+	ifcvf_stop_hw(vf);
+	ifcvf_free_irq(adapter);
 
-	ifcvf_reset_vring(adapter);
+	ifcvf_reset(vf);
 
 	return 0;
 }
@@ -554,14 +500,17 @@ static void ifcvf_vdpa_set_vq_ready(struct vdpa_device *vdpa_dev,
 {
 	struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);
 
-	vf->vring[qid].ready = ready;
+	ifcvf_set_vq_ready(vf, qid, ready);
 }
 
 static bool ifcvf_vdpa_get_vq_ready(struct vdpa_device *vdpa_dev, u16 qid)
 {
 	struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);
+	bool ready;
+
+	ready = ifcvf_get_vq_ready(vf, qid);
 
-	return vf->vring[qid].ready;
+	return ready;
 }
 
 static void ifcvf_vdpa_set_vq_num(struct vdpa_device *vdpa_dev, u16 qid,
@@ -569,7 +518,7 @@ static void ifcvf_vdpa_set_vq_num(struct vdpa_device *vdpa_dev, u16 qid,
 {
 	struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);
 
-	vf->vring[qid].size = num;
+	ifcvf_set_vq_num(vf, qid, num);
 }
 
 static int ifcvf_vdpa_set_vq_address(struct vdpa_device *vdpa_dev, u16 qid,
@@ -578,11 +527,7 @@ static int ifcvf_vdpa_set_vq_address(struct vdpa_device *vdpa_dev, u16 qid,
 {
 	struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);
 
-	vf->vring[qid].desc = desc_area;
-	vf->vring[qid].avail = driver_area;
-	vf->vring[qid].used = device_area;
-
-	return 0;
+	return ifcvf_set_vq_address(vf, qid, desc_area, driver_area, device_area);
 }
 
 static void ifcvf_vdpa_kick_vq(struct vdpa_device *vdpa_dev, u16 qid)
-- 
2.31.1


^ permalink raw reply related	[flat|nested] 4+ messages in thread

* Re: [PATCH V2] vDPA/ifcvf: allow userspace to suspend a queue
  2022-04-24 11:33 [PATCH V2] vDPA/ifcvf: allow userspace to suspend a queue Zhu Lingshan
@ 2022-04-27  5:56   ` Jason Wang
  0 siblings, 0 replies; 4+ messages in thread
From: Jason Wang @ 2022-04-27  5:56 UTC (permalink / raw)
  To: Zhu Lingshan, mst; +Cc: virtualization, netdev


在 2022/4/24 19:33, Zhu Lingshan 写道:
> Formerly, ifcvf driver has implemented a lazy-initialization mechanism
> for the virtqueues, it would store all virtqueue config fields that
> passed down from the userspace, then load them to the virtqueues and
> enable the queues upon DRIVER_OK.
>
> To allow the userspace to suspend a virtqueue,
> this commit passes queue_enable to the virtqueue directly through
> set_vq_ready().
>
> This feature requires and this commits implementing all virtqueue
> ops(set_vq_addr, set_vq_num and set_vq_ready) to take immediate
> actions than lazy-initialization, so ifcvf_hw_enable() is retired.
>
> set_features() should take immediate actions as well.
>
> ifcvf_add_status() is retired because we should not add
> status like FEATURES_OK by ifcvf's decision, this driver should
> only set device status upon vdpa_ops.set_status()
>
> To avoid losing virtqueue configurations caused by multiple
> rounds of reset(), this commit also refactors the device reset
> routine, now it simply reset the config handler and the virtqueues,
> and only once device-reset().


It looks like the patch tries to do too many things at one run. I'd 
suggest to split them:


1) on-the-fly set via set_vq_ready(), but I don't see a reason why we 
need to change other lazy stuffs, since setting queue_enable to 1 before 
DRIVER_OK won't start the virtqueue anyhow
2) if necessary, converting the lazy stuffs
3) the synchronize_irq() fixes
4) other stuffs

Thanks


>
> Signed-off-by: Zhu Lingshan <lingshan.zhu@intel.com>
> ---
>   drivers/vdpa/ifcvf/ifcvf_base.c | 150 +++++++++++++++++++-------------
>   drivers/vdpa/ifcvf/ifcvf_base.h |  16 ++--
>   drivers/vdpa/ifcvf/ifcvf_main.c |  81 +++--------------
>   3 files changed, 111 insertions(+), 136 deletions(-)
>
> diff --git a/drivers/vdpa/ifcvf/ifcvf_base.c b/drivers/vdpa/ifcvf/ifcvf_base.c
> index 48c4dadb0c7c..bbc9007a6f34 100644
> --- a/drivers/vdpa/ifcvf/ifcvf_base.c
> +++ b/drivers/vdpa/ifcvf/ifcvf_base.c
> @@ -179,20 +179,7 @@ void ifcvf_set_status(struct ifcvf_hw *hw, u8 status)
>   
>   void ifcvf_reset(struct ifcvf_hw *hw)
>   {
> -	hw->config_cb.callback = NULL;
> -	hw->config_cb.private = NULL;
> -
>   	ifcvf_set_status(hw, 0);
> -	/* flush set_status, make sure VF is stopped, reset */
> -	ifcvf_get_status(hw);
> -}
> -
> -static void ifcvf_add_status(struct ifcvf_hw *hw, u8 status)
> -{
> -	if (status != 0)
> -		status |= ifcvf_get_status(hw);
> -
> -	ifcvf_set_status(hw, status);
>   	ifcvf_get_status(hw);
>   }
>   
> @@ -213,7 +200,7 @@ u64 ifcvf_get_hw_features(struct ifcvf_hw *hw)
>   	return features;
>   }
>   
> -u64 ifcvf_get_features(struct ifcvf_hw *hw)
> +u64 ifcvf_get_device_features(struct ifcvf_hw *hw)
>   {
>   	return hw->hw_features;
>   }
> @@ -280,7 +267,7 @@ void ifcvf_write_dev_config(struct ifcvf_hw *hw, u64 offset,
>   		vp_iowrite8(*p++, hw->dev_cfg + offset + i);
>   }
>   
> -static void ifcvf_set_features(struct ifcvf_hw *hw, u64 features)
> +void ifcvf_set_features(struct ifcvf_hw *hw, u64 features)
>   {
>   	struct virtio_pci_common_cfg __iomem *cfg = hw->common_cfg;
>   
> @@ -289,22 +276,22 @@ static void ifcvf_set_features(struct ifcvf_hw *hw, u64 features)
>   
>   	vp_iowrite32(1, &cfg->guest_feature_select);
>   	vp_iowrite32(features >> 32, &cfg->guest_feature);
> +
> +	vp_ioread32(&cfg->guest_feature);
>   }
>   
> -static int ifcvf_config_features(struct ifcvf_hw *hw)
> +u64 ifcvf_get_features(struct ifcvf_hw *hw)
>   {
> -	struct ifcvf_adapter *ifcvf;
> +	struct virtio_pci_common_cfg __iomem *cfg = hw->common_cfg;
> +	u64 features;
>   
> -	ifcvf = vf_to_adapter(hw);
> -	ifcvf_set_features(hw, hw->req_features);
> -	ifcvf_add_status(hw, VIRTIO_CONFIG_S_FEATURES_OK);
> +	vp_iowrite32(0, &cfg->device_feature_select);
> +	features = vp_ioread32(&cfg->device_feature);
>   
> -	if (!(ifcvf_get_status(hw) & VIRTIO_CONFIG_S_FEATURES_OK)) {
> -		IFCVF_ERR(ifcvf->pdev, "Failed to set FEATURES_OK status\n");
> -		return -EIO;
> -	}
> +	vp_iowrite32(1, &cfg->device_feature_select);
> +	features |= ((u64)vp_ioread32(&cfg->guest_feature) << 32);
>   
> -	return 0;
> +	return features;
>   }
>   
>   u16 ifcvf_get_vq_state(struct ifcvf_hw *hw, u16 qid)
> @@ -331,68 +318,111 @@ int ifcvf_set_vq_state(struct ifcvf_hw *hw, u16 qid, u16 num)
>   	ifcvf_lm = (struct ifcvf_lm_cfg __iomem *)hw->lm_cfg;
>   	q_pair_id = qid / hw->nr_vring;
>   	avail_idx_addr = &ifcvf_lm->vring_lm_cfg[q_pair_id].idx_addr[qid % 2];
> -	hw->vring[qid].last_avail_idx = num;
>   	vp_iowrite16(num, avail_idx_addr);
>   
>   	return 0;
>   }
>   
> -static int ifcvf_hw_enable(struct ifcvf_hw *hw)
> +void ifcvf_set_vq_num(struct ifcvf_hw *hw, u16 qid, u32 num)
>   {
> -	struct virtio_pci_common_cfg __iomem *cfg;
> -	u32 i;
> +	struct virtio_pci_common_cfg __iomem *cfg = hw->common_cfg;
>   
> -	cfg = hw->common_cfg;
> -	for (i = 0; i < hw->nr_vring; i++) {
> -		if (!hw->vring[i].ready)
> -			break;
> +	vp_iowrite16(qid, &cfg->queue_select);
> +	vp_iowrite16(num, &cfg->queue_size);
> +}
>   
> -		vp_iowrite16(i, &cfg->queue_select);
> -		vp_iowrite64_twopart(hw->vring[i].desc, &cfg->queue_desc_lo,
> -				     &cfg->queue_desc_hi);
> -		vp_iowrite64_twopart(hw->vring[i].avail, &cfg->queue_avail_lo,
> -				      &cfg->queue_avail_hi);
> -		vp_iowrite64_twopart(hw->vring[i].used, &cfg->queue_used_lo,
> -				     &cfg->queue_used_hi);
> -		vp_iowrite16(hw->vring[i].size, &cfg->queue_size);
> -		ifcvf_set_vq_state(hw, i, hw->vring[i].last_avail_idx);
> -		vp_iowrite16(1, &cfg->queue_enable);
> -	}
> +int ifcvf_set_vq_address(struct ifcvf_hw *hw, u16 qid, u64 desc_area,
> +			 u64 driver_area, u64 device_area)
> +{
> +	struct virtio_pci_common_cfg __iomem *cfg = hw->common_cfg;
> +
> +	vp_iowrite16(qid, &cfg->queue_select);
> +	vp_iowrite64_twopart(desc_area, &cfg->queue_desc_lo,
> +			     &cfg->queue_desc_hi);
> +	vp_iowrite64_twopart(driver_area, &cfg->queue_avail_lo,
> +			     &cfg->queue_avail_hi);
> +	vp_iowrite64_twopart(device_area, &cfg->queue_used_lo,
> +			     &cfg->queue_used_hi);
>   
>   	return 0;
>   }
>   
> -static void ifcvf_hw_disable(struct ifcvf_hw *hw)
> +void ifcvf_set_vq_ready(struct ifcvf_hw *hw, u16 qid, bool ready)
>   {
> -	u32 i;
> +	struct virtio_pci_common_cfg __iomem *cfg = hw->common_cfg;
> +
> +	vp_iowrite16(qid, &cfg->queue_select);
> +	/* write 0 to queue_enable will suspend a queue*/
> +	vp_iowrite16(ready, &cfg->queue_enable);
> +}
> +
> +bool ifcvf_get_vq_ready(struct ifcvf_hw *hw, u16 qid)
> +{
> +	struct virtio_pci_common_cfg __iomem *cfg = hw->common_cfg;
> +	bool queue_enable;
> +
> +	vp_iowrite16(qid, &cfg->queue_select);
> +	queue_enable = vp_ioread16(&cfg->queue_enable);
> +
> +	return (bool)queue_enable;
> +}
> +
> +static void synchronize_per_vq_irq(struct ifcvf_hw *hw)
> +{
> +	int i;
>   
> -	ifcvf_set_config_vector(hw, VIRTIO_MSI_NO_VECTOR);
>   	for (i = 0; i < hw->nr_vring; i++) {
> -		ifcvf_set_vq_vector(hw, i, VIRTIO_MSI_NO_VECTOR);
> +		if (hw->vring[i].irq != -EINVAL)
> +			synchronize_irq(hw->vring[i].irq);
>   	}
>   }
>   
> -int ifcvf_start_hw(struct ifcvf_hw *hw)
> +static void synchronize_vqs_reused_irq(struct ifcvf_hw *hw)
>   {
> -	ifcvf_reset(hw);
> -	ifcvf_add_status(hw, VIRTIO_CONFIG_S_ACKNOWLEDGE);
> -	ifcvf_add_status(hw, VIRTIO_CONFIG_S_DRIVER);
> +	if (hw->vqs_reused_irq != -EINVAL)
> +		synchronize_irq(hw->vqs_reused_irq);
> +}
>   
> -	if (ifcvf_config_features(hw) < 0)
> -		return -EINVAL;
> +static void synchronize_vq_irq(struct ifcvf_hw *hw)
> +{
> +	u8 status = hw->msix_vector_status;
>   
> -	if (ifcvf_hw_enable(hw) < 0)
> -		return -EINVAL;
> +	if (status == MSIX_VECTOR_PER_VQ_AND_CONFIG)
> +		synchronize_per_vq_irq(hw);
> +	else
> +		synchronize_vqs_reused_irq(hw);
> +}
>   
> -	ifcvf_add_status(hw, VIRTIO_CONFIG_S_DRIVER_OK);
> +static void synchronize_config_irq(struct ifcvf_hw *hw)
> +{
> +	if (hw->config_irq != -EINVAL)
> +		synchronize_irq(hw->config_irq);
> +}
>   
> -	return 0;
> +static void ifcvf_reset_vring(struct ifcvf_hw *hw)
> +{
> +	int i;
> +
> +	for (i = 0; i < hw->nr_vring; i++) {
> +		synchronize_vq_irq(hw);
> +		hw->vring[i].cb.callback = NULL;
> +		hw->vring[i].cb.private = NULL;
> +		ifcvf_set_vq_vector(hw, i, VIRTIO_MSI_NO_VECTOR);
> +	}
> +}
> +
> +static void ifcvf_reset_config_handler(struct ifcvf_hw *hw)
> +{
> +	synchronize_config_irq(hw);
> +	hw->config_cb.callback = NULL;
> +	hw->config_cb.private = NULL;
> +	ifcvf_set_config_vector(hw, VIRTIO_MSI_NO_VECTOR);
>   }
>   
>   void ifcvf_stop_hw(struct ifcvf_hw *hw)
>   {
> -	ifcvf_hw_disable(hw);
> -	ifcvf_reset(hw);
> +	ifcvf_reset_vring(hw);
> +	ifcvf_reset_config_handler(hw);
>   }
>   
>   void ifcvf_notify_queue(struct ifcvf_hw *hw, u16 qid)
> diff --git a/drivers/vdpa/ifcvf/ifcvf_base.h b/drivers/vdpa/ifcvf/ifcvf_base.h
> index 115b61f4924b..f3dce0d795cb 100644
> --- a/drivers/vdpa/ifcvf/ifcvf_base.h
> +++ b/drivers/vdpa/ifcvf/ifcvf_base.h
> @@ -49,12 +49,6 @@
>   #define MSIX_VECTOR_DEV_SHARED			3
>   
>   struct vring_info {
> -	u64 desc;
> -	u64 avail;
> -	u64 used;
> -	u16 size;
> -	u16 last_avail_idx;
> -	bool ready;
>   	void __iomem *notify_addr;
>   	phys_addr_t notify_pa;
>   	u32 irq;
> @@ -76,7 +70,6 @@ struct ifcvf_hw {
>   	phys_addr_t notify_base_pa;
>   	u32 notify_off_multiplier;
>   	u32 dev_type;
> -	u64 req_features;
>   	u64 hw_features;
>   	struct virtio_pci_common_cfg __iomem *common_cfg;
>   	void __iomem *dev_cfg;
> @@ -123,7 +116,7 @@ u8 ifcvf_get_status(struct ifcvf_hw *hw);
>   void ifcvf_set_status(struct ifcvf_hw *hw, u8 status);
>   void io_write64_twopart(u64 val, u32 *lo, u32 *hi);
>   void ifcvf_reset(struct ifcvf_hw *hw);
> -u64 ifcvf_get_features(struct ifcvf_hw *hw);
> +u64 ifcvf_get_device_features(struct ifcvf_hw *hw);
>   u64 ifcvf_get_hw_features(struct ifcvf_hw *hw);
>   int ifcvf_verify_min_features(struct ifcvf_hw *hw, u64 features);
>   u16 ifcvf_get_vq_state(struct ifcvf_hw *hw, u16 qid);
> @@ -131,6 +124,13 @@ int ifcvf_set_vq_state(struct ifcvf_hw *hw, u16 qid, u16 num);
>   struct ifcvf_adapter *vf_to_adapter(struct ifcvf_hw *hw);
>   int ifcvf_probed_virtio_net(struct ifcvf_hw *hw);
>   u32 ifcvf_get_config_size(struct ifcvf_hw *hw);
> +int ifcvf_set_vq_address(struct ifcvf_hw *hw, u16 qid, u64 desc_area,
> +			 u64 driver_area, u64 device_area);
>   u16 ifcvf_set_vq_vector(struct ifcvf_hw *hw, u16 qid, int vector);
>   u16 ifcvf_set_config_vector(struct ifcvf_hw *hw, int vector);
> +void ifcvf_set_vq_num(struct ifcvf_hw *hw, u16 qid, u32 num);
> +void ifcvf_set_vq_ready(struct ifcvf_hw *hw, u16 qid, bool ready);
> +bool ifcvf_get_vq_ready(struct ifcvf_hw *hw, u16 qid);
> +void ifcvf_set_features(struct ifcvf_hw *hw, u64 features);
> +u64 ifcvf_get_features(struct ifcvf_hw *hw);
>   #endif /* _IFCVF_H_ */
> diff --git a/drivers/vdpa/ifcvf/ifcvf_main.c b/drivers/vdpa/ifcvf/ifcvf_main.c
> index 4366320fb68d..0257ba98cffe 100644
> --- a/drivers/vdpa/ifcvf/ifcvf_main.c
> +++ b/drivers/vdpa/ifcvf/ifcvf_main.c
> @@ -358,53 +358,6 @@ static int ifcvf_request_irq(struct ifcvf_adapter *adapter)
>   	return 0;
>   }
>   
> -static int ifcvf_start_datapath(void *private)
> -{
> -	struct ifcvf_hw *vf = ifcvf_private_to_vf(private);
> -	u8 status;
> -	int ret;
> -
> -	ret = ifcvf_start_hw(vf);
> -	if (ret < 0) {
> -		status = ifcvf_get_status(vf);
> -		status |= VIRTIO_CONFIG_S_FAILED;
> -		ifcvf_set_status(vf, status);
> -	}
> -
> -	return ret;
> -}
> -
> -static int ifcvf_stop_datapath(void *private)
> -{
> -	struct ifcvf_hw *vf = ifcvf_private_to_vf(private);
> -	int i;
> -
> -	for (i = 0; i < vf->nr_vring; i++)
> -		vf->vring[i].cb.callback = NULL;
> -
> -	ifcvf_stop_hw(vf);
> -
> -	return 0;
> -}
> -
> -static void ifcvf_reset_vring(struct ifcvf_adapter *adapter)
> -{
> -	struct ifcvf_hw *vf = ifcvf_private_to_vf(adapter);
> -	int i;
> -
> -	for (i = 0; i < vf->nr_vring; i++) {
> -		vf->vring[i].last_avail_idx = 0;
> -		vf->vring[i].desc = 0;
> -		vf->vring[i].avail = 0;
> -		vf->vring[i].used = 0;
> -		vf->vring[i].ready = 0;
> -		vf->vring[i].cb.callback = NULL;
> -		vf->vring[i].cb.private = NULL;
> -	}
> -
> -	ifcvf_reset(vf);
> -}
> -
>   static struct ifcvf_adapter *vdpa_to_adapter(struct vdpa_device *vdpa_dev)
>   {
>   	return container_of(vdpa_dev, struct ifcvf_adapter, vdpa);
> @@ -426,7 +379,7 @@ static u64 ifcvf_vdpa_get_device_features(struct vdpa_device *vdpa_dev)
>   	u64 features;
>   
>   	if (type == VIRTIO_ID_NET || type == VIRTIO_ID_BLOCK)
> -		features = ifcvf_get_features(vf);
> +		features = ifcvf_get_device_features(vf);
>   	else {
>   		features = 0;
>   		IFCVF_ERR(pdev, "VIRTIO ID %u not supported\n", vf->dev_type);
> @@ -444,7 +397,7 @@ static int ifcvf_vdpa_set_driver_features(struct vdpa_device *vdpa_dev, u64 feat
>   	if (ret)
>   		return ret;
>   
> -	vf->req_features = features;
> +	ifcvf_set_features(vf, features);
>   
>   	return 0;
>   }
> @@ -453,7 +406,7 @@ static u64 ifcvf_vdpa_get_driver_features(struct vdpa_device *vdpa_dev)
>   {
>   	struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);
>   
> -	return vf->req_features;
> +	return ifcvf_get_features(vf);
>   }
>   
>   static u8 ifcvf_vdpa_get_status(struct vdpa_device *vdpa_dev)
> @@ -486,11 +439,6 @@ static void ifcvf_vdpa_set_status(struct vdpa_device *vdpa_dev, u8 status)
>   			ifcvf_set_status(vf, status);
>   			return;
>   		}
> -
> -		if (ifcvf_start_datapath(adapter) < 0)
> -			IFCVF_ERR(adapter->pdev,
> -				  "Failed to set ifcvf vdpa  status %u\n",
> -				  status);
>   	}
>   
>   	ifcvf_set_status(vf, status);
> @@ -509,12 +457,10 @@ static int ifcvf_vdpa_reset(struct vdpa_device *vdpa_dev)
>   	if (status_old == 0)
>   		return 0;
>   
> -	if (status_old & VIRTIO_CONFIG_S_DRIVER_OK) {
> -		ifcvf_stop_datapath(adapter);
> -		ifcvf_free_irq(adapter);
> -	}
> +	ifcvf_stop_hw(vf);
> +	ifcvf_free_irq(adapter);
>   
> -	ifcvf_reset_vring(adapter);
> +	ifcvf_reset(vf);
>   
>   	return 0;
>   }
> @@ -554,14 +500,17 @@ static void ifcvf_vdpa_set_vq_ready(struct vdpa_device *vdpa_dev,
>   {
>   	struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);
>   
> -	vf->vring[qid].ready = ready;
> +	ifcvf_set_vq_ready(vf, qid, ready);
>   }
>   
>   static bool ifcvf_vdpa_get_vq_ready(struct vdpa_device *vdpa_dev, u16 qid)
>   {
>   	struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);
> +	bool ready;
> +
> +	ready = ifcvf_get_vq_ready(vf, qid);
>   
> -	return vf->vring[qid].ready;
> +	return ready;
>   }
>   
>   static void ifcvf_vdpa_set_vq_num(struct vdpa_device *vdpa_dev, u16 qid,
> @@ -569,7 +518,7 @@ static void ifcvf_vdpa_set_vq_num(struct vdpa_device *vdpa_dev, u16 qid,
>   {
>   	struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);
>   
> -	vf->vring[qid].size = num;
> +	ifcvf_set_vq_num(vf, qid, num);
>   }
>   
>   static int ifcvf_vdpa_set_vq_address(struct vdpa_device *vdpa_dev, u16 qid,
> @@ -578,11 +527,7 @@ static int ifcvf_vdpa_set_vq_address(struct vdpa_device *vdpa_dev, u16 qid,
>   {
>   	struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);
>   
> -	vf->vring[qid].desc = desc_area;
> -	vf->vring[qid].avail = driver_area;
> -	vf->vring[qid].used = device_area;
> -
> -	return 0;
> +	return ifcvf_set_vq_address(vf, qid, desc_area, driver_area, device_area);
>   }
>   
>   static void ifcvf_vdpa_kick_vq(struct vdpa_device *vdpa_dev, u16 qid)


^ permalink raw reply	[flat|nested] 4+ messages in thread

* Re: [PATCH V2] vDPA/ifcvf: allow userspace to suspend a queue
@ 2022-04-27  5:56   ` Jason Wang
  0 siblings, 0 replies; 4+ messages in thread
From: Jason Wang @ 2022-04-27  5:56 UTC (permalink / raw)
  To: Zhu Lingshan, mst; +Cc: netdev, virtualization


在 2022/4/24 19:33, Zhu Lingshan 写道:
> Formerly, ifcvf driver has implemented a lazy-initialization mechanism
> for the virtqueues, it would store all virtqueue config fields that
> passed down from the userspace, then load them to the virtqueues and
> enable the queues upon DRIVER_OK.
>
> To allow the userspace to suspend a virtqueue,
> this commit passes queue_enable to the virtqueue directly through
> set_vq_ready().
>
> To support this feature, this commit implements all virtqueue
> ops (set_vq_addr, set_vq_num and set_vq_ready) to take immediate
> actions rather than lazy-initialization, so ifcvf_hw_enable() is retired.
>
> set_features() should take immediate actions as well.
>
> ifcvf_add_status() is retired because we should not add
> status like FEATURES_OK by ifcvf's decision, this driver should
> only set device status upon vdpa_ops.set_status()
>
> To avoid losing virtqueue configurations caused by multiple
> rounds of reset(), this commit also refactors the device reset
> routine: now it simply resets the config handler and the virtqueues,
> and performs device-reset() only once.


It looks like the patch tries to do too many things at one run. I'd 
suggest to split them:


1) on-the-fly set via set_vq_ready(), but I don't see a reason why we 
need to change other lazy stuffs, since setting queue_enable to 1 before 
DRIVER_OK won't start the virtqueue anyhow
2) if necessary, converting the lazy stuffs
3) the synchronize_irq() fixes
4) other stuffs

Thanks


>
> Signed-off-by: Zhu Lingshan <lingshan.zhu@intel.com>
> ---
>   drivers/vdpa/ifcvf/ifcvf_base.c | 150 +++++++++++++++++++-------------
>   drivers/vdpa/ifcvf/ifcvf_base.h |  16 ++--
>   drivers/vdpa/ifcvf/ifcvf_main.c |  81 +++--------------
>   3 files changed, 111 insertions(+), 136 deletions(-)
>
> diff --git a/drivers/vdpa/ifcvf/ifcvf_base.c b/drivers/vdpa/ifcvf/ifcvf_base.c
> index 48c4dadb0c7c..bbc9007a6f34 100644
> --- a/drivers/vdpa/ifcvf/ifcvf_base.c
> +++ b/drivers/vdpa/ifcvf/ifcvf_base.c
> @@ -179,20 +179,7 @@ void ifcvf_set_status(struct ifcvf_hw *hw, u8 status)
>   
>   void ifcvf_reset(struct ifcvf_hw *hw)
>   {
> -	hw->config_cb.callback = NULL;
> -	hw->config_cb.private = NULL;
> -
>   	ifcvf_set_status(hw, 0);
> -	/* flush set_status, make sure VF is stopped, reset */
> -	ifcvf_get_status(hw);
> -}
> -
> -static void ifcvf_add_status(struct ifcvf_hw *hw, u8 status)
> -{
> -	if (status != 0)
> -		status |= ifcvf_get_status(hw);
> -
> -	ifcvf_set_status(hw, status);
>   	ifcvf_get_status(hw);
>   }
>   
> @@ -213,7 +200,7 @@ u64 ifcvf_get_hw_features(struct ifcvf_hw *hw)
>   	return features;
>   }
>   
> -u64 ifcvf_get_features(struct ifcvf_hw *hw)
> +u64 ifcvf_get_device_features(struct ifcvf_hw *hw)
>   {
>   	return hw->hw_features;
>   }
> @@ -280,7 +267,7 @@ void ifcvf_write_dev_config(struct ifcvf_hw *hw, u64 offset,
>   		vp_iowrite8(*p++, hw->dev_cfg + offset + i);
>   }
>   
> -static void ifcvf_set_features(struct ifcvf_hw *hw, u64 features)
> +void ifcvf_set_features(struct ifcvf_hw *hw, u64 features)
>   {
>   	struct virtio_pci_common_cfg __iomem *cfg = hw->common_cfg;
>   
> @@ -289,22 +276,22 @@ static void ifcvf_set_features(struct ifcvf_hw *hw, u64 features)
>   
>   	vp_iowrite32(1, &cfg->guest_feature_select);
>   	vp_iowrite32(features >> 32, &cfg->guest_feature);
> +
> +	vp_ioread32(&cfg->guest_feature);
>   }
>   
> -static int ifcvf_config_features(struct ifcvf_hw *hw)
> +u64 ifcvf_get_features(struct ifcvf_hw *hw)
>   {
> -	struct ifcvf_adapter *ifcvf;
> +	struct virtio_pci_common_cfg __iomem *cfg = hw->common_cfg;
> +	u64 features;
>   
> -	ifcvf = vf_to_adapter(hw);
> -	ifcvf_set_features(hw, hw->req_features);
> -	ifcvf_add_status(hw, VIRTIO_CONFIG_S_FEATURES_OK);
> +	vp_iowrite32(0, &cfg->device_feature_select);
> +	features = vp_ioread32(&cfg->device_feature);
>   
> -	if (!(ifcvf_get_status(hw) & VIRTIO_CONFIG_S_FEATURES_OK)) {
> -		IFCVF_ERR(ifcvf->pdev, "Failed to set FEATURES_OK status\n");
> -		return -EIO;
> -	}
> +	vp_iowrite32(1, &cfg->device_feature_select);
> +	features |= ((u64)vp_ioread32(&cfg->guest_feature) << 32);
>   
> -	return 0;
> +	return features;
>   }
>   
>   u16 ifcvf_get_vq_state(struct ifcvf_hw *hw, u16 qid)
> @@ -331,68 +318,111 @@ int ifcvf_set_vq_state(struct ifcvf_hw *hw, u16 qid, u16 num)
>   	ifcvf_lm = (struct ifcvf_lm_cfg __iomem *)hw->lm_cfg;
>   	q_pair_id = qid / hw->nr_vring;
>   	avail_idx_addr = &ifcvf_lm->vring_lm_cfg[q_pair_id].idx_addr[qid % 2];
> -	hw->vring[qid].last_avail_idx = num;
>   	vp_iowrite16(num, avail_idx_addr);
>   
>   	return 0;
>   }
>   
> -static int ifcvf_hw_enable(struct ifcvf_hw *hw)
> +void ifcvf_set_vq_num(struct ifcvf_hw *hw, u16 qid, u32 num)
>   {
> -	struct virtio_pci_common_cfg __iomem *cfg;
> -	u32 i;
> +	struct virtio_pci_common_cfg __iomem *cfg = hw->common_cfg;
>   
> -	cfg = hw->common_cfg;
> -	for (i = 0; i < hw->nr_vring; i++) {
> -		if (!hw->vring[i].ready)
> -			break;
> +	vp_iowrite16(qid, &cfg->queue_select);
> +	vp_iowrite16(num, &cfg->queue_size);
> +}
>   
> -		vp_iowrite16(i, &cfg->queue_select);
> -		vp_iowrite64_twopart(hw->vring[i].desc, &cfg->queue_desc_lo,
> -				     &cfg->queue_desc_hi);
> -		vp_iowrite64_twopart(hw->vring[i].avail, &cfg->queue_avail_lo,
> -				      &cfg->queue_avail_hi);
> -		vp_iowrite64_twopart(hw->vring[i].used, &cfg->queue_used_lo,
> -				     &cfg->queue_used_hi);
> -		vp_iowrite16(hw->vring[i].size, &cfg->queue_size);
> -		ifcvf_set_vq_state(hw, i, hw->vring[i].last_avail_idx);
> -		vp_iowrite16(1, &cfg->queue_enable);
> -	}
> +int ifcvf_set_vq_address(struct ifcvf_hw *hw, u16 qid, u64 desc_area,
> +			 u64 driver_area, u64 device_area)
> +{
> +	struct virtio_pci_common_cfg __iomem *cfg = hw->common_cfg;
> +
> +	vp_iowrite16(qid, &cfg->queue_select);
> +	vp_iowrite64_twopart(desc_area, &cfg->queue_desc_lo,
> +			     &cfg->queue_desc_hi);
> +	vp_iowrite64_twopart(driver_area, &cfg->queue_avail_lo,
> +			     &cfg->queue_avail_hi);
> +	vp_iowrite64_twopart(device_area, &cfg->queue_used_lo,
> +			     &cfg->queue_used_hi);
>   
>   	return 0;
>   }
>   
> -static void ifcvf_hw_disable(struct ifcvf_hw *hw)
> +void ifcvf_set_vq_ready(struct ifcvf_hw *hw, u16 qid, bool ready)
>   {
> -	u32 i;
> +	struct virtio_pci_common_cfg __iomem *cfg = hw->common_cfg;
> +
> +	vp_iowrite16(qid, &cfg->queue_select);
> +	/* write 0 to queue_enable will suspend a queue*/
> +	vp_iowrite16(ready, &cfg->queue_enable);
> +}
> +
> +bool ifcvf_get_vq_ready(struct ifcvf_hw *hw, u16 qid)
> +{
> +	struct virtio_pci_common_cfg __iomem *cfg = hw->common_cfg;
> +	bool queue_enable;
> +
> +	vp_iowrite16(qid, &cfg->queue_select);
> +	queue_enable = vp_ioread16(&cfg->queue_enable);
> +
> +	return (bool)queue_enable;
> +}
> +
> +static void synchronize_per_vq_irq(struct ifcvf_hw *hw)
> +{
> +	int i;
>   
> -	ifcvf_set_config_vector(hw, VIRTIO_MSI_NO_VECTOR);
>   	for (i = 0; i < hw->nr_vring; i++) {
> -		ifcvf_set_vq_vector(hw, i, VIRTIO_MSI_NO_VECTOR);
> +		if (hw->vring[i].irq != -EINVAL)
> +			synchronize_irq(hw->vring[i].irq);
>   	}
>   }
>   
> -int ifcvf_start_hw(struct ifcvf_hw *hw)
> +static void synchronize_vqs_reused_irq(struct ifcvf_hw *hw)
>   {
> -	ifcvf_reset(hw);
> -	ifcvf_add_status(hw, VIRTIO_CONFIG_S_ACKNOWLEDGE);
> -	ifcvf_add_status(hw, VIRTIO_CONFIG_S_DRIVER);
> +	if (hw->vqs_reused_irq != -EINVAL)
> +		synchronize_irq(hw->vqs_reused_irq);
> +}
>   
> -	if (ifcvf_config_features(hw) < 0)
> -		return -EINVAL;
> +static void synchronize_vq_irq(struct ifcvf_hw *hw)
> +{
> +	u8 status = hw->msix_vector_status;
>   
> -	if (ifcvf_hw_enable(hw) < 0)
> -		return -EINVAL;
> +	if (status == MSIX_VECTOR_PER_VQ_AND_CONFIG)
> +		synchronize_per_vq_irq(hw);
> +	else
> +		synchronize_vqs_reused_irq(hw);
> +}
>   
> -	ifcvf_add_status(hw, VIRTIO_CONFIG_S_DRIVER_OK);
> +static void synchronize_config_irq(struct ifcvf_hw *hw)
> +{
> +	if (hw->config_irq != -EINVAL)
> +		synchronize_irq(hw->config_irq);
> +}
>   
> -	return 0;
> +static void ifcvf_reset_vring(struct ifcvf_hw *hw)
> +{
> +	int i;
> +
> +	for (i = 0; i < hw->nr_vring; i++) {
> +		synchronize_vq_irq(hw);
> +		hw->vring[i].cb.callback = NULL;
> +		hw->vring[i].cb.private = NULL;
> +		ifcvf_set_vq_vector(hw, i, VIRTIO_MSI_NO_VECTOR);
> +	}
> +}
> +
> +static void ifcvf_reset_config_handler(struct ifcvf_hw *hw)
> +{
> +	synchronize_config_irq(hw);
> +	hw->config_cb.callback = NULL;
> +	hw->config_cb.private = NULL;
> +	ifcvf_set_config_vector(hw, VIRTIO_MSI_NO_VECTOR);
>   }
>   
>   void ifcvf_stop_hw(struct ifcvf_hw *hw)
>   {
> -	ifcvf_hw_disable(hw);
> -	ifcvf_reset(hw);
> +	ifcvf_reset_vring(hw);
> +	ifcvf_reset_config_handler(hw);
>   }
>   
>   void ifcvf_notify_queue(struct ifcvf_hw *hw, u16 qid)
> diff --git a/drivers/vdpa/ifcvf/ifcvf_base.h b/drivers/vdpa/ifcvf/ifcvf_base.h
> index 115b61f4924b..f3dce0d795cb 100644
> --- a/drivers/vdpa/ifcvf/ifcvf_base.h
> +++ b/drivers/vdpa/ifcvf/ifcvf_base.h
> @@ -49,12 +49,6 @@
>   #define MSIX_VECTOR_DEV_SHARED			3
>   
>   struct vring_info {
> -	u64 desc;
> -	u64 avail;
> -	u64 used;
> -	u16 size;
> -	u16 last_avail_idx;
> -	bool ready;
>   	void __iomem *notify_addr;
>   	phys_addr_t notify_pa;
>   	u32 irq;
> @@ -76,7 +70,6 @@ struct ifcvf_hw {
>   	phys_addr_t notify_base_pa;
>   	u32 notify_off_multiplier;
>   	u32 dev_type;
> -	u64 req_features;
>   	u64 hw_features;
>   	struct virtio_pci_common_cfg __iomem *common_cfg;
>   	void __iomem *dev_cfg;
> @@ -123,7 +116,7 @@ u8 ifcvf_get_status(struct ifcvf_hw *hw);
>   void ifcvf_set_status(struct ifcvf_hw *hw, u8 status);
>   void io_write64_twopart(u64 val, u32 *lo, u32 *hi);
>   void ifcvf_reset(struct ifcvf_hw *hw);
> -u64 ifcvf_get_features(struct ifcvf_hw *hw);
> +u64 ifcvf_get_device_features(struct ifcvf_hw *hw);
>   u64 ifcvf_get_hw_features(struct ifcvf_hw *hw);
>   int ifcvf_verify_min_features(struct ifcvf_hw *hw, u64 features);
>   u16 ifcvf_get_vq_state(struct ifcvf_hw *hw, u16 qid);
> @@ -131,6 +124,13 @@ int ifcvf_set_vq_state(struct ifcvf_hw *hw, u16 qid, u16 num);
>   struct ifcvf_adapter *vf_to_adapter(struct ifcvf_hw *hw);
>   int ifcvf_probed_virtio_net(struct ifcvf_hw *hw);
>   u32 ifcvf_get_config_size(struct ifcvf_hw *hw);
> +int ifcvf_set_vq_address(struct ifcvf_hw *hw, u16 qid, u64 desc_area,
> +			 u64 driver_area, u64 device_area);
>   u16 ifcvf_set_vq_vector(struct ifcvf_hw *hw, u16 qid, int vector);
>   u16 ifcvf_set_config_vector(struct ifcvf_hw *hw, int vector);
> +void ifcvf_set_vq_num(struct ifcvf_hw *hw, u16 qid, u32 num);
> +void ifcvf_set_vq_ready(struct ifcvf_hw *hw, u16 qid, bool ready);
> +bool ifcvf_get_vq_ready(struct ifcvf_hw *hw, u16 qid);
> +void ifcvf_set_features(struct ifcvf_hw *hw, u64 features);
> +u64 ifcvf_get_features(struct ifcvf_hw *hw);
>   #endif /* _IFCVF_H_ */
> diff --git a/drivers/vdpa/ifcvf/ifcvf_main.c b/drivers/vdpa/ifcvf/ifcvf_main.c
> index 4366320fb68d..0257ba98cffe 100644
> --- a/drivers/vdpa/ifcvf/ifcvf_main.c
> +++ b/drivers/vdpa/ifcvf/ifcvf_main.c
> @@ -358,53 +358,6 @@ static int ifcvf_request_irq(struct ifcvf_adapter *adapter)
>   	return 0;
>   }
>   
> -static int ifcvf_start_datapath(void *private)
> -{
> -	struct ifcvf_hw *vf = ifcvf_private_to_vf(private);
> -	u8 status;
> -	int ret;
> -
> -	ret = ifcvf_start_hw(vf);
> -	if (ret < 0) {
> -		status = ifcvf_get_status(vf);
> -		status |= VIRTIO_CONFIG_S_FAILED;
> -		ifcvf_set_status(vf, status);
> -	}
> -
> -	return ret;
> -}
> -
> -static int ifcvf_stop_datapath(void *private)
> -{
> -	struct ifcvf_hw *vf = ifcvf_private_to_vf(private);
> -	int i;
> -
> -	for (i = 0; i < vf->nr_vring; i++)
> -		vf->vring[i].cb.callback = NULL;
> -
> -	ifcvf_stop_hw(vf);
> -
> -	return 0;
> -}
> -
> -static void ifcvf_reset_vring(struct ifcvf_adapter *adapter)
> -{
> -	struct ifcvf_hw *vf = ifcvf_private_to_vf(adapter);
> -	int i;
> -
> -	for (i = 0; i < vf->nr_vring; i++) {
> -		vf->vring[i].last_avail_idx = 0;
> -		vf->vring[i].desc = 0;
> -		vf->vring[i].avail = 0;
> -		vf->vring[i].used = 0;
> -		vf->vring[i].ready = 0;
> -		vf->vring[i].cb.callback = NULL;
> -		vf->vring[i].cb.private = NULL;
> -	}
> -
> -	ifcvf_reset(vf);
> -}
> -
>   static struct ifcvf_adapter *vdpa_to_adapter(struct vdpa_device *vdpa_dev)
>   {
>   	return container_of(vdpa_dev, struct ifcvf_adapter, vdpa);
> @@ -426,7 +379,7 @@ static u64 ifcvf_vdpa_get_device_features(struct vdpa_device *vdpa_dev)
>   	u64 features;
>   
>   	if (type == VIRTIO_ID_NET || type == VIRTIO_ID_BLOCK)
> -		features = ifcvf_get_features(vf);
> +		features = ifcvf_get_device_features(vf);
>   	else {
>   		features = 0;
>   		IFCVF_ERR(pdev, "VIRTIO ID %u not supported\n", vf->dev_type);
> @@ -444,7 +397,7 @@ static int ifcvf_vdpa_set_driver_features(struct vdpa_device *vdpa_dev, u64 feat
>   	if (ret)
>   		return ret;
>   
> -	vf->req_features = features;
> +	ifcvf_set_features(vf, features);
>   
>   	return 0;
>   }
> @@ -453,7 +406,7 @@ static u64 ifcvf_vdpa_get_driver_features(struct vdpa_device *vdpa_dev)
>   {
>   	struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);
>   
> -	return vf->req_features;
> +	return ifcvf_get_features(vf);
>   }
>   
>   static u8 ifcvf_vdpa_get_status(struct vdpa_device *vdpa_dev)
> @@ -486,11 +439,6 @@ static void ifcvf_vdpa_set_status(struct vdpa_device *vdpa_dev, u8 status)
>   			ifcvf_set_status(vf, status);
>   			return;
>   		}
> -
> -		if (ifcvf_start_datapath(adapter) < 0)
> -			IFCVF_ERR(adapter->pdev,
> -				  "Failed to set ifcvf vdpa  status %u\n",
> -				  status);
>   	}
>   
>   	ifcvf_set_status(vf, status);
> @@ -509,12 +457,10 @@ static int ifcvf_vdpa_reset(struct vdpa_device *vdpa_dev)
>   	if (status_old == 0)
>   		return 0;
>   
> -	if (status_old & VIRTIO_CONFIG_S_DRIVER_OK) {
> -		ifcvf_stop_datapath(adapter);
> -		ifcvf_free_irq(adapter);
> -	}
> +	ifcvf_stop_hw(vf);
> +	ifcvf_free_irq(adapter);
>   
> -	ifcvf_reset_vring(adapter);
> +	ifcvf_reset(vf);
>   
>   	return 0;
>   }
> @@ -554,14 +500,17 @@ static void ifcvf_vdpa_set_vq_ready(struct vdpa_device *vdpa_dev,
>   {
>   	struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);
>   
> -	vf->vring[qid].ready = ready;
> +	ifcvf_set_vq_ready(vf, qid, ready);
>   }
>   
>   static bool ifcvf_vdpa_get_vq_ready(struct vdpa_device *vdpa_dev, u16 qid)
>   {
>   	struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);
> +	bool ready;
> +
> +	ready = ifcvf_get_vq_ready(vf, qid);
>   
> -	return vf->vring[qid].ready;
> +	return ready;
>   }
>   
>   static void ifcvf_vdpa_set_vq_num(struct vdpa_device *vdpa_dev, u16 qid,
> @@ -569,7 +518,7 @@ static void ifcvf_vdpa_set_vq_num(struct vdpa_device *vdpa_dev, u16 qid,
>   {
>   	struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);
>   
> -	vf->vring[qid].size = num;
> +	ifcvf_set_vq_num(vf, qid, num);
>   }
>   
>   static int ifcvf_vdpa_set_vq_address(struct vdpa_device *vdpa_dev, u16 qid,
> @@ -578,11 +527,7 @@ static int ifcvf_vdpa_set_vq_address(struct vdpa_device *vdpa_dev, u16 qid,
>   {
>   	struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);
>   
> -	vf->vring[qid].desc = desc_area;
> -	vf->vring[qid].avail = driver_area;
> -	vf->vring[qid].used = device_area;
> -
> -	return 0;
> +	return ifcvf_set_vq_address(vf, qid, desc_area, driver_area, device_area);
>   }
>   
>   static void ifcvf_vdpa_kick_vq(struct vdpa_device *vdpa_dev, u16 qid)

_______________________________________________
Virtualization mailing list
Virtualization@lists.linux-foundation.org
https://lists.linuxfoundation.org/mailman/listinfo/virtualization

^ permalink raw reply	[flat|nested] 4+ messages in thread

* Re: [PATCH V2] vDPA/ifcvf: allow userspace to suspend a queue
  2022-04-27  5:56   ` Jason Wang
  (?)
@ 2022-04-27  6:16   ` Zhu, Lingshan
  -1 siblings, 0 replies; 4+ messages in thread
From: Zhu, Lingshan @ 2022-04-27  6:16 UTC (permalink / raw)
  To: Jason Wang, mst; +Cc: virtualization, netdev



On 4/27/2022 1:56 PM, Jason Wang wrote:
>
> 在 2022/4/24 19:33, Zhu Lingshan 写道:
>> Formerly, ifcvf driver has implemented a lazy-initialization mechanism
>> for the virtqueues, it would store all virtqueue config fields that
>> passed down from the userspace, then load them to the virtqueues and
>> enable the queues upon DRIVER_OK.
>>
>> To allow the userspace to suspend a virtqueue,
>> this commit passes queue_enable to the virtqueue directly through
>> set_vq_ready().
>>
>> To support this feature, this commit implements all virtqueue
>> ops (set_vq_addr, set_vq_num and set_vq_ready) to take immediate
>> actions rather than lazy-initialization, so ifcvf_hw_enable() is retired.
>>
>> set_features() should take immediate actions as well.
>>
>> ifcvf_add_status() is retired because we should not add
>> status like FEATURES_OK by ifcvf's decision, this driver should
>> only set device status upon vdpa_ops.set_status()
>>
>> To avoid losing virtqueue configurations caused by multiple
>> rounds of reset(), this commit also refactors the device reset
>> routine: now it simply resets the config handler and the virtqueues,
>> and performs device-reset() only once.
>
>
> It looks like the patch tries to do too many things at one run. I'd 
> suggest to split them:
>
>
> 1) on-the-fly set via set_vq_ready(), but I don't see a reason why we 
> need to change other lazy stuffs, since setting queue_enable to 1 
> before DRIVER_OK won't start the virtqueue anyhow
> 2) if necessary, converting the lazy stuffs
> 3) the synchronize_irq() fixes
> 4) other stuffs
Thanks! I will try!
>
> Thanks
>
>
>>
>> Signed-off-by: Zhu Lingshan <lingshan.zhu@intel.com>
>> ---
>>   drivers/vdpa/ifcvf/ifcvf_base.c | 150 +++++++++++++++++++-------------
>>   drivers/vdpa/ifcvf/ifcvf_base.h |  16 ++--
>>   drivers/vdpa/ifcvf/ifcvf_main.c |  81 +++--------------
>>   3 files changed, 111 insertions(+), 136 deletions(-)
>>
>> diff --git a/drivers/vdpa/ifcvf/ifcvf_base.c 
>> b/drivers/vdpa/ifcvf/ifcvf_base.c
>> index 48c4dadb0c7c..bbc9007a6f34 100644
>> --- a/drivers/vdpa/ifcvf/ifcvf_base.c
>> +++ b/drivers/vdpa/ifcvf/ifcvf_base.c
>> @@ -179,20 +179,7 @@ void ifcvf_set_status(struct ifcvf_hw *hw, u8 
>> status)
>>     void ifcvf_reset(struct ifcvf_hw *hw)
>>   {
>> -    hw->config_cb.callback = NULL;
>> -    hw->config_cb.private = NULL;
>> -
>>       ifcvf_set_status(hw, 0);
>> -    /* flush set_status, make sure VF is stopped, reset */
>> -    ifcvf_get_status(hw);
>> -}
>> -
>> -static void ifcvf_add_status(struct ifcvf_hw *hw, u8 status)
>> -{
>> -    if (status != 0)
>> -        status |= ifcvf_get_status(hw);
>> -
>> -    ifcvf_set_status(hw, status);
>>       ifcvf_get_status(hw);
>>   }
>>   @@ -213,7 +200,7 @@ u64 ifcvf_get_hw_features(struct ifcvf_hw *hw)
>>       return features;
>>   }
>>   -u64 ifcvf_get_features(struct ifcvf_hw *hw)
>> +u64 ifcvf_get_device_features(struct ifcvf_hw *hw)
>>   {
>>       return hw->hw_features;
>>   }
>> @@ -280,7 +267,7 @@ void ifcvf_write_dev_config(struct ifcvf_hw *hw, 
>> u64 offset,
>>           vp_iowrite8(*p++, hw->dev_cfg + offset + i);
>>   }
>>   -static void ifcvf_set_features(struct ifcvf_hw *hw, u64 features)
>> +void ifcvf_set_features(struct ifcvf_hw *hw, u64 features)
>>   {
>>       struct virtio_pci_common_cfg __iomem *cfg = hw->common_cfg;
>>   @@ -289,22 +276,22 @@ static void ifcvf_set_features(struct 
>> ifcvf_hw *hw, u64 features)
>>         vp_iowrite32(1, &cfg->guest_feature_select);
>>       vp_iowrite32(features >> 32, &cfg->guest_feature);
>> +
>> +    vp_ioread32(&cfg->guest_feature);
>>   }
>>   -static int ifcvf_config_features(struct ifcvf_hw *hw)
>> +u64 ifcvf_get_features(struct ifcvf_hw *hw)
>>   {
>> -    struct ifcvf_adapter *ifcvf;
>> +    struct virtio_pci_common_cfg __iomem *cfg = hw->common_cfg;
>> +    u64 features;
>>   -    ifcvf = vf_to_adapter(hw);
>> -    ifcvf_set_features(hw, hw->req_features);
>> -    ifcvf_add_status(hw, VIRTIO_CONFIG_S_FEATURES_OK);
>> +    vp_iowrite32(0, &cfg->device_feature_select);
>> +    features = vp_ioread32(&cfg->device_feature);
>>   -    if (!(ifcvf_get_status(hw) & VIRTIO_CONFIG_S_FEATURES_OK)) {
>> -        IFCVF_ERR(ifcvf->pdev, "Failed to set FEATURES_OK status\n");
>> -        return -EIO;
>> -    }
>> +    vp_iowrite32(1, &cfg->device_feature_select);
>> +    features |= ((u64)vp_ioread32(&cfg->guest_feature) << 32);
>>   -    return 0;
>> +    return features;
>>   }
>>     u16 ifcvf_get_vq_state(struct ifcvf_hw *hw, u16 qid)
>> @@ -331,68 +318,111 @@ int ifcvf_set_vq_state(struct ifcvf_hw *hw, 
>> u16 qid, u16 num)
>>       ifcvf_lm = (struct ifcvf_lm_cfg __iomem *)hw->lm_cfg;
>>       q_pair_id = qid / hw->nr_vring;
>>       avail_idx_addr = 
>> &ifcvf_lm->vring_lm_cfg[q_pair_id].idx_addr[qid % 2];
>> -    hw->vring[qid].last_avail_idx = num;
>>       vp_iowrite16(num, avail_idx_addr);
>>         return 0;
>>   }
>>   -static int ifcvf_hw_enable(struct ifcvf_hw *hw)
>> +void ifcvf_set_vq_num(struct ifcvf_hw *hw, u16 qid, u32 num)
>>   {
>> -    struct virtio_pci_common_cfg __iomem *cfg;
>> -    u32 i;
>> +    struct virtio_pci_common_cfg __iomem *cfg = hw->common_cfg;
>>   -    cfg = hw->common_cfg;
>> -    for (i = 0; i < hw->nr_vring; i++) {
>> -        if (!hw->vring[i].ready)
>> -            break;
>> +    vp_iowrite16(qid, &cfg->queue_select);
>> +    vp_iowrite16(num, &cfg->queue_size);
>> +}
>>   -        vp_iowrite16(i, &cfg->queue_select);
>> -        vp_iowrite64_twopart(hw->vring[i].desc, &cfg->queue_desc_lo,
>> -                     &cfg->queue_desc_hi);
>> -        vp_iowrite64_twopart(hw->vring[i].avail, &cfg->queue_avail_lo,
>> -                      &cfg->queue_avail_hi);
>> -        vp_iowrite64_twopart(hw->vring[i].used, &cfg->queue_used_lo,
>> -                     &cfg->queue_used_hi);
>> -        vp_iowrite16(hw->vring[i].size, &cfg->queue_size);
>> -        ifcvf_set_vq_state(hw, i, hw->vring[i].last_avail_idx);
>> -        vp_iowrite16(1, &cfg->queue_enable);
>> -    }
>> +int ifcvf_set_vq_address(struct ifcvf_hw *hw, u16 qid, u64 desc_area,
>> +             u64 driver_area, u64 device_area)
>> +{
>> +    struct virtio_pci_common_cfg __iomem *cfg = hw->common_cfg;
>> +
>> +    vp_iowrite16(qid, &cfg->queue_select);
>> +    vp_iowrite64_twopart(desc_area, &cfg->queue_desc_lo,
>> +                 &cfg->queue_desc_hi);
>> +    vp_iowrite64_twopart(driver_area, &cfg->queue_avail_lo,
>> +                 &cfg->queue_avail_hi);
>> +    vp_iowrite64_twopart(device_area, &cfg->queue_used_lo,
>> +                 &cfg->queue_used_hi);
>>         return 0;
>>   }
>>   -static void ifcvf_hw_disable(struct ifcvf_hw *hw)
>> +void ifcvf_set_vq_ready(struct ifcvf_hw *hw, u16 qid, bool ready)
>>   {
>> -    u32 i;
>> +    struct virtio_pci_common_cfg __iomem *cfg = hw->common_cfg;
>> +
>> +    vp_iowrite16(qid, &cfg->queue_select);
>> +    /* write 0 to queue_enable will suspend a queue*/
>> +    vp_iowrite16(ready, &cfg->queue_enable);
>> +}
>> +
>> +bool ifcvf_get_vq_ready(struct ifcvf_hw *hw, u16 qid)
>> +{
>> +    struct virtio_pci_common_cfg __iomem *cfg = hw->common_cfg;
>> +    bool queue_enable;
>> +
>> +    vp_iowrite16(qid, &cfg->queue_select);
>> +    queue_enable = vp_ioread16(&cfg->queue_enable);
>> +
>> +    return (bool)queue_enable;
>> +}
>> +
>> +static void synchronize_per_vq_irq(struct ifcvf_hw *hw)
>> +{
>> +    int i;
>>   -    ifcvf_set_config_vector(hw, VIRTIO_MSI_NO_VECTOR);
>>       for (i = 0; i < hw->nr_vring; i++) {
>> -        ifcvf_set_vq_vector(hw, i, VIRTIO_MSI_NO_VECTOR);
>> +        if (hw->vring[i].irq != -EINVAL)
>> +            synchronize_irq(hw->vring[i].irq);
>>       }
>>   }
>>   -int ifcvf_start_hw(struct ifcvf_hw *hw)
>> +static void synchronize_vqs_reused_irq(struct ifcvf_hw *hw)
>>   {
>> -    ifcvf_reset(hw);
>> -    ifcvf_add_status(hw, VIRTIO_CONFIG_S_ACKNOWLEDGE);
>> -    ifcvf_add_status(hw, VIRTIO_CONFIG_S_DRIVER);
>> +    if (hw->vqs_reused_irq != -EINVAL)
>> +        synchronize_irq(hw->vqs_reused_irq);
>> +}
>>   -    if (ifcvf_config_features(hw) < 0)
>> -        return -EINVAL;
>> +static void synchronize_vq_irq(struct ifcvf_hw *hw)
>> +{
>> +    u8 status = hw->msix_vector_status;
>>   -    if (ifcvf_hw_enable(hw) < 0)
>> -        return -EINVAL;
>> +    if (status == MSIX_VECTOR_PER_VQ_AND_CONFIG)
>> +        synchronize_per_vq_irq(hw);
>> +    else
>> +        synchronize_vqs_reused_irq(hw);
>> +}
>>   -    ifcvf_add_status(hw, VIRTIO_CONFIG_S_DRIVER_OK);
>> +static void synchronize_config_irq(struct ifcvf_hw *hw)
>> +{
>> +    if (hw->config_irq != -EINVAL)
>> +        synchronize_irq(hw->config_irq);
>> +}
>>   -    return 0;
>> +static void ifcvf_reset_vring(struct ifcvf_hw *hw)
>> +{
>> +    int i;
>> +
>> +    for (i = 0; i < hw->nr_vring; i++) {
>> +        synchronize_vq_irq(hw);
>> +        hw->vring[i].cb.callback = NULL;
>> +        hw->vring[i].cb.private = NULL;
>> +        ifcvf_set_vq_vector(hw, i, VIRTIO_MSI_NO_VECTOR);
>> +    }
>> +}
>> +
>> +static void ifcvf_reset_config_handler(struct ifcvf_hw *hw)
>> +{
>> +    synchronize_config_irq(hw);
>> +    hw->config_cb.callback = NULL;
>> +    hw->config_cb.private = NULL;
>> +    ifcvf_set_config_vector(hw, VIRTIO_MSI_NO_VECTOR);
>>   }
>>     void ifcvf_stop_hw(struct ifcvf_hw *hw)
>>   {
>> -    ifcvf_hw_disable(hw);
>> -    ifcvf_reset(hw);
>> +    ifcvf_reset_vring(hw);
>> +    ifcvf_reset_config_handler(hw);
>>   }
>>     void ifcvf_notify_queue(struct ifcvf_hw *hw, u16 qid)
>> diff --git a/drivers/vdpa/ifcvf/ifcvf_base.h 
>> b/drivers/vdpa/ifcvf/ifcvf_base.h
>> index 115b61f4924b..f3dce0d795cb 100644
>> --- a/drivers/vdpa/ifcvf/ifcvf_base.h
>> +++ b/drivers/vdpa/ifcvf/ifcvf_base.h
>> @@ -49,12 +49,6 @@
>>   #define MSIX_VECTOR_DEV_SHARED            3
>>     struct vring_info {
>> -    u64 desc;
>> -    u64 avail;
>> -    u64 used;
>> -    u16 size;
>> -    u16 last_avail_idx;
>> -    bool ready;
>>       void __iomem *notify_addr;
>>       phys_addr_t notify_pa;
>>       u32 irq;
>> @@ -76,7 +70,6 @@ struct ifcvf_hw {
>>       phys_addr_t notify_base_pa;
>>       u32 notify_off_multiplier;
>>       u32 dev_type;
>> -    u64 req_features;
>>       u64 hw_features;
>>       struct virtio_pci_common_cfg __iomem *common_cfg;
>>       void __iomem *dev_cfg;
>> @@ -123,7 +116,7 @@ u8 ifcvf_get_status(struct ifcvf_hw *hw);
>>   void ifcvf_set_status(struct ifcvf_hw *hw, u8 status);
>>   void io_write64_twopart(u64 val, u32 *lo, u32 *hi);
>>   void ifcvf_reset(struct ifcvf_hw *hw);
>> -u64 ifcvf_get_features(struct ifcvf_hw *hw);
>> +u64 ifcvf_get_device_features(struct ifcvf_hw *hw);
>>   u64 ifcvf_get_hw_features(struct ifcvf_hw *hw);
>>   int ifcvf_verify_min_features(struct ifcvf_hw *hw, u64 features);
>>   u16 ifcvf_get_vq_state(struct ifcvf_hw *hw, u16 qid);
>> @@ -131,6 +124,13 @@ int ifcvf_set_vq_state(struct ifcvf_hw *hw, u16 
>> qid, u16 num);
>>   struct ifcvf_adapter *vf_to_adapter(struct ifcvf_hw *hw);
>>   int ifcvf_probed_virtio_net(struct ifcvf_hw *hw);
>>   u32 ifcvf_get_config_size(struct ifcvf_hw *hw);
>> +int ifcvf_set_vq_address(struct ifcvf_hw *hw, u16 qid, u64 desc_area,
>> +             u64 driver_area, u64 device_area);
>>   u16 ifcvf_set_vq_vector(struct ifcvf_hw *hw, u16 qid, int vector);
>>   u16 ifcvf_set_config_vector(struct ifcvf_hw *hw, int vector);
>> +void ifcvf_set_vq_num(struct ifcvf_hw *hw, u16 qid, u32 num);
>> +void ifcvf_set_vq_ready(struct ifcvf_hw *hw, u16 qid, bool ready);
>> +bool ifcvf_get_vq_ready(struct ifcvf_hw *hw, u16 qid);
>> +void ifcvf_set_features(struct ifcvf_hw *hw, u64 features);
>> +u64 ifcvf_get_features(struct ifcvf_hw *hw);
>>   #endif /* _IFCVF_H_ */
>> diff --git a/drivers/vdpa/ifcvf/ifcvf_main.c 
>> b/drivers/vdpa/ifcvf/ifcvf_main.c
>> index 4366320fb68d..0257ba98cffe 100644
>> --- a/drivers/vdpa/ifcvf/ifcvf_main.c
>> +++ b/drivers/vdpa/ifcvf/ifcvf_main.c
>> @@ -358,53 +358,6 @@ static int ifcvf_request_irq(struct 
>> ifcvf_adapter *adapter)
>>       return 0;
>>   }
>>   -static int ifcvf_start_datapath(void *private)
>> -{
>> -    struct ifcvf_hw *vf = ifcvf_private_to_vf(private);
>> -    u8 status;
>> -    int ret;
>> -
>> -    ret = ifcvf_start_hw(vf);
>> -    if (ret < 0) {
>> -        status = ifcvf_get_status(vf);
>> -        status |= VIRTIO_CONFIG_S_FAILED;
>> -        ifcvf_set_status(vf, status);
>> -    }
>> -
>> -    return ret;
>> -}
>> -
>> -static int ifcvf_stop_datapath(void *private)
>> -{
>> -    struct ifcvf_hw *vf = ifcvf_private_to_vf(private);
>> -    int i;
>> -
>> -    for (i = 0; i < vf->nr_vring; i++)
>> -        vf->vring[i].cb.callback = NULL;
>> -
>> -    ifcvf_stop_hw(vf);
>> -
>> -    return 0;
>> -}
>> -
>> -static void ifcvf_reset_vring(struct ifcvf_adapter *adapter)
>> -{
>> -    struct ifcvf_hw *vf = ifcvf_private_to_vf(adapter);
>> -    int i;
>> -
>> -    for (i = 0; i < vf->nr_vring; i++) {
>> -        vf->vring[i].last_avail_idx = 0;
>> -        vf->vring[i].desc = 0;
>> -        vf->vring[i].avail = 0;
>> -        vf->vring[i].used = 0;
>> -        vf->vring[i].ready = 0;
>> -        vf->vring[i].cb.callback = NULL;
>> -        vf->vring[i].cb.private = NULL;
>> -    }
>> -
>> -    ifcvf_reset(vf);
>> -}
>> -
>>   static struct ifcvf_adapter *vdpa_to_adapter(struct vdpa_device 
>> *vdpa_dev)
>>   {
>>       return container_of(vdpa_dev, struct ifcvf_adapter, vdpa);
>> @@ -426,7 +379,7 @@ static u64 ifcvf_vdpa_get_device_features(struct 
>> vdpa_device *vdpa_dev)
>>       u64 features;
>>         if (type == VIRTIO_ID_NET || type == VIRTIO_ID_BLOCK)
>> -        features = ifcvf_get_features(vf);
>> +        features = ifcvf_get_device_features(vf);
>>       else {
>>           features = 0;
>>           IFCVF_ERR(pdev, "VIRTIO ID %u not supported\n", vf->dev_type);
>> @@ -444,7 +397,7 @@ static int ifcvf_vdpa_set_driver_features(struct 
>> vdpa_device *vdpa_dev, u64 feat
>>       if (ret)
>>           return ret;
>>   -    vf->req_features = features;
>> +    ifcvf_set_features(vf, features);
>>         return 0;
>>   }
>> @@ -453,7 +406,7 @@ static u64 ifcvf_vdpa_get_driver_features(struct 
>> vdpa_device *vdpa_dev)
>>   {
>>       struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);
>>   -    return vf->req_features;
>> +    return ifcvf_get_features(vf);
>>   }
>>     static u8 ifcvf_vdpa_get_status(struct vdpa_device *vdpa_dev)
>> @@ -486,11 +439,6 @@ static void ifcvf_vdpa_set_status(struct 
>> vdpa_device *vdpa_dev, u8 status)
>>               ifcvf_set_status(vf, status);
>>               return;
>>           }
>> -
>> -        if (ifcvf_start_datapath(adapter) < 0)
>> -            IFCVF_ERR(adapter->pdev,
>> -                  "Failed to set ifcvf vdpa  status %u\n",
>> -                  status);
>>       }
>>         ifcvf_set_status(vf, status);
>> @@ -509,12 +457,10 @@ static int ifcvf_vdpa_reset(struct vdpa_device 
>> *vdpa_dev)
>>       if (status_old == 0)
>>           return 0;
>>   -    if (status_old & VIRTIO_CONFIG_S_DRIVER_OK) {
>> -        ifcvf_stop_datapath(adapter);
>> -        ifcvf_free_irq(adapter);
>> -    }
>> +    ifcvf_stop_hw(vf);
>> +    ifcvf_free_irq(adapter);
>>   -    ifcvf_reset_vring(adapter);
>> +    ifcvf_reset(vf);
>>         return 0;
>>   }
>> @@ -554,14 +500,17 @@ static void ifcvf_vdpa_set_vq_ready(struct 
>> vdpa_device *vdpa_dev,
>>   {
>>       struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);
>>   -    vf->vring[qid].ready = ready;
>> +    ifcvf_set_vq_ready(vf, qid, ready);
>>   }
>>     static bool ifcvf_vdpa_get_vq_ready(struct vdpa_device *vdpa_dev, 
>> u16 qid)
>>   {
>>       struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);
>> +    bool ready;
>> +
>> +    ready = ifcvf_get_vq_ready(vf, qid);
>>   -    return vf->vring[qid].ready;
>> +    return ready;
>>   }
>>     static void ifcvf_vdpa_set_vq_num(struct vdpa_device *vdpa_dev, 
>> u16 qid,
>> @@ -569,7 +518,7 @@ static void ifcvf_vdpa_set_vq_num(struct 
>> vdpa_device *vdpa_dev, u16 qid,
>>   {
>>       struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);
>>   -    vf->vring[qid].size = num;
>> +    ifcvf_set_vq_num(vf, qid, num);
>>   }
>>     static int ifcvf_vdpa_set_vq_address(struct vdpa_device 
>> *vdpa_dev, u16 qid,
>> @@ -578,11 +527,7 @@ static int ifcvf_vdpa_set_vq_address(struct 
>> vdpa_device *vdpa_dev, u16 qid,
>>   {
>>       struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);
>>   -    vf->vring[qid].desc = desc_area;
>> -    vf->vring[qid].avail = driver_area;
>> -    vf->vring[qid].used = device_area;
>> -
>> -    return 0;
>> +    return ifcvf_set_vq_address(vf, qid, desc_area, driver_area, 
>> device_area);
>>   }
>>     static void ifcvf_vdpa_kick_vq(struct vdpa_device *vdpa_dev, u16 
>> qid)
>


^ permalink raw reply	[flat|nested] 4+ messages in thread

end of thread, other threads:[~2022-04-27  6:16 UTC | newest]

Thread overview: 4+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2022-04-24 11:33 [PATCH V2] vDPA/ifcvf: allow userspace to suspend a queue Zhu Lingshan
2022-04-27  5:56 ` Jason Wang
2022-04-27  5:56   ` Jason Wang
2022-04-27  6:16   ` Zhu, Lingshan

This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.