From: Jason Wang <jasowang@redhat.com>
To: Eli Cohen <elic@nvidia.com>
Cc: mst@redhat.com, lulu@redhat.com, kvm@vger.kernel.org,
	virtualization@lists.linux-foundation.org,
	netdev@vger.kernel.org, linux-kernel@vger.kernel.org,
	rob.miller@broadcom.com, lingshan.zhu@intel.com,
	eperezma@redhat.com, hanand@xilinx.com, mhabets@solarflare.com,
	eli@mellanox.com, amorenoz@redhat.com,
	maxime.coquelin@redhat.com, stefanha@redhat.com,
	sgarzare@redhat.com
Subject: Re: [RFC PATCH 05/24] vhost-vdpa: passing iotlb to IOMMU mapping helpers
Date: Fri, 9 Oct 2020 10:01:05 +0800
Message-ID: <5f083453-d070-d8a8-1f75-5de1c299cd0b@redhat.com>
In-Reply-To: <20200930112609.GA223360@mtl-vdi-166.wap.labs.mlnx>


On 2020/9/30 7:26 PM, Eli Cohen wrote:
> On Thu, Sep 24, 2020 at 11:21:06AM +0800, Jason Wang wrote:
>> To prepare for ASID support in vhost-vdpa, pass the IOTLB object to the
>> DMA helpers explicitly.
> Maybe it's worth mentioning here that this patch does not change any
> functionality and is presented as a preparation for passing different
> iotlbs instead of using dev->iotlb.


Right, let me add that in the next version.

Thanks


>
>> Signed-off-by: Jason Wang <jasowang@redhat.com>
>> ---
>>   drivers/vhost/vdpa.c | 40 ++++++++++++++++++++++------------------
>>   1 file changed, 22 insertions(+), 18 deletions(-)
>>
>> diff --git a/drivers/vhost/vdpa.c b/drivers/vhost/vdpa.c
>> index 9c641274b9f3..74bef1c15a70 100644
>> --- a/drivers/vhost/vdpa.c
>> +++ b/drivers/vhost/vdpa.c
>> @@ -489,10 +489,11 @@ static long vhost_vdpa_unlocked_ioctl(struct file *filep,
>>   	return r;
>>   }
>>   
>> -static void vhost_vdpa_iotlb_unmap(struct vhost_vdpa *v, u64 start, u64 last)
>> +static void vhost_vdpa_iotlb_unmap(struct vhost_vdpa *v,
>> +				   struct vhost_iotlb *iotlb,
>> +				   u64 start, u64 last)
>>   {
>>   	struct vhost_dev *dev = &v->vdev;
>> -	struct vhost_iotlb *iotlb = dev->iotlb;
>>   	struct vhost_iotlb_map *map;
>>   	struct page *page;
>>   	unsigned long pfn, pinned;
>> @@ -514,8 +515,9 @@ static void vhost_vdpa_iotlb_unmap(struct vhost_vdpa *v, u64 start, u64 last)
>>   static void vhost_vdpa_iotlb_free(struct vhost_vdpa *v)
>>   {
>>   	struct vhost_dev *dev = &v->vdev;
>> +	struct vhost_iotlb *iotlb = dev->iotlb;
>>   
>> -	vhost_vdpa_iotlb_unmap(v, 0ULL, 0ULL - 1);
>> +	vhost_vdpa_iotlb_unmap(v, iotlb, 0ULL, 0ULL - 1);
>>   	kfree(dev->iotlb);
>>   	dev->iotlb = NULL;
>>   }
>> @@ -542,15 +544,14 @@ static int perm_to_iommu_flags(u32 perm)
>>   	return flags | IOMMU_CACHE;
>>   }
>>   
>> -static int vhost_vdpa_map(struct vhost_vdpa *v,
>> +static int vhost_vdpa_map(struct vhost_vdpa *v, struct vhost_iotlb *iotlb,
>>   			  u64 iova, u64 size, u64 pa, u32 perm)
>>   {
>> -	struct vhost_dev *dev = &v->vdev;
>>   	struct vdpa_device *vdpa = v->vdpa;
>>   	const struct vdpa_config_ops *ops = vdpa->config;
>>   	int r = 0;
>>   
>> -	r = vhost_iotlb_add_range(dev->iotlb, iova, iova + size - 1,
>> +	r = vhost_iotlb_add_range(iotlb, iova, iova + size - 1,
>>   				  pa, perm);
>>   	if (r)
>>   		return r;
>> @@ -559,7 +560,7 @@ static int vhost_vdpa_map(struct vhost_vdpa *v,
>>   		r = ops->dma_map(vdpa, iova, size, pa, perm);
>>   	} else if (ops->set_map) {
>>   		if (!v->in_batch)
>> -			r = ops->set_map(vdpa, dev->iotlb);
>> +			r = ops->set_map(vdpa, iotlb);
>>   	} else {
>>   		r = iommu_map(v->domain, iova, pa, size,
>>   			      perm_to_iommu_flags(perm));
>> @@ -568,29 +569,30 @@ static int vhost_vdpa_map(struct vhost_vdpa *v,
>>   	return r;
>>   }
>>   
>> -static void vhost_vdpa_unmap(struct vhost_vdpa *v, u64 iova, u64 size)
>> +static void vhost_vdpa_unmap(struct vhost_vdpa *v,
>> +			     struct vhost_iotlb *iotlb,
>> +			     u64 iova, u64 size)
>>   {
>> -	struct vhost_dev *dev = &v->vdev;
>>   	struct vdpa_device *vdpa = v->vdpa;
>>   	const struct vdpa_config_ops *ops = vdpa->config;
>>   
>> -	vhost_vdpa_iotlb_unmap(v, iova, iova + size - 1);
>> +	vhost_vdpa_iotlb_unmap(v, iotlb, iova, iova + size - 1);
>>   
>>   	if (ops->dma_map) {
>>   		ops->dma_unmap(vdpa, iova, size);
>>   	} else if (ops->set_map) {
>>   		if (!v->in_batch)
>> -			ops->set_map(vdpa, dev->iotlb);
>> +			ops->set_map(vdpa, iotlb);
>>   	} else {
>>   		iommu_unmap(v->domain, iova, size);
>>   	}
>>   }
>>   
>>   static int vhost_vdpa_process_iotlb_update(struct vhost_vdpa *v,
>> +					   struct vhost_iotlb *iotlb,
>>   					   struct vhost_iotlb_msg *msg)
>>   {
>>   	struct vhost_dev *dev = &v->vdev;
>> -	struct vhost_iotlb *iotlb = dev->iotlb;
>>   	struct page **page_list;
>>   	unsigned long list_size = PAGE_SIZE / sizeof(struct page *);
>>   	unsigned int gup_flags = FOLL_LONGTERM;
>> @@ -644,7 +646,7 @@ static int vhost_vdpa_process_iotlb_update(struct vhost_vdpa *v,
>>   			if (last_pfn && (this_pfn != last_pfn + 1)) {
>>   				/* Pin a contiguous chunk of memory */
>>   				csize = (last_pfn - map_pfn + 1) << PAGE_SHIFT;
>> -				if (vhost_vdpa_map(v, iova, csize,
>> +				if (vhost_vdpa_map(v, iotlb, iova, csize,
>>   						   map_pfn << PAGE_SHIFT,
>>   						   msg->perm))
>>   					goto out;
>> @@ -660,11 +662,12 @@ static int vhost_vdpa_process_iotlb_update(struct vhost_vdpa *v,
>>   	}
>>   
>>   	/* Pin the rest chunk */
>> -	ret = vhost_vdpa_map(v, iova, (last_pfn - map_pfn + 1) << PAGE_SHIFT,
>> +	ret = vhost_vdpa_map(v, iotlb, iova,
>> +			     (last_pfn - map_pfn + 1) << PAGE_SHIFT,
>>   			     map_pfn << PAGE_SHIFT, msg->perm);
>>   out:
>>   	if (ret) {
>> -		vhost_vdpa_unmap(v, msg->iova, msg->size);
>> +		vhost_vdpa_unmap(v, iotlb, msg->iova, msg->size);
>>   		atomic64_sub(npages, &dev->mm->pinned_vm);
>>   	}
>>   	mmap_read_unlock(dev->mm);
>> @@ -678,6 +681,7 @@ static int vhost_vdpa_process_iotlb_msg(struct vhost_dev *dev,
>>   	struct vhost_vdpa *v = container_of(dev, struct vhost_vdpa, vdev);
>>   	struct vdpa_device *vdpa = v->vdpa;
>>   	const struct vdpa_config_ops *ops = vdpa->config;
>> +	struct vhost_iotlb *iotlb = dev->iotlb;
>>   	int r = 0;
>>   
>>   	r = vhost_dev_check_owner(dev);
>> @@ -686,17 +690,17 @@ static int vhost_vdpa_process_iotlb_msg(struct vhost_dev *dev,
>>   
>>   	switch (msg->type) {
>>   	case VHOST_IOTLB_UPDATE:
>> -		r = vhost_vdpa_process_iotlb_update(v, msg);
>> +		r = vhost_vdpa_process_iotlb_update(v, iotlb, msg);
>>   		break;
>>   	case VHOST_IOTLB_INVALIDATE:
>> -		vhost_vdpa_unmap(v, msg->iova, msg->size);
>> +		vhost_vdpa_unmap(v, iotlb, msg->iova, msg->size);
>>   		break;
>>   	case VHOST_IOTLB_BATCH_BEGIN:
>>   		v->in_batch = true;
>>   		break;
>>   	case VHOST_IOTLB_BATCH_END:
>>   		if (v->in_batch && ops->set_map)
>> -			ops->set_map(vdpa, dev->iotlb);
>> +			ops->set_map(vdpa, iotlb);
>>   		v->in_batch = false;
>>   		break;
>>   	default:
>> -- 
>> 2.20.1
>>
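
As a rough illustration of where this refactoring is heading (this sketch is
not part of the patch, and vhost_vdpa_get_as_iotlb() is a made-up name that
only stands in for the per-ASID lookup added by later patches in the series),
a caller could eventually select an IOTLB per address space and hand it to
the reworked helper:

static int vhost_vdpa_map_for_asid(struct vhost_vdpa *v, u32 asid,
				   u64 iova, u64 size, u64 pa, u32 perm)
{
	/* Hypothetical lookup: one vhost_iotlb per address space ID. */
	struct vhost_iotlb *iotlb = vhost_vdpa_get_as_iotlb(v, asid);

	if (!iotlb)
		return -EINVAL;

	/*
	 * Same helper as in this patch; the only difference is that the
	 * IOTLB is now chosen per ASID instead of always being dev->iotlb.
	 */
	return vhost_vdpa_map(v, iotlb, iova, size, pa, perm);
}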

