linux-kernel.vger.kernel.org archive mirror
 help / color / mirror / Atom feed
From: Guo Zhi <qtxuning1999@sjtu.edu.cn>
To: jasowang <jasowang@redhat.com>
Cc: eperezma <eperezma@redhat.com>, sgarzare <sgarzare@redhat.com>,
	Michael Tsirkin <mst@redhat.com>, netdev <netdev@vger.kernel.org>,
	linux-kernel <linux-kernel@vger.kernel.org>,
	kvm list <kvm@vger.kernel.org>,
	virtualization <virtualization@lists.linux-foundation.org>
Subject: Re: [RFC v3 6/7] virtio: in order support for virtio_ring
Date: Thu, 8 Sep 2022 17:36:57 +0800 (CST)	[thread overview]
Message-ID: <460316643.171392.1662629817302.JavaMail.zimbra@sjtu.edu.cn> (raw)
In-Reply-To: <b1a7c454-860d-6a40-9da1-2a06f30ff1be@redhat.com>



----- Original Message -----
> From: "jasowang" <jasowang@redhat.com>
> To: "Guo Zhi" <qtxuning1999@sjtu.edu.cn>, "eperezma" <eperezma@redhat.com>, "sgarzare" <sgarzare@redhat.com>, "Michael
> Tsirkin" <mst@redhat.com>
> Cc: "netdev" <netdev@vger.kernel.org>, "linux-kernel" <linux-kernel@vger.kernel.org>, "kvm list" <kvm@vger.kernel.org>,
> "virtualization" <virtualization@lists.linux-foundation.org>
> Sent: Wednesday, September 7, 2022 1:38:03 PM
> Subject: Re: [RFC v3 6/7] virtio: in order support for virtio_ring

> 在 2022/9/1 13:54, Guo Zhi 写道:
>> If in order feature negotiated, we can skip the used ring to get
>> buffer's desc id sequentially.  For skipped buffers in the batch, the
>> used ring doesn't contain the buffer length; actually there is no need
>> to get the skipped buffers' lengths as they are tx buffers.
>>
>> Signed-off-by: Guo Zhi <qtxuning1999@sjtu.edu.cn>
>> ---
>>   drivers/virtio/virtio_ring.c | 74 +++++++++++++++++++++++++++++++-----
>>   1 file changed, 64 insertions(+), 10 deletions(-)
>>
>> diff --git a/drivers/virtio/virtio_ring.c b/drivers/virtio/virtio_ring.c
>> index 00aa4b7a49c2..d52624179b43 100644
>> --- a/drivers/virtio/virtio_ring.c
>> +++ b/drivers/virtio/virtio_ring.c
>> @@ -103,6 +103,9 @@ struct vring_virtqueue {
>>   	/* Host supports indirect buffers */
>>   	bool indirect;
>>   
>> +	/* Host supports in order feature */
>> +	bool in_order;
>> +
>>   	/* Host publishes avail event idx */
>>   	bool event;
>>   
>> @@ -144,6 +147,19 @@ struct vring_virtqueue {
>>   			/* DMA address and size information */
>>   			dma_addr_t queue_dma_addr;
>>   			size_t queue_size_in_bytes;
>> +
>> +			/* If in_order feature is negotiated, here is the next head to consume */
>> +			u16 next_desc_begin;
>> +			/*
>> +			 * If in_order feature is negotiated,
>> +			 * here is the last descriptor's id in the batch
>> +			 */
>> +			u16 last_desc_in_batch;
>> +			/*
>> +			 * If in_order feature is negotiated,
>> +			 * buffers except last buffer in the batch are skipped buffer
>> +			 */
>> +			bool is_skipped_buffer;
>>   		} split;
>>   
>>   		/* Available for packed ring */
>> @@ -584,8 +600,6 @@ static inline int virtqueue_add_split(struct virtqueue *_vq,
>>   					 total_sg * sizeof(struct vring_desc),
>>   					 VRING_DESC_F_INDIRECT,
>>   					 false);
>> -		vq->split.desc_extra[head & (vq->split.vring.num - 1)].flags &=
>> -			~VRING_DESC_F_NEXT;
> 
> 
> This seems irrelevant.
> 
> 
I will put this change in another commit; it ended up here due to a git rebase mistake.
Thanks.

>>   	}
>>   
>>   	/* We're using some buffers from the free list. */
>> @@ -701,8 +715,16 @@ static void detach_buf_split(struct vring_virtqueue *vq,
>> unsigned int head,
>>   	}
>>   
>>   	vring_unmap_one_split(vq, i);
>> -	vq->split.desc_extra[i].next = vq->free_head;
>> -	vq->free_head = head;
>> +	/*
>> +	 * If in_order feature is negotiated,
>> +	 * the descriptors are made available in order.
>> +	 * Since the free_head is already a circular list,
>> +	 * it must consume it sequentially.
>> +	 */
>> +	if (!vq->in_order) {
>> +		vq->split.desc_extra[i].next = vq->free_head;
>> +		vq->free_head = head;
>> +	}
>>   
>>   	/* Plus final descriptor */
>>   	vq->vq.num_free++;
>> @@ -744,7 +766,7 @@ static void *virtqueue_get_buf_ctx_split(struct virtqueue
>> *_vq,
>>   {
>>   	struct vring_virtqueue *vq = to_vvq(_vq);
>>   	void *ret;
>> -	unsigned int i;
>> +	unsigned int i, j;
>>   	u16 last_used;
>>   
>>   	START_USE(vq);
>> @@ -763,11 +785,38 @@ static void *virtqueue_get_buf_ctx_split(struct virtqueue
>> *_vq,
>>   	/* Only get used array entries after they have been exposed by host. */
>>   	virtio_rmb(vq->weak_barriers);
>>   
>> -	last_used = (vq->last_used_idx & (vq->split.vring.num - 1));
>> -	i = virtio32_to_cpu(_vq->vdev,
>> -			vq->split.vring.used->ring[last_used].id);
>> -	*len = virtio32_to_cpu(_vq->vdev,
>> -			vq->split.vring.used->ring[last_used].len);
>> +	if (vq->in_order) {
>> +		last_used = (vq->last_used_idx & (vq->split.vring.num - 1));
> 
> 
> Let's move this beyond the in_order check.
> 
> 
>> +		if (!vq->split.is_skipped_buffer) {
>> +			vq->split.last_desc_in_batch =
>> +				virtio32_to_cpu(_vq->vdev,
>> +						vq->split.vring.used->ring[last_used].id);
>> +			vq->split.is_skipped_buffer = true;
>> +		}
>> +		/* For skipped buffers in batch, we can ignore the len info, simply set len
>> as 0 */
> 
> 
> This seems to break the caller that depends on a correct len.
> 
> 
>> +		if (vq->split.next_desc_begin != vq->split.last_desc_in_batch) {
>> +			*len = 0;
>> +		} else {
>> +			*len = virtio32_to_cpu(_vq->vdev,
>> +					       vq->split.vring.used->ring[last_used].len);
>> +			vq->split.is_skipped_buffer = false;
>> +		}
>> +		i = vq->split.next_desc_begin;
>> +		j = i;
>> +		/* Indirect only takes one descriptor in descriptor table */
>> +		while (!vq->indirect && (vq->split.desc_extra[j].flags & VRING_DESC_F_NEXT))
>> +			j = (j + 1) & (vq->split.vring.num - 1);
> 
> 
> Any reason indirect descriptors can't be chained?
> 
> 
>> +		/* move to next */
>> +		j = (j + 1) % vq->split.vring.num;
>> +		/* Next buffer will use this descriptor in order */
>> +		vq->split.next_desc_begin = j;
> 
> 
> Is it more efficient to poke the available ring?
> 
> Thanks
> 
> 
>> +	} else {
>> +		last_used = (vq->last_used_idx & (vq->split.vring.num - 1));
>> +		i = virtio32_to_cpu(_vq->vdev,
>> +				    vq->split.vring.used->ring[last_used].id);
>> +		*len = virtio32_to_cpu(_vq->vdev,
>> +				       vq->split.vring.used->ring[last_used].len);
>> +	}
>>   
>>   	if (unlikely(i >= vq->split.vring.num)) {
>>   		BAD_RING(vq, "id %u out of range\n", i);
>> @@ -2223,6 +2272,7 @@ struct virtqueue *__vring_new_virtqueue(unsigned int
>> index,
>>   
>>   	vq->indirect = virtio_has_feature(vdev, VIRTIO_RING_F_INDIRECT_DESC) &&
>>   		!context;
>> +	vq->in_order = virtio_has_feature(vdev, VIRTIO_F_IN_ORDER);
>>   	vq->event = virtio_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX);
>>   
>>   	if (virtio_has_feature(vdev, VIRTIO_F_ORDER_PLATFORM))
>> @@ -2235,6 +2285,10 @@ struct virtqueue *__vring_new_virtqueue(unsigned int
>> index,
>>   	vq->split.avail_flags_shadow = 0;
>>   	vq->split.avail_idx_shadow = 0;
>>   
>> +	vq->split.next_desc_begin = 0;
>> +	vq->split.last_desc_in_batch = 0;
>> +	vq->split.is_skipped_buffer = false;
>> +
>>   	/* No callback?  Tell other side not to bother us. */
>>   	if (!callback) {
>>   		vq->split.avail_flags_shadow |= VRING_AVAIL_F_NO_INTERRUPT;

  parent reply	other threads:[~2022-09-08  9:37 UTC|newest]

Thread overview: 22+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2022-09-01  5:54 [RFC v3 0/7] In order support for virtio_ring, vhost and vsock Guo Zhi
2022-09-01  5:54 ` [RFC v3 1/7] vhost: expose used buffers Guo Zhi
2022-09-01  8:55   ` Eugenio Perez Martin
2022-09-07  4:21   ` Jason Wang
2022-09-08  8:43     ` Guo Zhi
2022-09-01  5:54 ` [RFC v3 2/7] vhost_test: batch used buffer Guo Zhi
2022-09-01  5:54 ` [RFC v3 3/7] vsock: batch buffers in tx Guo Zhi
2022-09-01  9:00   ` Eugenio Perez Martin
2022-09-07  4:27   ` Jason Wang
2022-09-08  8:41     ` Guo Zhi
2022-09-01  5:54 ` [RFC v3 4/7] vsock: announce VIRTIO_F_IN_ORDER in vsock Guo Zhi
2022-09-01  5:54 ` [RFC v3 5/7] virtio: unmask F_NEXT flag in desc_extra Guo Zhi
2022-09-01  6:07   ` Xuan Zhuo
2022-09-01  6:14     ` Guo Zhi
2022-09-01  5:54 ` [RFC v3 6/7] virtio: in order support for virtio_ring Guo Zhi
2022-09-01  6:10   ` Xuan Zhuo
2022-09-07  5:38   ` Jason Wang
2022-09-08  8:47     ` Guo Zhi
2022-09-08  9:36     ` Guo Zhi [this message]
2022-10-03 11:32     ` Guo Zhi
2022-09-01  5:54 ` [RFC v3 7/7] virtio: announce VIRTIO_F_IN_ORDER support Guo Zhi
2022-09-07  4:13 ` [RFC v3 0/7] In order support for virtio_ring, vhost and vsock Jason Wang

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=460316643.171392.1662629817302.JavaMail.zimbra@sjtu.edu.cn \
    --to=qtxuning1999@sjtu.edu.cn \
    --cc=eperezma@redhat.com \
    --cc=jasowang@redhat.com \
    --cc=kvm@vger.kernel.org \
    --cc=linux-kernel@vger.kernel.org \
    --cc=mst@redhat.com \
    --cc=netdev@vger.kernel.org \
    --cc=sgarzare@redhat.com \
    --cc=virtualization@lists.linux-foundation.org \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).