* [PATCH 1/5] VSOCK: support fill mergeable rx buffer in guest
From: jiangyiwen @ 2018-11-05 7:45 UTC
To: stefanha, Jason Wang; +Cc: netdev, kvm, virtualization
During driver probing, if the device offers the VIRTIO_VSOCK_F_MRG_RXBUF
feature, the guest fills the rx virtqueue with mergeable rx buffers, so
the host can send using mergeable rx buffers. A whole page is filled each
time, which accommodates both small and big packets.
Signed-off-by: Yiwen Jiang <jiangyiwen@huawei.com>
---
include/linux/virtio_vsock.h | 3 ++
net/vmw_vsock/virtio_transport.c | 72 +++++++++++++++++++++++++++++-----------
2 files changed, 56 insertions(+), 19 deletions(-)
diff --git a/include/linux/virtio_vsock.h b/include/linux/virtio_vsock.h
index e223e26..bf84418 100644
--- a/include/linux/virtio_vsock.h
+++ b/include/linux/virtio_vsock.h
@@ -14,6 +14,9 @@
#define VIRTIO_VSOCK_MAX_BUF_SIZE 0xFFFFFFFFUL
#define VIRTIO_VSOCK_MAX_PKT_BUF_SIZE (1024 * 64)
+/* Virtio-vsock feature */
+#define VIRTIO_VSOCK_F_MRG_RXBUF 0 /* Host can merge receive buffers. */
+
enum {
VSOCK_VQ_RX = 0, /* for host to guest data */
VSOCK_VQ_TX = 1, /* for guest to host data */
diff --git a/net/vmw_vsock/virtio_transport.c b/net/vmw_vsock/virtio_transport.c
index 5d3cce9..2040a9e 100644
--- a/net/vmw_vsock/virtio_transport.c
+++ b/net/vmw_vsock/virtio_transport.c
@@ -64,6 +64,7 @@ struct virtio_vsock {
struct virtio_vsock_event event_list[8];
u32 guest_cid;
+ bool mergeable;
};
static struct virtio_vsock *virtio_vsock_get(void)
@@ -256,6 +257,25 @@ static int virtio_transport_send_pkt_loopback(struct virtio_vsock *vsock,
return 0;
}
+static int fill_mergeable_rx_buff(struct virtqueue *vq)
+{
+ void *page = NULL;
+ struct scatterlist sg;
+ int err;
+
+ page = (void *)get_zeroed_page(GFP_KERNEL);
+ if (!page)
+ return -ENOMEM;
+
+ sg_init_one(&sg, page, PAGE_SIZE);
+
+ err = virtqueue_add_inbuf(vq, &sg, 1, page, GFP_KERNEL);
+ if (err < 0)
+ free_page((unsigned long) page);
+
+ return err;
+}
+
static void virtio_vsock_rx_fill(struct virtio_vsock *vsock)
{
int buf_len = VIRTIO_VSOCK_DEFAULT_RX_BUF_SIZE;
@@ -267,27 +287,33 @@ static void virtio_vsock_rx_fill(struct virtio_vsock *vsock)
vq = vsock->vqs[VSOCK_VQ_RX];
do {
- pkt = kzalloc(sizeof(*pkt), GFP_KERNEL);
- if (!pkt)
- break;
+ if (vsock->mergeable) {
+ ret = fill_mergeable_rx_buff(vq);
+ if (ret)
+ break;
+ } else {
+ pkt = kzalloc(sizeof(*pkt), GFP_KERNEL);
+ if (!pkt)
+ break;
- pkt->buf = kmalloc(buf_len, GFP_KERNEL);
- if (!pkt->buf) {
- virtio_transport_free_pkt(pkt);
- break;
- }
+ pkt->buf = kmalloc(buf_len, GFP_KERNEL);
+ if (!pkt->buf) {
+ virtio_transport_free_pkt(pkt);
+ break;
+ }
- pkt->len = buf_len;
+ pkt->len = buf_len;
- sg_init_one(&hdr, &pkt->hdr, sizeof(pkt->hdr));
- sgs[0] = &hdr;
+ sg_init_one(&hdr, &pkt->hdr, sizeof(pkt->hdr));
+ sgs[0] = &hdr;
- sg_init_one(&buf, pkt->buf, buf_len);
- sgs[1] = &buf;
- ret = virtqueue_add_sgs(vq, sgs, 0, 2, pkt, GFP_KERNEL);
- if (ret) {
- virtio_transport_free_pkt(pkt);
- break;
+ sg_init_one(&buf, pkt->buf, buf_len);
+ sgs[1] = &buf;
+ ret = virtqueue_add_sgs(vq, sgs, 0, 2, pkt, GFP_KERNEL);
+ if (ret) {
+ virtio_transport_free_pkt(pkt);
+ break;
+ }
}
vsock->rx_buf_nr++;
} while (vq->num_free);
@@ -588,6 +614,9 @@ static int virtio_vsock_probe(struct virtio_device *vdev)
if (ret < 0)
goto out_vqs;
+ if (virtio_has_feature(vdev, VIRTIO_VSOCK_F_MRG_RXBUF))
+ vsock->mergeable = true;
+
vsock->rx_buf_nr = 0;
vsock->rx_buf_max_nr = 0;
atomic_set(&vsock->queued_replies, 0);
@@ -640,8 +669,12 @@ static void virtio_vsock_remove(struct virtio_device *vdev)
vdev->config->reset(vdev);
mutex_lock(&vsock->rx_lock);
- while ((pkt = virtqueue_detach_unused_buf(vsock->vqs[VSOCK_VQ_RX])))
- virtio_transport_free_pkt(pkt);
+ while ((pkt = virtqueue_detach_unused_buf(vsock->vqs[VSOCK_VQ_RX]))) {
+ if (vsock->mergeable)
+ free_page((unsigned long)(void *)pkt);
+ else
+ virtio_transport_free_pkt(pkt);
+ }
mutex_unlock(&vsock->rx_lock);
mutex_lock(&vsock->tx_lock);
@@ -683,6 +716,7 @@ static void virtio_vsock_remove(struct virtio_device *vdev)
};
static unsigned int features[] = {
+ VIRTIO_VSOCK_F_MRG_RXBUF,
};
static struct virtio_driver virtio_vsock_driver = {
--
1.8.3.1
* Re: [PATCH 1/5] VSOCK: support fill mergeable rx buffer in guest
From: Jason Wang @ 2018-11-06 3:38 UTC
To: jiangyiwen, stefanha; +Cc: netdev, kvm, virtualization
On 2018/11/5 3:45 PM, jiangyiwen wrote:
> During driver probing, if the device offers the VIRTIO_VSOCK_F_MRG_RXBUF
> feature, the guest fills the rx virtqueue with mergeable rx buffers, so
> the host can send using mergeable rx buffers. A whole page is filled each
> time, which accommodates both small and big packets.
>
> Signed-off-by: Yiwen Jiang <jiangyiwen@huawei.com>
> ---
> include/linux/virtio_vsock.h | 3 ++
> net/vmw_vsock/virtio_transport.c | 72 +++++++++++++++++++++++++++++-----------
> 2 files changed, 56 insertions(+), 19 deletions(-)
>
> diff --git a/include/linux/virtio_vsock.h b/include/linux/virtio_vsock.h
> index e223e26..bf84418 100644
> --- a/include/linux/virtio_vsock.h
> +++ b/include/linux/virtio_vsock.h
> @@ -14,6 +14,9 @@
> #define VIRTIO_VSOCK_MAX_BUF_SIZE 0xFFFFFFFFUL
> #define VIRTIO_VSOCK_MAX_PKT_BUF_SIZE (1024 * 64)
>
> +/* Virtio-vsock feature */
> +#define VIRTIO_VSOCK_F_MRG_RXBUF 0 /* Host can merge receive buffers. */
> +
> enum {
> VSOCK_VQ_RX = 0, /* for host to guest data */
> VSOCK_VQ_TX = 1, /* for guest to host data */
> diff --git a/net/vmw_vsock/virtio_transport.c b/net/vmw_vsock/virtio_transport.c
> index 5d3cce9..2040a9e 100644
> --- a/net/vmw_vsock/virtio_transport.c
> +++ b/net/vmw_vsock/virtio_transport.c
> @@ -64,6 +64,7 @@ struct virtio_vsock {
> struct virtio_vsock_event event_list[8];
>
> u32 guest_cid;
> + bool mergeable;
> };
>
> static struct virtio_vsock *virtio_vsock_get(void)
> @@ -256,6 +257,25 @@ static int virtio_transport_send_pkt_loopback(struct virtio_vsock *vsock,
> return 0;
> }
>
> +static int fill_mergeable_rx_buff(struct virtqueue *vq)
> +{
> + void *page = NULL;
> + struct scatterlist sg;
> + int err;
> +
> + page = (void *)get_zeroed_page(GFP_KERNEL);
Any reason to use zeroed page?
> + if (!page)
> + return -ENOMEM;
> +
> + sg_init_one(&sg, page, PAGE_SIZE);
FYI, for virtio-net we have several optimizations for mergeable rx buffers:
- skb_page_frag_refill(), which can use high-order pages and reduce the
stress on the page allocator
- we don't use a fixed buffer size; instead we use an EWMA to estimate the
likely rx buffer size and avoid internal fragmentation
If we can try to reuse the virtio-net driver, we will get those nice features.
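For illustration, a rough sketch of those two mechanisms in virtio-net
terms. DECLARE_EWMA() and skb_page_frag_refill() are existing kernel
APIs; the function name, the ewma naming, and their use in a vsock rx
path below are assumptions for illustration, not code from this series:

#include <linux/average.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/skbuff.h>
#include <linux/virtio.h>

/* Running average of received packet lengths, as virtio-net keeps one
 * per receive queue. It must be set up with ewma_pkt_len_init() and fed
 * with ewma_pkt_len_add() for every completed rx buffer.
 */
DECLARE_EWMA(pkt_len, 0, 64)

static int refill_one_rx_buf(struct virtqueue *vq, struct page_frag *frag,
			     struct ewma_pkt_len *avg)
{
	struct scatterlist sg;
	unsigned int len;
	char *buf;
	int err;

	/* Size the buffer from the EWMA instead of a fixed PAGE_SIZE,
	 * limiting internal fragmentation for small packets.
	 */
	len = clamp_t(unsigned int, ewma_pkt_len_read(avg), 256, PAGE_SIZE);

	/* skb_page_frag_refill() hands out pieces of a high-order page
	 * where possible, reducing stress on the page allocator.
	 */
	if (!skb_page_frag_refill(len, frag, GFP_KERNEL))
		return -ENOMEM;

	buf = (char *)page_address(frag->page) + frag->offset;
	get_page(frag->page);		/* one reference per posted buffer */
	frag->offset += len;

	sg_init_one(&sg, buf, len);
	err = virtqueue_add_inbuf(vq, &sg, 1, buf, GFP_KERNEL);
	if (err < 0)
		put_page(virt_to_head_page(buf));
	return err;
}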
Thanks
> +
> + err = virtqueue_add_inbuf(vq, &sg, 1, page, GFP_KERNEL);
> + if (err < 0)
> + free_page((unsigned long) page);
> +
> + return err;
> +}
> +
> static void virtio_vsock_rx_fill(struct virtio_vsock *vsock)
> {
> int buf_len = VIRTIO_VSOCK_DEFAULT_RX_BUF_SIZE;
> @@ -267,27 +287,33 @@ static void virtio_vsock_rx_fill(struct virtio_vsock *vsock)
> vq = vsock->vqs[VSOCK_VQ_RX];
>
> do {
> - pkt = kzalloc(sizeof(*pkt), GFP_KERNEL);
> - if (!pkt)
> - break;
> + if (vsock->mergeable) {
> + ret = fill_mergeable_rx_buff(vq);
> + if (ret)
> + break;
> + } else {
> + pkt = kzalloc(sizeof(*pkt), GFP_KERNEL);
> + if (!pkt)
> + break;
>
> - pkt->buf = kmalloc(buf_len, GFP_KERNEL);
> - if (!pkt->buf) {
> - virtio_transport_free_pkt(pkt);
> - break;
> - }
> + pkt->buf = kmalloc(buf_len, GFP_KERNEL);
> + if (!pkt->buf) {
> + virtio_transport_free_pkt(pkt);
> + break;
> + }
>
> - pkt->len = buf_len;
> + pkt->len = buf_len;
>
> - sg_init_one(&hdr, &pkt->hdr, sizeof(pkt->hdr));
> - sgs[0] = &hdr;
> + sg_init_one(&hdr, &pkt->hdr, sizeof(pkt->hdr));
> + sgs[0] = &hdr;
>
> - sg_init_one(&buf, pkt->buf, buf_len);
> - sgs[1] = &buf;
> - ret = virtqueue_add_sgs(vq, sgs, 0, 2, pkt, GFP_KERNEL);
> - if (ret) {
> - virtio_transport_free_pkt(pkt);
> - break;
> + sg_init_one(&buf, pkt->buf, buf_len);
> + sgs[1] = &buf;
> + ret = virtqueue_add_sgs(vq, sgs, 0, 2, pkt, GFP_KERNEL);
> + if (ret) {
> + virtio_transport_free_pkt(pkt);
> + break;
> + }
> }
> vsock->rx_buf_nr++;
> } while (vq->num_free);
> @@ -588,6 +614,9 @@ static int virtio_vsock_probe(struct virtio_device *vdev)
> if (ret < 0)
> goto out_vqs;
>
> + if (virtio_has_feature(vdev, VIRTIO_VSOCK_F_MRG_RXBUF))
> + vsock->mergeable = true;
> +
> vsock->rx_buf_nr = 0;
> vsock->rx_buf_max_nr = 0;
> atomic_set(&vsock->queued_replies, 0);
> @@ -640,8 +669,12 @@ static void virtio_vsock_remove(struct virtio_device *vdev)
> vdev->config->reset(vdev);
>
> mutex_lock(&vsock->rx_lock);
> - while ((pkt = virtqueue_detach_unused_buf(vsock->vqs[VSOCK_VQ_RX])))
> - virtio_transport_free_pkt(pkt);
> + while ((pkt = virtqueue_detach_unused_buf(vsock->vqs[VSOCK_VQ_RX]))) {
> + if (vsock->mergeable)
> + free_page((unsigned long)(void *)pkt);
> + else
> + virtio_transport_free_pkt(pkt);
> + }
> mutex_unlock(&vsock->rx_lock);
>
> mutex_lock(&vsock->tx_lock);
> @@ -683,6 +716,7 @@ static void virtio_vsock_remove(struct virtio_device *vdev)
> };
>
> static unsigned int features[] = {
> + VIRTIO_VSOCK_F_MRG_RXBUF,
> };
>
> static struct virtio_driver virtio_vsock_driver = {
* Re: [PATCH 1/5] VSOCK: support fill mergeable rx buffer in guest
From: jiangyiwen @ 2018-11-06 6:22 UTC
To: Jason Wang, stefanha; +Cc: netdev, kvm, virtualization
On 2018/11/6 11:38, Jason Wang wrote:
>
> On 2018/11/5 3:45 PM, jiangyiwen wrote:
>> During driver probing, if the device offers the VIRTIO_VSOCK_F_MRG_RXBUF
>> feature, the guest fills the rx virtqueue with mergeable rx buffers, so
>> the host can send using mergeable rx buffers. A whole page is filled each
>> time, which accommodates both small and big packets.
>>
>> Signed-off-by: Yiwen Jiang <jiangyiwen@huawei.com>
>> ---
>> include/linux/virtio_vsock.h | 3 ++
>> net/vmw_vsock/virtio_transport.c | 72 +++++++++++++++++++++++++++++-----------
>> 2 files changed, 56 insertions(+), 19 deletions(-)
>>
>> diff --git a/include/linux/virtio_vsock.h b/include/linux/virtio_vsock.h
>> index e223e26..bf84418 100644
>> --- a/include/linux/virtio_vsock.h
>> +++ b/include/linux/virtio_vsock.h
>> @@ -14,6 +14,9 @@
>> #define VIRTIO_VSOCK_MAX_BUF_SIZE 0xFFFFFFFFUL
>> #define VIRTIO_VSOCK_MAX_PKT_BUF_SIZE (1024 * 64)
>>
>> +/* Virtio-vsock feature */
>> +#define VIRTIO_VSOCK_F_MRG_RXBUF 0 /* Host can merge receive buffers. */
>> +
>> enum {
>> VSOCK_VQ_RX = 0, /* for host to guest data */
>> VSOCK_VQ_TX = 1, /* for guest to host data */
>> diff --git a/net/vmw_vsock/virtio_transport.c b/net/vmw_vsock/virtio_transport.c
>> index 5d3cce9..2040a9e 100644
>> --- a/net/vmw_vsock/virtio_transport.c
>> +++ b/net/vmw_vsock/virtio_transport.c
>> @@ -64,6 +64,7 @@ struct virtio_vsock {
>> struct virtio_vsock_event event_list[8];
>>
>> u32 guest_cid;
>> + bool mergeable;
>> };
>>
>> static struct virtio_vsock *virtio_vsock_get(void)
>> @@ -256,6 +257,25 @@ static int virtio_transport_send_pkt_loopback(struct virtio_vsock *vsock,
>> return 0;
>> }
>>
>> +static int fill_mergeable_rx_buff(struct virtqueue *vq)
>> +{
>> + void *page = NULL;
>> + struct scatterlist sg;
>> + int err;
>> +
>> + page = (void *)get_zeroed_page(GFP_KERNEL);
>
>
> Any reason to use zeroed page?
In the previous version, the guest preallocated the entire virtio_vsock_pkt
structure with kzalloc, so it was contiguous zeroed physical memory, and the
host only needed to fill in the virtio_vsock_hdr part.
However, in the mergeable rx buffer version the guest only places a bare page
in the vring descriptor, and the host reserves sizeof(virtio_vsock_pkt) at the
start of the buffer instead of writing the whole structure. To keep the
structure fields correct, the page must be zeroed in advance.
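If I read that right, the host would treat the guest's page roughly as
below. This is my own sketch of the layout being described, and the
guest-side consumption code is hypothetical, not taken from the series:

/*
 * Assumed layout of the zeroed rx page:
 *
 *   +--------------------------+------------------------------------+
 *   | struct virtio_vsock_pkt  | payload written by the host        |
 *   | (host fills only ->hdr;  | (at most PAGE_SIZE -               |
 *   |  other fields stay zero) |  sizeof(struct virtio_vsock_pkt))  |
 *   +--------------------------+------------------------------------+
 *
 * Since every field the host does not write must already read as zero,
 * the guest could reuse the buffer directly as a packet:
 */
struct virtio_vsock_pkt *pkt = buf;	/* buf is the zeroed page */

pkt->buf = (char *)buf + sizeof(*pkt);	/* payload follows the struct */
pkt->len = le32_to_cpu(pkt->hdr.len);	/* hdr is filled by the host */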
>
>
>> + if (!page)
>> + return -ENOMEM;
>> +
>> + sg_init_one(&sg, page, PAGE_SIZE);
>
>
> FYI, for virtio-net we have several optimizations for mergeable rx buffers:
>
> - skb_page_frag_refill(), which can use high-order pages and reduce the stress on the page allocator
>
You're right. Initially I wanted to use a memory pool to manage the rx
buffers, and then adopt this in a later optimization patch. Your advice is
very helpful.
> - we don't use a fixed buffer size; instead we use an EWMA to estimate the likely rx buffer size and avoid internal fragmentation
>
OK, I will analyze this feature and consider adding it to my patches.
>
> If we can try to reuse the virtio-net driver, we will get those nice features.
>
Yes, virtio-net has a very healthy ecosystem and already carries many
performance optimizations, so it is actually a good idea.
>
> Thanks
>
>
>> +
>> + err = virtqueue_add_inbuf(vq, &sg, 1, page, GFP_KERNEL);
>> + if (err < 0)
>> + free_page((unsigned long) page);
>> +
>> + return err;
>> +}
>> +
>> static void virtio_vsock_rx_fill(struct virtio_vsock *vsock)
>> {
>> int buf_len = VIRTIO_VSOCK_DEFAULT_RX_BUF_SIZE;
>> @@ -267,27 +287,33 @@ static void virtio_vsock_rx_fill(struct virtio_vsock *vsock)
>> vq = vsock->vqs[VSOCK_VQ_RX];
>>
>> do {
>> - pkt = kzalloc(sizeof(*pkt), GFP_KERNEL);
>> - if (!pkt)
>> - break;
>> + if (vsock->mergeable) {
>> + ret = fill_mergeable_rx_buff(vq);
>> + if (ret)
>> + break;
>> + } else {
>> + pkt = kzalloc(sizeof(*pkt), GFP_KERNEL);
>> + if (!pkt)
>> + break;
>>
>> - pkt->buf = kmalloc(buf_len, GFP_KERNEL);
>> - if (!pkt->buf) {
>> - virtio_transport_free_pkt(pkt);
>> - break;
>> - }
>> + pkt->buf = kmalloc(buf_len, GFP_KERNEL);
>> + if (!pkt->buf) {
>> + virtio_transport_free_pkt(pkt);
>> + break;
>> + }
>>
>> - pkt->len = buf_len;
>> + pkt->len = buf_len;
>>
>> - sg_init_one(&hdr, &pkt->hdr, sizeof(pkt->hdr));
>> - sgs[0] = &hdr;
>> + sg_init_one(&hdr, &pkt->hdr, sizeof(pkt->hdr));
>> + sgs[0] = &hdr;
>>
>> - sg_init_one(&buf, pkt->buf, buf_len);
>> - sgs[1] = &buf;
>> - ret = virtqueue_add_sgs(vq, sgs, 0, 2, pkt, GFP_KERNEL);
>> - if (ret) {
>> - virtio_transport_free_pkt(pkt);
>> - break;
>> + sg_init_one(&buf, pkt->buf, buf_len);
>> + sgs[1] = &buf;
>> + ret = virtqueue_add_sgs(vq, sgs, 0, 2, pkt, GFP_KERNEL);
>> + if (ret) {
>> + virtio_transport_free_pkt(pkt);
>> + break;
>> + }
>> }
>> vsock->rx_buf_nr++;
>> } while (vq->num_free);
>> @@ -588,6 +614,9 @@ static int virtio_vsock_probe(struct virtio_device *vdev)
>> if (ret < 0)
>> goto out_vqs;
>>
>> + if (virtio_has_feature(vdev, VIRTIO_VSOCK_F_MRG_RXBUF))
>> + vsock->mergeable = true;
>> +
>> vsock->rx_buf_nr = 0;
>> vsock->rx_buf_max_nr = 0;
>> atomic_set(&vsock->queued_replies, 0);
>> @@ -640,8 +669,12 @@ static void virtio_vsock_remove(struct virtio_device *vdev)
>> vdev->config->reset(vdev);
>>
>> mutex_lock(&vsock->rx_lock);
>> - while ((pkt = virtqueue_detach_unused_buf(vsock->vqs[VSOCK_VQ_RX])))
>> - virtio_transport_free_pkt(pkt);
>> + while ((pkt = virtqueue_detach_unused_buf(vsock->vqs[VSOCK_VQ_RX]))) {
>> + if (vsock->mergeable)
>> + free_page((unsigned long)(void *)pkt);
>> + else
>> + virtio_transport_free_pkt(pkt);
>> + }
>> mutex_unlock(&vsock->rx_lock);
>>
>> mutex_lock(&vsock->tx_lock);
>> @@ -683,6 +716,7 @@ static void virtio_vsock_remove(struct virtio_device *vdev)
>> };
>>
>> static unsigned int features[] = {
>> + VIRTIO_VSOCK_F_MRG_RXBUF,
>> };
>>
>> static struct virtio_driver virtio_vsock_driver = {
>
> .
>
* Re: [PATCH 1/5] VSOCK: support fill mergeable rx buffer in guest
From: Jason Wang @ 2018-11-07 6:17 UTC
To: jiangyiwen, stefanha; +Cc: netdev, kvm, virtualization
On 2018/11/6 2:22 PM, jiangyiwen wrote:
> On 2018/11/6 11:38, Jason Wang wrote:
>> On 2018/11/5 3:45 PM, jiangyiwen wrote:
>>> During driver probing, if the device offers the VIRTIO_VSOCK_F_MRG_RXBUF
>>> feature, the guest fills the rx virtqueue with mergeable rx buffers, so
>>> the host can send using mergeable rx buffers. A whole page is filled each
>>> time, which accommodates both small and big packets.
>>>
>>> Signed-off-by: Yiwen Jiang <jiangyiwen@huawei.com>
>>> ---
>>> include/linux/virtio_vsock.h | 3 ++
>>> net/vmw_vsock/virtio_transport.c | 72 +++++++++++++++++++++++++++++-----------
>>> 2 files changed, 56 insertions(+), 19 deletions(-)
>>>
>>> diff --git a/include/linux/virtio_vsock.h b/include/linux/virtio_vsock.h
>>> index e223e26..bf84418 100644
>>> --- a/include/linux/virtio_vsock.h
>>> +++ b/include/linux/virtio_vsock.h
>>> @@ -14,6 +14,9 @@
>>> #define VIRTIO_VSOCK_MAX_BUF_SIZE 0xFFFFFFFFUL
>>> #define VIRTIO_VSOCK_MAX_PKT_BUF_SIZE (1024 * 64)
>>>
>>> +/* Virtio-vsock feature */
>>> +#define VIRTIO_VSOCK_F_MRG_RXBUF 0 /* Host can merge receive buffers. */
>>> +
>>> enum {
>>> VSOCK_VQ_RX = 0, /* for host to guest data */
>>> VSOCK_VQ_TX = 1, /* for guest to host data */
>>> diff --git a/net/vmw_vsock/virtio_transport.c b/net/vmw_vsock/virtio_transport.c
>>> index 5d3cce9..2040a9e 100644
>>> --- a/net/vmw_vsock/virtio_transport.c
>>> +++ b/net/vmw_vsock/virtio_transport.c
>>> @@ -64,6 +64,7 @@ struct virtio_vsock {
>>> struct virtio_vsock_event event_list[8];
>>>
>>> u32 guest_cid;
>>> + bool mergeable;
>>> };
>>>
>>> static struct virtio_vsock *virtio_vsock_get(void)
>>> @@ -256,6 +257,25 @@ static int virtio_transport_send_pkt_loopback(struct virtio_vsock *vsock,
>>> return 0;
>>> }
>>>
>>> +static int fill_mergeable_rx_buff(struct virtqueue *vq)
>>> +{
>>> + void *page = NULL;
>>> + struct scatterlist sg;
>>> + int err;
>>> +
>>> + page = (void *)get_zeroed_page(GFP_KERNEL);
>> Any reason to use zeroed page?
> In the previous version, the guest preallocated the entire virtio_vsock_pkt
> structure with kzalloc, so it was contiguous zeroed physical memory, and the
> host only needed to fill in the virtio_vsock_hdr part.
>
> However, in the mergeable rx buffer version the guest only places a bare page
> in the vring descriptor, and the host reserves sizeof(virtio_vsock_pkt) at the
> start of the buffer instead of writing the whole structure. To keep the
> structure fields correct, the page must be zeroed in advance.
I may be missing something, but it looks to me like only the header needs to be zeroed.
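A minimal sketch of that alternative, assuming the host never reads any
part of the embedded structure beyond what it writes, so zeroing just
the leading struct is sufficient:

/* Allocate an ordinary page and zero only the leading structure,
 * instead of paying for a fully zeroed page.
 */
void *buf = (void *)__get_free_page(GFP_KERNEL);

if (!buf)
	return -ENOMEM;
memset(buf, 0, sizeof(struct virtio_vsock_pkt));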
>
>>> + if (!page)
>>> + return -ENOMEM;
>>> +
>>> + sg_init_one(&sg, page, PAGE_SIZE);
>> FYI, for virtio-net we have several optimizations for mergeable rx buffers:
>>
>> - skb_page_frag_refill(), which can use high-order pages and reduce the stress on the page allocator
>>
> You're right. Initially I wanted to use a memory pool to manage the rx
> buffers, and then adopt this in a later optimization patch. Your advice is
> very helpful.
>
>> - we don't use a fixed buffer size; instead we use an EWMA to estimate the likely rx buffer size and avoid internal fragmentation
>>
> OK, I will analyze this feature and consider adding it to my patches.
>
>> If we can try to reuse the virtio-net driver, we will get those nice features.
>>
> Yes, virtio-net has a very healthy ecosystem and already carries many
> performance optimizations, so it is actually a good idea.
>
Yes, so my suggestion is to consider reusing them (unless we find a real
blocker) instead of duplicating code, features, and bugs.
Thanks