From: Jason Wang <jasowang@redhat.com> To: mst@redhat.com, kvm@vger.kernel.org, virtualization@lists.linux-foundation.org, netdev@vger.kernel.org, linux-kernel@vger.kernel.org Cc: Jason Wang <jasowang@redhat.com> Subject: [PATCH net-next 1/8] ptr_ring: introduce batch dequeuing Date: Tue, 21 Mar 2017 12:04:40 +0800 [thread overview] Message-ID: <1490069087-4783-2-git-send-email-jasowang@redhat.com> (raw) In-Reply-To: <1490069087-4783-1-git-send-email-jasowang@redhat.com> Signed-off-by: Jason Wang <jasowang@redhat.com> --- include/linux/ptr_ring.h | 65 ++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 65 insertions(+) diff --git a/include/linux/ptr_ring.h b/include/linux/ptr_ring.h index 6c70444..4771ded 100644 --- a/include/linux/ptr_ring.h +++ b/include/linux/ptr_ring.h @@ -247,6 +247,22 @@ static inline void *__ptr_ring_consume(struct ptr_ring *r) return ptr; } +static inline int __ptr_ring_consume_batched(struct ptr_ring *r, + void **array, int n) +{ + void *ptr; + int i = 0; + + while (i < n) { + ptr = __ptr_ring_consume(r); + if (!ptr) + break; + array[i++] = ptr; + } + + return i; +} + /* * Note: resize (below) nests producer lock within consumer lock, so if you * call this in interrupt or BH context, you must disable interrupts/BH when @@ -297,6 +313,55 @@ static inline void *ptr_ring_consume_bh(struct ptr_ring *r) return ptr; } +static inline int ptr_ring_consume_batched(struct ptr_ring *r, + void **array, int n) +{ + int ret; + + spin_lock(&r->consumer_lock); + ret = __ptr_ring_consume_batched(r, array, n); + spin_unlock(&r->consumer_lock); + + return ret; +} + +static inline int ptr_ring_consume_batched_irq(struct ptr_ring *r, + void **array, int n) +{ + int ret; + + spin_lock_irq(&r->consumer_lock); + ret = __ptr_ring_consume_batched(r, array, n); + spin_unlock_irq(&r->consumer_lock); + + return ret; +} + +static inline int ptr_ring_consume_batched_any(struct ptr_ring *r, + void **array, int n) +{ + unsigned long flags; + int ret; + + 
spin_lock_irqsave(&r->consumer_lock, flags); + ret = __ptr_ring_consume_batched(r, array, n); + spin_unlock_irqrestore(&r->consumer_lock, flags); + + return ret; +} + +static inline int ptr_ring_consume_batched_bh(struct ptr_ring *r, + void **array, int n) +{ + int ret; + + spin_lock_bh(&r->consumer_lock); + ret = __ptr_ring_consume_batched(r, array, n); + spin_unlock_bh(&r->consumer_lock); + + return ret; +} + /* Cast to structure type and call a function without discarding from FIFO. * Function must return a value. * Callers must take consumer_lock. -- 2.7.4
WARNING: multiple messages have this Message-ID (diff)
From: Jason Wang <jasowang@redhat.com> To: mst@redhat.com, kvm@vger.kernel.org, virtualization@lists.linux-foundation.org, netdev@vger.kernel.org, linux-kernel@vger.kernel.org Subject: [PATCH net-next 1/8] ptr_ring: introduce batch dequeuing Date: Tue, 21 Mar 2017 12:04:40 +0800 [thread overview] Message-ID: <1490069087-4783-2-git-send-email-jasowang@redhat.com> (raw) In-Reply-To: <1490069087-4783-1-git-send-email-jasowang@redhat.com> Signed-off-by: Jason Wang <jasowang@redhat.com> --- include/linux/ptr_ring.h | 65 ++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 65 insertions(+) diff --git a/include/linux/ptr_ring.h b/include/linux/ptr_ring.h index 6c70444..4771ded 100644 --- a/include/linux/ptr_ring.h +++ b/include/linux/ptr_ring.h @@ -247,6 +247,22 @@ static inline void *__ptr_ring_consume(struct ptr_ring *r) return ptr; } +static inline int __ptr_ring_consume_batched(struct ptr_ring *r, + void **array, int n) +{ + void *ptr; + int i = 0; + + while (i < n) { + ptr = __ptr_ring_consume(r); + if (!ptr) + break; + array[i++] = ptr; + } + + return i; +} + /* * Note: resize (below) nests producer lock within consumer lock, so if you * call this in interrupt or BH context, you must disable interrupts/BH when @@ -297,6 +313,55 @@ static inline void *ptr_ring_consume_bh(struct ptr_ring *r) return ptr; } +static inline int ptr_ring_consume_batched(struct ptr_ring *r, + void **array, int n) +{ + int ret; + + spin_lock(&r->consumer_lock); + ret = __ptr_ring_consume_batched(r, array, n); + spin_unlock(&r->consumer_lock); + + return ret; +} + +static inline int ptr_ring_consume_batched_irq(struct ptr_ring *r, + void **array, int n) +{ + int ret; + + spin_lock_irq(&r->consumer_lock); + ret = __ptr_ring_consume_batched(r, array, n); + spin_unlock_irq(&r->consumer_lock); + + return ret; +} + +static inline int ptr_ring_consume_batched_any(struct ptr_ring *r, + void **array, int n) +{ + unsigned long flags; + int ret; + + spin_lock_irqsave(&r->consumer_lock, 
flags); + ret = __ptr_ring_consume_batched(r, array, n); + spin_unlock_irqrestore(&r->consumer_lock, flags); + + return ret; +} + +static inline int ptr_ring_consume_batched_bh(struct ptr_ring *r, + void **array, int n) +{ + int ret; + + spin_lock_bh(&r->consumer_lock); + ret = __ptr_ring_consume_batched(r, array, n); + spin_unlock_bh(&r->consumer_lock); + + return ret; +} + /* Cast to structure type and call a function without discarding from FIFO. * Function must return a value. * Callers must take consumer_lock. -- 2.7.4
next prev parent reply other threads:[~2017-03-21 4:15 UTC|newest] Thread overview: 45+ messages / expand[flat|nested] mbox.gz Atom feed top 2017-03-21 4:04 [PATCH net-next 0/8] vhost-net rx batching Jason Wang 2017-03-21 4:04 ` Jason Wang 2017-03-21 4:04 ` Jason Wang [this message] 2017-03-21 4:04 ` [PATCH net-next 1/8] ptr_ring: introduce batch dequeuing Jason Wang 2017-03-21 10:25 ` Sergei Shtylyov 2017-03-22 3:16 ` Jason Wang 2017-03-22 3:16 ` Jason Wang 2017-03-22 13:43 ` Michael S. Tsirkin 2017-03-22 13:43 ` Michael S. Tsirkin 2017-03-23 5:33 ` Jason Wang 2017-03-23 5:33 ` Jason Wang 2017-03-21 4:04 ` [PATCH net-next 2/8] skb_array: " Jason Wang 2017-03-21 4:04 ` Jason Wang 2017-03-21 4:04 ` [PATCH net-next 3/8] tun: export skb_array Jason Wang 2017-03-21 4:04 ` Jason Wang 2017-03-21 4:04 ` [PATCH net-next 4/8] tap: " Jason Wang 2017-03-21 4:04 ` Jason Wang 2017-03-21 4:04 ` [PATCH net-next 5/8] tun: support receiving skb through msg_control Jason Wang 2017-03-21 4:04 ` Jason Wang 2017-03-21 4:04 ` [PATCH net-next 6/8] tap: support receiving skb from msg_control Jason Wang 2017-03-21 4:04 ` Jason Wang 2017-03-21 4:04 ` [PATCH net-next 7/8] vhost_net: try batch dequing from skb array Jason Wang 2017-03-21 4:04 ` Jason Wang 2017-03-22 14:16 ` Michael S. Tsirkin 2017-03-22 14:16 ` Michael S. Tsirkin 2017-03-23 5:34 ` Jason Wang 2017-03-23 5:34 ` Jason Wang 2017-03-29 9:58 ` Jason Wang 2017-03-29 9:58 ` Jason Wang 2017-03-29 10:46 ` Pankaj Gupta 2017-03-29 10:46 ` Pankaj Gupta 2017-03-29 10:53 ` Jason Wang 2017-03-29 10:53 ` Jason Wang 2017-03-29 21:47 ` Michael S. Tsirkin 2017-03-29 21:47 ` Michael S. Tsirkin 2017-03-21 4:04 ` [PATCH net-next 8/8] vhost_net: use lockless peeking for skb array during busy polling Jason Wang 2017-03-21 4:04 ` Jason Wang 2017-03-29 12:07 ` Michael S. Tsirkin 2017-03-29 12:07 ` Michael S. Tsirkin 2017-03-30 2:16 ` Jason Wang 2017-03-30 2:33 ` Michael S. Tsirkin 2017-03-30 2:33 ` Michael S. 
Tsirkin 2017-03-30 3:53 ` Jason Wang 2017-03-30 3:53 ` Jason Wang 2017-03-30 2:16 ` Jason Wang
Reply instructions: You may reply publicly to this message via plain-text email using any one of the following methods: * Save the following mbox file, import it into your mail client, and reply-to-all from there: mbox Avoid top-posting and favor interleaved quoting: https://en.wikipedia.org/wiki/Posting_style#Interleaved_style * Reply using the --to, --cc, and --in-reply-to switches of git-send-email(1): git send-email \ --in-reply-to=1490069087-4783-2-git-send-email-jasowang@redhat.com \ --to=jasowang@redhat.com \ --cc=kvm@vger.kernel.org \ --cc=linux-kernel@vger.kernel.org \ --cc=mst@redhat.com \ --cc=netdev@vger.kernel.org \ --cc=virtualization@lists.linux-foundation.org \ /path/to/YOUR_REPLY https://kernel.org/pub/software/scm/git/docs/git-send-email.html * If your mail client supports setting the In-Reply-To header via mailto: links, try the mailto: link. Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is an external index of several public inboxes, see mirroring instructions on how to clone and mirror all data and code used by this external index.