* [PATCH] af_packet: TPACKET_V3: replace busy-wait loop
@ 2020-07-07 15:22 John Ogness
2020-07-15 20:21 ` Jakub Kicinski
0 siblings, 1 reply; 5+ messages in thread
From: John Ogness @ 2020-07-07 15:22 UTC (permalink / raw)
To: David S . Miller
Cc: Jakub Kicinski, Sebastian Andrzej Siewior, Thomas Gleixner,
netdev, linux-kernel
A busy-wait loop is used to implement waiting for bits to be copied
from the skb to the kernel buffer before retiring a block. This is
a problem on PREEMPT_RT because the copying task could be preempted
by the busy-waiting task and thus live lock in the busy-wait loop.
Replace the busy-wait logic with an rwlock_t. This provides lockdep
coverage and makes the code RT ready.
Signed-off-by: John Ogness <john.ogness@linutronix.de>
---
patch against v5.8-rc4
net/packet/af_packet.c | 20 ++++++++++----------
net/packet/internal.h | 2 +-
2 files changed, 11 insertions(+), 11 deletions(-)
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index 29bd405adbbd..dd1eec2dd6ef 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -593,6 +593,7 @@ static void init_prb_bdqc(struct packet_sock *po,
req_u->req3.tp_block_size);
p1->tov_in_jiffies = msecs_to_jiffies(p1->retire_blk_tov);
p1->blk_sizeof_priv = req_u->req3.tp_sizeof_priv;
+ rwlock_init(&p1->blk_fill_in_prog_lock);
p1->max_frame_len = p1->kblk_size - BLK_PLUS_PRIV(p1->blk_sizeof_priv);
prb_init_ft_ops(p1, req_u);
@@ -659,10 +660,9 @@ static void prb_retire_rx_blk_timer_expired(struct timer_list *t)
*
*/
if (BLOCK_NUM_PKTS(pbd)) {
- while (atomic_read(&pkc->blk_fill_in_prog)) {
- /* Waiting for skb_copy_bits to finish... */
- cpu_relax();
- }
+ /* Waiting for skb_copy_bits to finish... */
+ write_lock(&pkc->blk_fill_in_prog_lock);
+ write_unlock(&pkc->blk_fill_in_prog_lock);
}
if (pkc->last_kactive_blk_num == pkc->kactive_blk_num) {
@@ -921,10 +921,9 @@ static void prb_retire_current_block(struct tpacket_kbdq_core *pkc,
* the timer-handler already handled this case.
*/
if (!(status & TP_STATUS_BLK_TMO)) {
- while (atomic_read(&pkc->blk_fill_in_prog)) {
- /* Waiting for skb_copy_bits to finish... */
- cpu_relax();
- }
+ /* Waiting for skb_copy_bits to finish... */
+ write_lock(&pkc->blk_fill_in_prog_lock);
+ write_unlock(&pkc->blk_fill_in_prog_lock);
}
prb_close_block(pkc, pbd, po, status);
return;
@@ -944,7 +943,8 @@ static int prb_queue_frozen(struct tpacket_kbdq_core *pkc)
static void prb_clear_blk_fill_status(struct packet_ring_buffer *rb)
{
struct tpacket_kbdq_core *pkc = GET_PBDQC_FROM_RB(rb);
- atomic_dec(&pkc->blk_fill_in_prog);
+
+ read_unlock(&pkc->blk_fill_in_prog_lock);
}
static void prb_fill_rxhash(struct tpacket_kbdq_core *pkc,
@@ -998,7 +998,7 @@ static void prb_fill_curr_block(char *curr,
pkc->nxt_offset += TOTAL_PKT_LEN_INCL_ALIGN(len);
BLOCK_LEN(pbd) += TOTAL_PKT_LEN_INCL_ALIGN(len);
BLOCK_NUM_PKTS(pbd) += 1;
- atomic_inc(&pkc->blk_fill_in_prog);
+ read_lock(&pkc->blk_fill_in_prog_lock);
prb_run_all_ft_ops(pkc, ppd);
}
diff --git a/net/packet/internal.h b/net/packet/internal.h
index 907f4cd2a718..fd41ecb7f605 100644
--- a/net/packet/internal.h
+++ b/net/packet/internal.h
@@ -39,7 +39,7 @@ struct tpacket_kbdq_core {
char *nxt_offset;
struct sk_buff *skb;
- atomic_t blk_fill_in_prog;
+ rwlock_t blk_fill_in_prog_lock;
/* Default is set to 8ms */
#define DEFAULT_PRB_RETIRE_TOV (8)
--
2.20.1
^ permalink raw reply related [flat|nested] 5+ messages in thread
* Re: [PATCH] af_packet: TPACKET_V3: replace busy-wait loop
2020-07-07 15:22 [PATCH] af_packet: TPACKET_V3: replace busy-wait loop John Ogness
@ 2020-07-15 20:21 ` Jakub Kicinski
2020-07-15 22:35 ` Willem de Bruijn
0 siblings, 1 reply; 5+ messages in thread
From: Jakub Kicinski @ 2020-07-15 20:21 UTC (permalink / raw)
To: John Ogness, Willem de Bruijn
Cc: David S . Miller, Sebastian Andrzej Siewior, Thomas Gleixner,
netdev, linux-kernel
On Tue, 7 Jul 2020 17:28:04 +0206 John Ogness wrote:
> A busy-wait loop is used to implement waiting for bits to be copied
> from the skb to the kernel buffer before retiring a block. This is
> a problem on PREEMPT_RT because the copying task could be preempted
> by the busy-waiting task and thus live lock in the busy-wait loop.
>
> Replace the busy-wait logic with an rwlock_t. This provides lockdep
> coverage and makes the code RT ready.
>
> Signed-off-by: John Ogness <john.ogness@linutronix.de>
Is taking a lock and immediately releasing it better than a completion?
Seems like the lock is guaranteed to dirty a cache line, which would
otherwise be avoided here.
Willem, would you be able to take a look as well? Is this path
performance sensitive in real life?
> diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
> index 29bd405adbbd..dd1eec2dd6ef 100644
> --- a/net/packet/af_packet.c
> +++ b/net/packet/af_packet.c
> @@ -593,6 +593,7 @@ static void init_prb_bdqc(struct packet_sock *po,
> req_u->req3.tp_block_size);
> p1->tov_in_jiffies = msecs_to_jiffies(p1->retire_blk_tov);
> p1->blk_sizeof_priv = req_u->req3.tp_sizeof_priv;
> + rwlock_init(&p1->blk_fill_in_prog_lock);
>
> p1->max_frame_len = p1->kblk_size - BLK_PLUS_PRIV(p1->blk_sizeof_priv);
> prb_init_ft_ops(p1, req_u);
> @@ -659,10 +660,9 @@ static void prb_retire_rx_blk_timer_expired(struct timer_list *t)
> *
> */
> if (BLOCK_NUM_PKTS(pbd)) {
> - while (atomic_read(&pkc->blk_fill_in_prog)) {
> - /* Waiting for skb_copy_bits to finish... */
> - cpu_relax();
> - }
> + /* Waiting for skb_copy_bits to finish... */
> + write_lock(&pkc->blk_fill_in_prog_lock);
> + write_unlock(&pkc->blk_fill_in_prog_lock);
> }
>
> if (pkc->last_kactive_blk_num == pkc->kactive_blk_num) {
> @@ -921,10 +921,9 @@ static void prb_retire_current_block(struct tpacket_kbdq_core *pkc,
> * the timer-handler already handled this case.
> */
> if (!(status & TP_STATUS_BLK_TMO)) {
> - while (atomic_read(&pkc->blk_fill_in_prog)) {
> - /* Waiting for skb_copy_bits to finish... */
> - cpu_relax();
> - }
> + /* Waiting for skb_copy_bits to finish... */
> + write_lock(&pkc->blk_fill_in_prog_lock);
> + write_unlock(&pkc->blk_fill_in_prog_lock);
> }
> prb_close_block(pkc, pbd, po, status);
> return;
> @@ -944,7 +943,8 @@ static int prb_queue_frozen(struct tpacket_kbdq_core *pkc)
> static void prb_clear_blk_fill_status(struct packet_ring_buffer *rb)
> {
> struct tpacket_kbdq_core *pkc = GET_PBDQC_FROM_RB(rb);
> - atomic_dec(&pkc->blk_fill_in_prog);
> +
> + read_unlock(&pkc->blk_fill_in_prog_lock);
> }
>
> static void prb_fill_rxhash(struct tpacket_kbdq_core *pkc,
> @@ -998,7 +998,7 @@ static void prb_fill_curr_block(char *curr,
> pkc->nxt_offset += TOTAL_PKT_LEN_INCL_ALIGN(len);
> BLOCK_LEN(pbd) += TOTAL_PKT_LEN_INCL_ALIGN(len);
> BLOCK_NUM_PKTS(pbd) += 1;
> - atomic_inc(&pkc->blk_fill_in_prog);
> + read_lock(&pkc->blk_fill_in_prog_lock);
> prb_run_all_ft_ops(pkc, ppd);
> }
>
> diff --git a/net/packet/internal.h b/net/packet/internal.h
> index 907f4cd2a718..fd41ecb7f605 100644
> --- a/net/packet/internal.h
> +++ b/net/packet/internal.h
> @@ -39,7 +39,7 @@ struct tpacket_kbdq_core {
> char *nxt_offset;
> struct sk_buff *skb;
>
> - atomic_t blk_fill_in_prog;
> + rwlock_t blk_fill_in_prog_lock;
>
> /* Default is set to 8ms */
> #define DEFAULT_PRB_RETIRE_TOV (8)
^ permalink raw reply [flat|nested] 5+ messages in thread
* Re: [PATCH] af_packet: TPACKET_V3: replace busy-wait loop
2020-07-15 20:21 ` Jakub Kicinski
@ 2020-07-15 22:35 ` Willem de Bruijn
2020-07-16 0:22 ` Jakub Kicinski
0 siblings, 1 reply; 5+ messages in thread
From: Willem de Bruijn @ 2020-07-15 22:35 UTC (permalink / raw)
To: Jakub Kicinski
Cc: John Ogness, David S . Miller, Sebastian Andrzej Siewior,
Thomas Gleixner, Network Development, linux-kernel
On Wed, Jul 15, 2020 at 4:21 PM Jakub Kicinski <kuba@kernel.org> wrote:
>
> On Tue, 7 Jul 2020 17:28:04 +0206 John Ogness wrote:
> > A busy-wait loop is used to implement waiting for bits to be copied
> > from the skb to the kernel buffer before retiring a block. This is
> > a problem on PREEMPT_RT because the copying task could be preempted
> > by the busy-waiting task and thus live lock in the busy-wait loop.
> >
> > Replace the busy-wait logic with an rwlock_t. This provides lockdep
> > coverage and makes the code RT ready.
> >
> > Signed-off-by: John Ogness <john.ogness@linutronix.de>
>
> Is taking a lock and immediately releasing it better than a completion?
> Seems like the lock is guaranteed to dirty a cache line, which would
> otherwise be avoided here.
>
> Willem, would you be able to take a look as well? Is this path
> performance sensitive in real life?
No objections from me.
I guess this resolves the issue on preempt_rt, because the spinlocks act as
mutexes. It will still spin on write_lock otherwise, so there is no huge
difference from the existing logic.
>
> > diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
> > index 29bd405adbbd..dd1eec2dd6ef 100644
> > --- a/net/packet/af_packet.c
> > +++ b/net/packet/af_packet.c
> > @@ -593,6 +593,7 @@ static void init_prb_bdqc(struct packet_sock *po,
> > req_u->req3.tp_block_size);
> > p1->tov_in_jiffies = msecs_to_jiffies(p1->retire_blk_tov);
> > p1->blk_sizeof_priv = req_u->req3.tp_sizeof_priv;
> > + rwlock_init(&p1->blk_fill_in_prog_lock);
> >
> > p1->max_frame_len = p1->kblk_size - BLK_PLUS_PRIV(p1->blk_sizeof_priv);
> > prb_init_ft_ops(p1, req_u);
> > @@ -659,10 +660,9 @@ static void prb_retire_rx_blk_timer_expired(struct timer_list *t)
> > *
> > */
> > if (BLOCK_NUM_PKTS(pbd)) {
> > - while (atomic_read(&pkc->blk_fill_in_prog)) {
> > - /* Waiting for skb_copy_bits to finish... */
> > - cpu_relax();
> > - }
> > + /* Waiting for skb_copy_bits to finish... */
> > + write_lock(&pkc->blk_fill_in_prog_lock);
> > + write_unlock(&pkc->blk_fill_in_prog_lock);
> > }
> >
> > if (pkc->last_kactive_blk_num == pkc->kactive_blk_num) {
> > @@ -921,10 +921,9 @@ static void prb_retire_current_block(struct tpacket_kbdq_core *pkc,
> > * the timer-handler already handled this case.
> > */
> > if (!(status & TP_STATUS_BLK_TMO)) {
> > - while (atomic_read(&pkc->blk_fill_in_prog)) {
> > - /* Waiting for skb_copy_bits to finish... */
> > - cpu_relax();
> > - }
> > + /* Waiting for skb_copy_bits to finish... */
> > + write_lock(&pkc->blk_fill_in_prog_lock);
> > + write_unlock(&pkc->blk_fill_in_prog_lock);
> > }
> > prb_close_block(pkc, pbd, po, status);
> > return;
> > @@ -944,7 +943,8 @@ static int prb_queue_frozen(struct tpacket_kbdq_core *pkc)
> > static void prb_clear_blk_fill_status(struct packet_ring_buffer *rb)
> > {
> > struct tpacket_kbdq_core *pkc = GET_PBDQC_FROM_RB(rb);
> > - atomic_dec(&pkc->blk_fill_in_prog);
> > +
> > + read_unlock(&pkc->blk_fill_in_prog_lock);
> > }
> >
> > static void prb_fill_rxhash(struct tpacket_kbdq_core *pkc,
> > @@ -998,7 +998,7 @@ static void prb_fill_curr_block(char *curr,
> > pkc->nxt_offset += TOTAL_PKT_LEN_INCL_ALIGN(len);
> > BLOCK_LEN(pbd) += TOTAL_PKT_LEN_INCL_ALIGN(len);
> > BLOCK_NUM_PKTS(pbd) += 1;
> > - atomic_inc(&pkc->blk_fill_in_prog);
> > + read_lock(&pkc->blk_fill_in_prog_lock);
> > prb_run_all_ft_ops(pkc, ppd);
> > }
> >
> > diff --git a/net/packet/internal.h b/net/packet/internal.h
> > index 907f4cd2a718..fd41ecb7f605 100644
> > --- a/net/packet/internal.h
> > +++ b/net/packet/internal.h
> > @@ -39,7 +39,7 @@ struct tpacket_kbdq_core {
> > char *nxt_offset;
> > struct sk_buff *skb;
> >
> > - atomic_t blk_fill_in_prog;
> > + rwlock_t blk_fill_in_prog_lock;
> >
> > /* Default is set to 8ms */
> > #define DEFAULT_PRB_RETIRE_TOV (8)
>
^ permalink raw reply [flat|nested] 5+ messages in thread
* Re: [PATCH] af_packet: TPACKET_V3: replace busy-wait loop
2020-07-15 22:35 ` Willem de Bruijn
@ 2020-07-16 0:22 ` Jakub Kicinski
0 siblings, 0 replies; 5+ messages in thread
From: Jakub Kicinski @ 2020-07-16 0:22 UTC (permalink / raw)
To: Willem de Bruijn
Cc: John Ogness, David S . Miller, Sebastian Andrzej Siewior,
Thomas Gleixner, Network Development, linux-kernel
On Wed, 15 Jul 2020 18:35:00 -0400 Willem de Bruijn wrote:
> On Wed, Jul 15, 2020 at 4:21 PM Jakub Kicinski <kuba@kernel.org> wrote:
> >
> > On Tue, 7 Jul 2020 17:28:04 +0206 John Ogness wrote:
> > > A busy-wait loop is used to implement waiting for bits to be copied
> > > from the skb to the kernel buffer before retiring a block. This is
> > > a problem on PREEMPT_RT because the copying task could be preempted
> > > by the busy-waiting task and thus live lock in the busy-wait loop.
> > >
> > > Replace the busy-wait logic with an rwlock_t. This provides lockdep
> > > coverage and makes the code RT ready.
> > >
> > > Signed-off-by: John Ogness <john.ogness@linutronix.de>
> >
> > Is taking a lock and immediately releasing it better than a completion?
> > Seems like the lock is guaranteed to dirty a cache line, which would
> > otherwise be avoided here.
> >
> > Willem, would you be able to take a look as well? Is this path
> > performance sensitive in real life?
>
> No objections from me.
>
> I guess this resolves the issue on preempt_rt, because the spinlocks act as
> mutexes. It will still spin on write_lock otherwise, no huge difference from
> existing logic.
Thanks!
If no one else objects I'm putting this in net-next.
Seems a little late for 5.8.
^ permalink raw reply [flat|nested] 5+ messages in thread
* Re: [PATCH] af_packet: TPACKET_V3: replace busy-wait loop
@ 2020-07-07 19:32 kernel test robot
0 siblings, 0 replies; 5+ messages in thread
From: kernel test robot @ 2020-07-07 19:32 UTC (permalink / raw)
To: kbuild
[-- Attachment #1: Type: text/plain, Size: 7125 bytes --]
CC: kbuild-all(a)lists.01.org
In-Reply-To: <20200707152204.10314-1-john.ogness@linutronix.de>
References: <20200707152204.10314-1-john.ogness@linutronix.de>
TO: John Ogness <john.ogness@linutronix.de>
TO: "David S . Miller" <davem@davemloft.net>
CC: Jakub Kicinski <kuba@kernel.org>
CC: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
CC: Thomas Gleixner <tglx@linutronix.de>
CC: netdev(a)vger.kernel.org
CC: linux-kernel(a)vger.kernel.org
Hi John,
Thank you for the patch! Perhaps something to improve:
[auto build test WARNING on net-next/master]
[also build test WARNING on net/master sparc-next/master linus/master v5.8-rc4 next-20200707]
[If your patch is applied to the wrong git tree, kindly drop us a note.
And when submitting patch, we suggest to use as documented in
https://git-scm.com/docs/git-format-patch]
url: https://github.com/0day-ci/linux/commits/John-Ogness/af_packet-TPACKET_V3-replace-busy-wait-loop/20200707-233423
base: https://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next.git d47a72152097d7be7cfc453d205196c0aa976c33
:::::: branch date: 4 hours ago
:::::: commit date: 4 hours ago
config: i386-randconfig-s002-20200707 (attached as .config)
compiler: gcc-9 (Debian 9.3.0-14) 9.3.0
reproduce:
# apt-get install sparse
# sparse version: v0.6.2-31-gabbfd661-dirty
# save the attached .config to linux build tree
make W=1 C=1 CF='-fdiagnostic-prefix -D__CHECK_ENDIAN__' ARCH=i386
If you fix the issue, kindly add following tag as appropriate
Reported-by: kernel test robot <lkp@intel.com>
sparse warnings: (new ones prefixed by >>)
>> net/packet/af_packet.c:943:13: sparse: sparse: context imbalance in 'prb_clear_blk_fill_status' - unexpected unlock
>> net/packet/af_packet.c:988:13: sparse: sparse: context imbalance in 'prb_fill_curr_block' - wrong count at exit
# https://github.com/0day-ci/linux/commit/d558de7a207f4d5222ccab6eb86fad97ada91a35
git remote add linux-review https://github.com/0day-ci/linux
git remote update linux-review
git checkout d558de7a207f4d5222ccab6eb86fad97ada91a35
vim +/prb_clear_blk_fill_status +943 net/packet/af_packet.c
f6fb8f100b80737 chetan loke 2011-08-19 942
eea49cc9009767d Olof Johansson 2011-11-02 @943 static void prb_clear_blk_fill_status(struct packet_ring_buffer *rb)
f6fb8f100b80737 chetan loke 2011-08-19 944 {
bc59ba399113fcb chetan loke 2011-08-25 945 struct tpacket_kbdq_core *pkc = GET_PBDQC_FROM_RB(rb);
d558de7a207f4d5 John Ogness 2020-07-07 946
d558de7a207f4d5 John Ogness 2020-07-07 947 read_unlock(&pkc->blk_fill_in_prog_lock);
f6fb8f100b80737 chetan loke 2011-08-19 948 }
f6fb8f100b80737 chetan loke 2011-08-19 949
eea49cc9009767d Olof Johansson 2011-11-02 950 static void prb_fill_rxhash(struct tpacket_kbdq_core *pkc,
f6fb8f100b80737 chetan loke 2011-08-19 951 struct tpacket3_hdr *ppd)
f6fb8f100b80737 chetan loke 2011-08-19 952 {
3958afa1b272eb0 Tom Herbert 2013-12-15 953 ppd->hv1.tp_rxhash = skb_get_hash(pkc->skb);
f6fb8f100b80737 chetan loke 2011-08-19 954 }
f6fb8f100b80737 chetan loke 2011-08-19 955
eea49cc9009767d Olof Johansson 2011-11-02 956 static void prb_clear_rxhash(struct tpacket_kbdq_core *pkc,
f6fb8f100b80737 chetan loke 2011-08-19 957 struct tpacket3_hdr *ppd)
f6fb8f100b80737 chetan loke 2011-08-19 958 {
f6fb8f100b80737 chetan loke 2011-08-19 959 ppd->hv1.tp_rxhash = 0;
f6fb8f100b80737 chetan loke 2011-08-19 960 }
f6fb8f100b80737 chetan loke 2011-08-19 961
eea49cc9009767d Olof Johansson 2011-11-02 962 static void prb_fill_vlan_info(struct tpacket_kbdq_core *pkc,
f6fb8f100b80737 chetan loke 2011-08-19 963 struct tpacket3_hdr *ppd)
f6fb8f100b80737 chetan loke 2011-08-19 964 {
df8a39defad46b8 Jiri Pirko 2015-01-13 965 if (skb_vlan_tag_present(pkc->skb)) {
df8a39defad46b8 Jiri Pirko 2015-01-13 966 ppd->hv1.tp_vlan_tci = skb_vlan_tag_get(pkc->skb);
a0cdfcf39362410 Atzm Watanabe 2013-12-17 967 ppd->hv1.tp_vlan_tpid = ntohs(pkc->skb->vlan_proto);
a0cdfcf39362410 Atzm Watanabe 2013-12-17 968 ppd->tp_status = TP_STATUS_VLAN_VALID | TP_STATUS_VLAN_TPID_VALID;
f6fb8f100b80737 chetan loke 2011-08-19 969 } else {
9e67030af367ab5 Daniel Borkmann 2012-08-20 970 ppd->hv1.tp_vlan_tci = 0;
a0cdfcf39362410 Atzm Watanabe 2013-12-17 971 ppd->hv1.tp_vlan_tpid = 0;
9e67030af367ab5 Daniel Borkmann 2012-08-20 972 ppd->tp_status = TP_STATUS_AVAILABLE;
f6fb8f100b80737 chetan loke 2011-08-19 973 }
f6fb8f100b80737 chetan loke 2011-08-19 974 }
f6fb8f100b80737 chetan loke 2011-08-19 975
bc59ba399113fcb chetan loke 2011-08-25 976 static void prb_run_all_ft_ops(struct tpacket_kbdq_core *pkc,
f6fb8f100b80737 chetan loke 2011-08-19 977 struct tpacket3_hdr *ppd)
f6fb8f100b80737 chetan loke 2011-08-19 978 {
a0cdfcf39362410 Atzm Watanabe 2013-12-17 979 ppd->hv1.tp_padding = 0;
f6fb8f100b80737 chetan loke 2011-08-19 980 prb_fill_vlan_info(pkc, ppd);
f6fb8f100b80737 chetan loke 2011-08-19 981
f6fb8f100b80737 chetan loke 2011-08-19 982 if (pkc->feature_req_word & TP_FT_REQ_FILL_RXHASH)
f6fb8f100b80737 chetan loke 2011-08-19 983 prb_fill_rxhash(pkc, ppd);
f6fb8f100b80737 chetan loke 2011-08-19 984 else
f6fb8f100b80737 chetan loke 2011-08-19 985 prb_clear_rxhash(pkc, ppd);
f6fb8f100b80737 chetan loke 2011-08-19 986 }
f6fb8f100b80737 chetan loke 2011-08-19 987
eea49cc9009767d Olof Johansson 2011-11-02 @988 static void prb_fill_curr_block(char *curr,
bc59ba399113fcb chetan loke 2011-08-25 989 struct tpacket_kbdq_core *pkc,
bc59ba399113fcb chetan loke 2011-08-25 990 struct tpacket_block_desc *pbd,
f6fb8f100b80737 chetan loke 2011-08-19 991 unsigned int len)
f6fb8f100b80737 chetan loke 2011-08-19 992 {
f6fb8f100b80737 chetan loke 2011-08-19 993 struct tpacket3_hdr *ppd;
f6fb8f100b80737 chetan loke 2011-08-19 994
f6fb8f100b80737 chetan loke 2011-08-19 995 ppd = (struct tpacket3_hdr *)curr;
f6fb8f100b80737 chetan loke 2011-08-19 996 ppd->tp_next_offset = TOTAL_PKT_LEN_INCL_ALIGN(len);
f6fb8f100b80737 chetan loke 2011-08-19 997 pkc->prev = curr;
f6fb8f100b80737 chetan loke 2011-08-19 998 pkc->nxt_offset += TOTAL_PKT_LEN_INCL_ALIGN(len);
f6fb8f100b80737 chetan loke 2011-08-19 999 BLOCK_LEN(pbd) += TOTAL_PKT_LEN_INCL_ALIGN(len);
f6fb8f100b80737 chetan loke 2011-08-19 1000 BLOCK_NUM_PKTS(pbd) += 1;
d558de7a207f4d5 John Ogness 2020-07-07 1001 read_lock(&pkc->blk_fill_in_prog_lock);
f6fb8f100b80737 chetan loke 2011-08-19 1002 prb_run_all_ft_ops(pkc, ppd);
f6fb8f100b80737 chetan loke 2011-08-19 1003 }
f6fb8f100b80737 chetan loke 2011-08-19 1004
---
0-DAY CI Kernel Test Service, Intel Corporation
https://lists.01.org/hyperkitty/list/kbuild-all(a)lists.01.org
[-- Attachment #2: config.gz --]
[-- Type: application/gzip, Size: 41518 bytes --]
^ permalink raw reply [flat|nested] 5+ messages in thread
end of thread, other threads:[~2020-07-16 0:22 UTC | newest]
Thread overview: 5+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2020-07-07 15:22 [PATCH] af_packet: TPACKET_V3: replace busy-wait loop John Ogness
2020-07-15 20:21 ` Jakub Kicinski
2020-07-15 22:35 ` Willem de Bruijn
2020-07-16 0:22 ` Jakub Kicinski
2020-07-07 19:32 kernel test robot
This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.