From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
To: Clark Williams <williams@redhat.com>
Cc: LKML <linux-kernel@vger.kernel.org>,
linux-rt-users <linux-rt-users@vger.kernel.org>,
Steven Rostedt <rostedt@goodmis.org>,
Thomas Gleixner <tglx@linutronix.de>,
Carsten Emde <C.Emde@osadl.org>, John Kacur <jkacur@redhat.com>,
Daniel Wagner <daniel.wagner@suse.com>,
Tom Zanussi <tom.zanussi@linux.intel.com>,
Pavel Machek <pavel@denx.de>,
Salvatore Bonaccorso <carnil@debian.org>
Subject: [PATCH RT] net: Add missing xmit_lock_owner hunks.
Date: Wed, 22 Dec 2021 20:35:22 +0100 [thread overview]
Message-ID: <YcN9+vGl7GXAZwJH@linutronix.de> (raw)
In-Reply-To: <163977665182.1250088.11049848941535534253@puck.lan>
The patch
net: move xmit_recursion to per-task variable on -RT
lost a few hunks during its rebase.
Add the `xmit_lock_owner' accessor/wrapper.
Reported-by: Salvatore Bonaccorso <carnil@debian.org>
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
---
include/linux/netdevice.h | 29 +++++++++++++----------------
1 file changed, 13 insertions(+), 16 deletions(-)
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index 7b34ce34114ac..ce19befe4d87d 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -3882,17 +3882,17 @@ static inline u32 netif_msg_init(int debug_value, int default_msg_enable_bits)
#ifdef CONFIG_PREEMPT_RT_FULL
static inline void netdev_queue_set_owner(struct netdev_queue *txq, int cpu)
{
- txq->xmit_lock_owner = current;
+ WRITE_ONCE(txq->xmit_lock_owner, current);
}
static inline void netdev_queue_clear_owner(struct netdev_queue *txq)
{
- txq->xmit_lock_owner = NULL;
+ WRITE_ONCE(txq->xmit_lock_owner, NULL);
}
static inline bool netdev_queue_has_owner(struct netdev_queue *txq)
{
- if (txq->xmit_lock_owner != NULL)
+ if (READ_ONCE(txq->xmit_lock_owner) != NULL)
return true;
return false;
}
@@ -3901,17 +3901,19 @@ static inline bool netdev_queue_has_owner(struct netdev_queue *txq)
static inline void netdev_queue_set_owner(struct netdev_queue *txq, int cpu)
{
- txq->xmit_lock_owner = cpu;
+ /* Pairs with READ_ONCE() in __dev_queue_xmit() */
+ WRITE_ONCE(txq->xmit_lock_owner, cpu);
}
static inline void netdev_queue_clear_owner(struct netdev_queue *txq)
{
- txq->xmit_lock_owner = -1;
+ /* Pairs with READ_ONCE() in __dev_queue_xmit() */
+ WRITE_ONCE(txq->xmit_lock_owner, -1);
}
static inline bool netdev_queue_has_owner(struct netdev_queue *txq)
{
- if (txq->xmit_lock_owner != -1)
+ if (READ_ONCE(txq->xmit_lock_owner) != -1)
return true;
return false;
}
@@ -3920,8 +3922,7 @@ static inline bool netdev_queue_has_owner(struct netdev_queue *txq)
static inline void __netif_tx_lock(struct netdev_queue *txq, int cpu)
{
spin_lock(&txq->_xmit_lock);
- /* Pairs with READ_ONCE() in __dev_queue_xmit() */
- WRITE_ONCE(txq->xmit_lock_owner, cpu);
+ netdev_queue_set_owner(txq, cpu);
}
static inline bool __netif_tx_acquire(struct netdev_queue *txq)
@@ -3938,8 +3939,7 @@ static inline void __netif_tx_release(struct netdev_queue *txq)
static inline void __netif_tx_lock_bh(struct netdev_queue *txq)
{
spin_lock_bh(&txq->_xmit_lock);
- /* Pairs with READ_ONCE() in __dev_queue_xmit() */
- WRITE_ONCE(txq->xmit_lock_owner, smp_processor_id());
+ netdev_queue_set_owner(txq, smp_processor_id());
}
static inline bool __netif_tx_trylock(struct netdev_queue *txq)
@@ -3947,23 +3947,20 @@ static inline bool __netif_tx_trylock(struct netdev_queue *txq)
bool ok = spin_trylock(&txq->_xmit_lock);
if (likely(ok)) {
- /* Pairs with READ_ONCE() in __dev_queue_xmit() */
- WRITE_ONCE(txq->xmit_lock_owner, smp_processor_id());
+ netdev_queue_set_owner(txq, smp_processor_id());
}
return ok;
}
static inline void __netif_tx_unlock(struct netdev_queue *txq)
{
- /* Pairs with READ_ONCE() in __dev_queue_xmit() */
- WRITE_ONCE(txq->xmit_lock_owner, -1);
+ netdev_queue_clear_owner(txq);
spin_unlock(&txq->_xmit_lock);
}
static inline void __netif_tx_unlock_bh(struct netdev_queue *txq)
{
- /* Pairs with READ_ONCE() in __dev_queue_xmit() */
- WRITE_ONCE(txq->xmit_lock_owner, -1);
+ netdev_queue_clear_owner(txq);
spin_unlock_bh(&txq->_xmit_lock);
}
--
2.34.1
prev parent reply other threads:[~2021-12-22 19:35 UTC|newest]
Thread overview: 2+ messages / expand[flat|nested] mbox.gz Atom feed top
2021-12-17 21:30 [ANNOUNCE] 4.19.221-rt99 Clark Williams
2021-12-22 19:35 ` Sebastian Andrzej Siewior [this message]
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=YcN9+vGl7GXAZwJH@linutronix.de \
--to=bigeasy@linutronix.de \
--cc=C.Emde@osadl.org \
--cc=carnil@debian.org \
--cc=daniel.wagner@suse.com \
--cc=jkacur@redhat.com \
--cc=linux-kernel@vger.kernel.org \
--cc=linux-rt-users@vger.kernel.org \
--cc=pavel@denx.de \
--cc=rostedt@goodmis.org \
--cc=tglx@linutronix.de \
--cc=tom.zanussi@linux.intel.com \
--cc=williams@redhat.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.