From: wangkeqi <wangkeqi_chris@163.com>
To: davem@davemloft.net, edumazet@google.com, kuba@kernel.org,
	pabeni@redhat.com
Cc: netdev@vger.kernel.org, linux-kernel@vger.kernel.org,
	fw@strlen.de, wangkeqi <wangkeqiwang@didiglobal.com>,
	kernel test robot <oliver.sang@intel.com>,
	fengwei.yin@intel.com
Subject: [PATCH net v3] connector: cn_netlink_has_listeners replaces proc_event_num_listeners
Date: Wed, 24 Jan 2024 19:44:37 +0800
Message-ID: <20240124114437.160930-1-wangkeqi_chris@163.com>

From: wangkeqi <wangkeqiwang@didiglobal.com>

It is inaccurate to decide whether proc_event_num_listeners should be
cleared based on cn_netlink_send_mult() returning -ESRCH. In the
stress-ng netlink-proc case, netlink_broadcast_filtered() always
returns -ESRCH, so the counter is zeroed even though listeners are
still subscribed, which may degrade stress-ng netlink-proc performance.
Changing the condition to instead check whether a listener exists does
not help either, because proc_event_num_listeners can still become
wrong under concurrency. So replace the counter with a
cn_netlink_has_listeners() helper that queries the netlink core
directly.
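
For illustration, the per-event gate changes from reading a
driver-local counter to querying the netlink core (a minimal sketch of
the before and after checks; cn_netlink_has_listeners() is the helper
added below):

	/* Before: driver-local counter, zeroed whenever
	 * cn_netlink_send_mult() returned -ESRCH, even though
	 * listeners were still subscribed.
	 */
	if (atomic_read(&proc_event_num_listeners) < 1)
		return;

	/* After: ask the netlink core whether anyone is currently
	 * subscribed to the CN_IDX_PROC multicast group.
	 */
	if (!cn_netlink_has_listeners())
		return;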

Reported-by: kernel test robot <oliver.sang@intel.com>
Closes: https://lore.kernel.org/oe-lkp/202401112259.b23a1567-oliver.sang@intel.com
Fixes: c46bfba133 ("connector: Fix proc_event_num_listeners count not cleared")
Signed-off-by: wangkeqi <wangkeqiwang@didiglobal.com>
Cc: fengwei.yin@intel.com
Cc: fw@strlen.de
---
 drivers/connector/cn_proc.c   | 33 +++++++++++++++++++++------------
 drivers/connector/connector.c |  9 +++++++++
 include/linux/connector.h     |  1 +
 3 files changed, 31 insertions(+), 12 deletions(-)

diff --git a/drivers/connector/cn_proc.c b/drivers/connector/cn_proc.c
index 3d5e6d705..4898e974c 100644
--- a/drivers/connector/cn_proc.c
+++ b/drivers/connector/cn_proc.c
@@ -85,6 +85,16 @@ static int cn_filter(struct sock *dsk, struct sk_buff *skb, void *data)
 	return 1;
 }
 
+static int cn_netlink_has_listeners(void)
+{
+	struct sock *sk = get_cdev_nls();
+
+	if (sk)
+		return netlink_has_listeners(sk, CN_IDX_PROC);
+
+	return 0;
+}
+
 static inline void send_msg(struct cn_msg *msg)
 {
 	__u32 filter_data[2];
@@ -108,9 +118,8 @@ static inline void send_msg(struct cn_msg *msg)
 		filter_data[1] = 0;
 	}
 
-	if (cn_netlink_send_mult(msg, msg->len, 0, CN_IDX_PROC, GFP_NOWAIT,
-			     cn_filter, (void *)filter_data) == -ESRCH)
-		atomic_set(&proc_event_num_listeners, 0);
+	cn_netlink_send_mult(msg, msg->len, 0, CN_IDX_PROC, GFP_NOWAIT,
+			     cn_filter, (void *)filter_data);
 
 	local_unlock(&local_event.lock);
 }
@@ -122,7 +131,7 @@ void proc_fork_connector(struct task_struct *task)
 	__u8 buffer[CN_PROC_MSG_SIZE] __aligned(8);
 	struct task_struct *parent;
 
-	if (atomic_read(&proc_event_num_listeners) < 1)
+	if (!cn_netlink_has_listeners())
 		return;
 
 	msg = buffer_to_cn_msg(buffer);
@@ -151,7 +160,7 @@ void proc_exec_connector(struct task_struct *task)
 	struct proc_event *ev;
 	__u8 buffer[CN_PROC_MSG_SIZE] __aligned(8);
 
-	if (atomic_read(&proc_event_num_listeners) < 1)
+	if (!cn_netlink_has_listeners())
 		return;
 
 	msg = buffer_to_cn_msg(buffer);
@@ -176,7 +185,7 @@ void proc_id_connector(struct task_struct *task, int which_id)
 	__u8 buffer[CN_PROC_MSG_SIZE] __aligned(8);
 	const struct cred *cred;
 
-	if (atomic_read(&proc_event_num_listeners) < 1)
+	if (!cn_netlink_has_listeners())
 		return;
 
 	msg = buffer_to_cn_msg(buffer);
@@ -213,7 +222,7 @@ void proc_sid_connector(struct task_struct *task)
 	struct proc_event *ev;
 	__u8 buffer[CN_PROC_MSG_SIZE] __aligned(8);
 
-	if (atomic_read(&proc_event_num_listeners) < 1)
+	if (!cn_netlink_has_listeners())
 		return;
 
 	msg = buffer_to_cn_msg(buffer);
@@ -237,7 +246,7 @@ void proc_ptrace_connector(struct task_struct *task, int ptrace_id)
 	struct proc_event *ev;
 	__u8 buffer[CN_PROC_MSG_SIZE] __aligned(8);
 
-	if (atomic_read(&proc_event_num_listeners) < 1)
+	if (!cn_netlink_has_listeners())
 		return;
 
 	msg = buffer_to_cn_msg(buffer);
@@ -269,7 +278,7 @@ void proc_comm_connector(struct task_struct *task)
 	struct proc_event *ev;
 	__u8 buffer[CN_PROC_MSG_SIZE] __aligned(8);
 
-	if (atomic_read(&proc_event_num_listeners) < 1)
+	if (!cn_netlink_has_listeners())
 		return;
 
 	msg = buffer_to_cn_msg(buffer);
@@ -295,7 +304,7 @@ void proc_coredump_connector(struct task_struct *task)
 	struct task_struct *parent;
 	__u8 buffer[CN_PROC_MSG_SIZE] __aligned(8);
 
-	if (atomic_read(&proc_event_num_listeners) < 1)
+	if (!cn_netlink_has_listeners())
 		return;
 
 	msg = buffer_to_cn_msg(buffer);
@@ -328,7 +337,7 @@ void proc_exit_connector(struct task_struct *task)
 	struct task_struct *parent;
 	__u8 buffer[CN_PROC_MSG_SIZE] __aligned(8);
 
-	if (atomic_read(&proc_event_num_listeners) < 1)
+	if (!cn_netlink_has_listeners())
 		return;
 
 	msg = buffer_to_cn_msg(buffer);
@@ -370,7 +379,7 @@ static void cn_proc_ack(int err, int rcvd_seq, int rcvd_ack)
 	struct proc_event *ev;
 	__u8 buffer[CN_PROC_MSG_SIZE] __aligned(8);
 
-	if (atomic_read(&proc_event_num_listeners) < 1)
+	if (!cn_netlink_has_listeners())
 		return;
 
 	msg = buffer_to_cn_msg(buffer);
diff --git a/drivers/connector/connector.c b/drivers/connector/connector.c
index 7f7b94f61..42bcb39ba 100644
--- a/drivers/connector/connector.c
+++ b/drivers/connector/connector.c
@@ -129,6 +129,15 @@ int cn_netlink_send(struct cn_msg *msg, u32 portid, u32 __group,
 }
 EXPORT_SYMBOL_GPL(cn_netlink_send);
 
+struct sock *get_cdev_nls(void)
+{
+	if (cn_already_initialized == 1)
+		return cdev.nls;
+
+	return NULL;
+}
+EXPORT_SYMBOL_GPL(get_cdev_nls);
+
 /*
  * Callback helper - queues work and setup destructor for given data.
  */
diff --git a/include/linux/connector.h b/include/linux/connector.h
index cec2d99ae..255466aea 100644
--- a/include/linux/connector.h
+++ b/include/linux/connector.h
@@ -127,6 +127,7 @@ int cn_netlink_send_mult(struct cn_msg *msg, u16 len, u32 portid,
  */
 int cn_netlink_send(struct cn_msg *msg, u32 portid, u32 group, gfp_t gfp_mask);
 
+struct sock *get_cdev_nls(void);
 int cn_queue_add_callback(struct cn_queue_dev *dev, const char *name,
 			  const struct cb_id *id,
 			  void (*callback)(struct cn_msg *, struct netlink_skb_parms *));
-- 
2.27.0

