* + fs-epoll-use-a-per-cpu-counter-for-users-watches-count-fix.patch added to -mm tree
@ 2021-08-04 0:06 akpm
0 siblings, 0 replies; 2+ messages in thread
From: akpm @ 2021-08-04 0:06 UTC (permalink / raw)
To: mm-commits, viro, npiggin, anton, akpm
The patch titled
Subject: fs-epoll-use-a-per-cpu-counter-for-users-watches-count-fix
has been added to the -mm tree. Its filename is
fs-epoll-use-a-per-cpu-counter-for-users-watches-count-fix.patch
This patch should soon appear at
https://ozlabs.org/~akpm/mmots/broken-out/fs-epoll-use-a-per-cpu-counter-for-users-watches-count-fix.patch
and later at
https://ozlabs.org/~akpm/mmotm/broken-out/fs-epoll-use-a-per-cpu-counter-for-users-watches-count-fix.patch
Before you just go and hit "reply", please:
a) Consider who else should be cc'ed
b) Prefer to cc a suitable mailing list as well
c) Ideally: find the original patch on the mailing list and do a
reply-to-all to that, adding suitable additional cc's
*** Remember to use Documentation/process/submit-checklist.rst when testing your code ***
The -mm tree is included into linux-next and is updated
there every 3-4 working days
------------------------------------------------------
From: Andrew Morton <akpm@linux-foundation.org>
Subject: fs-epoll-use-a-per-cpu-counter-for-users-watches-count-fix
fix build, fix printk typo
Cc: Alexander Viro <viro@zeniv.linux.org.uk>
Cc: Anton Blanchard <anton@ozlabs.org>
Cc: Nicholas Piggin <npiggin@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
---
kernel/user.c | 10 +++++++++-
1 file changed, 9 insertions(+), 1 deletion(-)
--- a/kernel/user.c~fs-epoll-use-a-per-cpu-counter-for-users-watches-count-fix
+++ a/kernel/user.c
@@ -138,7 +138,9 @@ static void free_user(struct user_struct
{
uid_hash_remove(up);
spin_unlock_irqrestore(&uidhash_lock, flags);
+#ifdef CONFIG_EPOLL
percpu_counter_destroy(&up->epoll_watches);
+#endif
kmem_cache_free(uid_cachep, up);
}
@@ -186,10 +188,12 @@ struct user_struct *alloc_uid(kuid_t uid
new->uid = uid;
refcount_set(&new->__count, 1);
+#ifdef CONFIG_EPOLL
if (percpu_counter_init(&new->epoll_watches, 0, GFP_KERNEL)) {
kmem_cache_free(uid_cachep, new);
return NULL;
}
+#endif
ratelimit_state_init(&new->ratelimit, HZ, 100);
ratelimit_set_flags(&new->ratelimit, RATELIMIT_MSG_ON_RELEASE);
@@ -200,7 +204,9 @@ struct user_struct *alloc_uid(kuid_t uid
spin_lock_irq(&uidhash_lock);
up = uid_hash_find(uid, hashent);
if (up) {
+#ifdef CONFIG_EPOLL
percpu_counter_destroy(&new->epoll_watches);
+#endif
kmem_cache_free(uid_cachep, new);
} else {
uid_hash_insert(new, hashent);
@@ -222,8 +228,10 @@ static int __init uid_cache_init(void)
for(n = 0; n < UIDHASH_SZ; ++n)
INIT_HLIST_HEAD(uidhash_table + n);
+#ifdef CONFIG_EPOLL
if (percpu_counter_init(&root_user.epoll_watches, 0, GFP_KERNEL))
- panic("percpu cpunter alloc failed");
+ panic("percpu counter alloc failed");
+#endif
/* Insert the root user immediately (init already runs as root) */
spin_lock_irq(&uidhash_lock);
_
Patches currently in -mm which might be from akpm@linux-foundation.org are
mm.patch
lazy-tlb-allow-lazy-tlb-mm-refcounting-to-be-configurable-fix.patch
mm-compaction-optimize-proactive-compaction-deferrals-fix.patch
mm-compaction-support-triggering-of-proactive-compaction-by-user-fix.patch
mm-mempolicy-convert-from-atomic_t-to-refcount_t-on-mempolicy-refcnt-fix.patch
fs-epoll-use-a-per-cpu-counter-for-users-watches-count-fix.patch
log-if-a-core-dump-is-aborted-due-to-changed-file-permissions-fix.patch
linux-next-rejects.patch
kernel-forkc-export-kernel_thread-to-modules.patch
^ permalink raw reply [flat|nested] 2+ messages in thread
* + fs-epoll-use-a-per-cpu-counter-for-users-watches-count-fix.patch added to -mm tree
@ 2021-08-05 23:53 akpm
0 siblings, 0 replies; 2+ messages in thread
From: akpm @ 2021-08-05 23:53 UTC (permalink / raw)
To: mm-commits, sfr, npiggin, linux, broonie, rdunlap
The patch titled
Subject: fs-epoll-use-a-per-cpu-counter-for-users-watches-count-fix
has been added to the -mm tree. Its filename is
fs-epoll-use-a-per-cpu-counter-for-users-watches-count-fix.patch
This patch should soon appear at
https://ozlabs.org/~akpm/mmots/broken-out/fs-epoll-use-a-per-cpu-counter-for-users-watches-count-fix.patch
and later at
https://ozlabs.org/~akpm/mmotm/broken-out/fs-epoll-use-a-per-cpu-counter-for-users-watches-count-fix.patch
Before you just go and hit "reply", please:
a) Consider who else should be cc'ed
b) Prefer to cc a suitable mailing list as well
c) Ideally: find the original patch on the mailing list and do a
reply-to-all to that, adding suitable additional cc's
*** Remember to use Documentation/process/submit-checklist.rst when testing your code ***
The -mm tree is included into linux-next and is updated
there every 3-4 working days
------------------------------------------------------
From: Randy Dunlap <rdunlap@infradead.org>
Subject: fs-epoll-use-a-per-cpu-counter-for-users-watches-count-fix
fix build errors in kernel/user.c when CONFIG_EPOLL=n
Also fix typo: "cpunter" -> "counter" in a panic message.
[npiggin@gmail.com: move ifdefs into wrapper functions, slightly improve panic message]
Link: https://lkml.kernel.org/r/1628051945.fens3r99ox.astroid@bobo.none
Fixes: e75b89477811 ("fs/epoll: use a per-cpu counter for user's watches count")
Signed-off-by: Randy Dunlap <rdunlap@infradead.org>
Signed-off-by: Nicholas Piggin <npiggin@gmail.com>
Cc: Mark Brown <broonie@kernel.org>
Cc: Guenter Roeck <linux@roeck-us.net>
Cc: Stephen Rothwell <sfr@canb.auug.org.au>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
---
kernel/user.c | 25 ++++++++++++++++++++-----
1 file changed, 20 insertions(+), 5 deletions(-)
--- a/kernel/user.c~fs-epoll-use-a-per-cpu-counter-for-users-watches-count-fix
+++ a/kernel/user.c
@@ -129,6 +129,21 @@ static struct user_struct *uid_hash_find
return NULL;
}
+static int user_epoll_alloc(struct user_struct *up)
+{
+#ifdef CONFIG_EPOLL
+ return percpu_counter_init(&up->epoll_watches, 0, GFP_KERNEL);
+#endif
+ return 0;
+}
+
+static void user_epoll_free(struct user_struct *up)
+{
+#ifdef CONFIG_EPOLL
+ percpu_counter_destroy(&up->epoll_watches);
+#endif
+}
+
/* IRQs are disabled and uidhash_lock is held upon function entry.
* IRQ state (as stored in flags) is restored and uidhash_lock released
* upon function exit.
@@ -138,7 +153,7 @@ static void free_user(struct user_struct
{
uid_hash_remove(up);
spin_unlock_irqrestore(&uidhash_lock, flags);
- percpu_counter_destroy(&up->epoll_watches);
+ user_epoll_free(up);
kmem_cache_free(uid_cachep, up);
}
@@ -186,7 +201,7 @@ struct user_struct *alloc_uid(kuid_t uid
new->uid = uid;
refcount_set(&new->__count, 1);
- if (percpu_counter_init(&new->epoll_watches, 0, GFP_KERNEL)) {
+ if (user_epoll_alloc(new)) {
kmem_cache_free(uid_cachep, new);
return NULL;
}
@@ -200,7 +215,7 @@ struct user_struct *alloc_uid(kuid_t uid
spin_lock_irq(&uidhash_lock);
up = uid_hash_find(uid, hashent);
if (up) {
- percpu_counter_destroy(&new->epoll_watches);
+ user_epoll_free(new);
kmem_cache_free(uid_cachep, new);
} else {
uid_hash_insert(new, hashent);
@@ -222,8 +237,8 @@ static int __init uid_cache_init(void)
for(n = 0; n < UIDHASH_SZ; ++n)
INIT_HLIST_HEAD(uidhash_table + n);
- if (percpu_counter_init(&root_user.epoll_watches, 0, GFP_KERNEL))
- panic("percpu cpunter alloc failed");
+ if (user_epoll_alloc(&root_user))
+ panic("root_user epoll percpu counter alloc failed");
/* Insert the root user immediately (init already runs as root) */
spin_lock_irq(&uidhash_lock);
_
Patches currently in -mm which might be from rdunlap@infradead.org are
fs-epoll-use-a-per-cpu-counter-for-users-watches-count-fix.patch
scripts-check_extable-fix-typo-in-user-error-message.patch
^ permalink raw reply [flat|nested] 2+ messages in thread
end of thread, other threads:[~2021-08-05 23:53 UTC | newest]
Thread overview: 2+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2021-08-04 0:06 + fs-epoll-use-a-per-cpu-counter-for-users-watches-count-fix.patch added to -mm tree akpm
2021-08-05 23:53 akpm
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).