From mboxrd@z Thu Jan 1 00:00:00 1970
Return-Path:
Received: from mail-pl0-f66.google.com ([209.85.160.66]:39700 "EHLO mail-pl0-f66.google.com"
        rhost-flags-OK-OK-OK-OK) by vger.kernel.org with ESMTP id S1729914AbeGRXjX
        (ORCPT ); Wed, 18 Jul 2018 19:39:23 -0400
Received: by mail-pl0-f66.google.com with SMTP id p23-v6so2688615plo.6
        for ; Wed, 18 Jul 2018 15:59:17 -0700 (PDT)
From: Omar Sandoval
To: linux-kernel@vger.kernel.org, linux-fsdevel@vger.kernel.org,
        Andrew Morton
Cc: Alexey Dobriyan, Eric Biederman, James Morse, Bhupesh Sharma,
        kernel-team@fb.com
Subject: [PATCH v3 3/8] proc/kcore: replace kclist_lock rwlock with rwsem
Date: Wed, 18 Jul 2018 15:58:43 -0700
Message-Id: <521ba449ebe921d905177410fee9222d07882f0d.1531953780.git.osandov@fb.com>
In-Reply-To:
References:
Sender: linux-fsdevel-owner@vger.kernel.org
List-ID:

From: Omar Sandoval

Now we only need kclist_lock from user context and at fs init time, and
the following changes need to sleep while holding the kclist_lock.

Signed-off-by: Omar Sandoval
---
 fs/proc/kcore.c | 20 ++++++++++----------
 1 file changed, 10 insertions(+), 10 deletions(-)

diff --git a/fs/proc/kcore.c b/fs/proc/kcore.c
index e83f15a4f66d..ae43a97d511d 100644
--- a/fs/proc/kcore.c
+++ b/fs/proc/kcore.c
@@ -59,7 +59,7 @@ struct memelfnote
 };
 
 static LIST_HEAD(kclist_head);
-static DEFINE_RWLOCK(kclist_lock);
+static DECLARE_RWSEM(kclist_lock);
 static int kcore_need_update = 1;
 
 /* This doesn't grab kclist_lock, so it should only be used at init time. */
@@ -117,7 +117,7 @@ static void __kcore_update_ram(struct list_head *list)
 	struct kcore_list *tmp, *pos;
 	LIST_HEAD(garbage);
 
-	write_lock(&kclist_lock);
+	down_write(&kclist_lock);
 	if (xchg(&kcore_need_update, 0)) {
 		list_for_each_entry_safe(pos, tmp, &kclist_head, list) {
 			if (pos->type == KCORE_RAM
@@ -128,7 +128,7 @@ static void __kcore_update_ram(struct list_head *list)
 	} else
 		list_splice(list, &garbage);
 	proc_root_kcore->size = get_kcore_size(&nphdr, &size);
-	write_unlock(&kclist_lock);
+	up_write(&kclist_lock);
 
 	free_kclist_ents(&garbage);
 }
@@ -451,11 +451,11 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
 	int nphdr;
 	unsigned long start;
 
-	read_lock(&kclist_lock);
+	down_read(&kclist_lock);
 	size = get_kcore_size(&nphdr, &elf_buflen);
 
 	if (buflen == 0 || *fpos >= size) {
-		read_unlock(&kclist_lock);
+		up_read(&kclist_lock);
 		return 0;
 	}
 
@@ -472,11 +472,11 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
 			tsz = buflen;
 		elf_buf = kzalloc(elf_buflen, GFP_ATOMIC);
 		if (!elf_buf) {
-			read_unlock(&kclist_lock);
+			up_read(&kclist_lock);
 			return -ENOMEM;
 		}
 		elf_kcore_store_hdr(elf_buf, nphdr, elf_buflen);
-		read_unlock(&kclist_lock);
+		up_read(&kclist_lock);
 		if (copy_to_user(buffer, elf_buf + *fpos, tsz)) {
 			kfree(elf_buf);
 			return -EFAULT;
@@ -491,7 +491,7 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
 		if (buflen == 0)
 			return acc;
 	} else
-		read_unlock(&kclist_lock);
+		up_read(&kclist_lock);
 
 	/*
 	 * Check to see if our file offset matches with any of
@@ -504,12 +504,12 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
 	while (buflen) {
 		struct kcore_list *m;
 
-		read_lock(&kclist_lock);
+		down_read(&kclist_lock);
 		list_for_each_entry(m, &kclist_head, list) {
 			if (start >= m->addr && start < (m->addr+m->size))
 				break;
 		}
-		read_unlock(&kclist_lock);
+		up_read(&kclist_lock);
 
 		if (&m->list == &kclist_head) {
 			if (clear_user(buffer, tsz))
-- 
2.18.0
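
[Editor's note] A minimal sketch of the distinction the commit message relies on: in Linux, a rwlock_t is a spinning lock, so code must not sleep while holding it, whereas an rw_semaphore may be held across sleeping operations. The lock and function names below (example_lock, fill_buffer_locked) are hypothetical illustrations, not taken from the patch:

#include <linux/rwsem.h>
#include <linux/slab.h>

/* Hypothetical lock, mirroring the converted kclist_lock. */
static DECLARE_RWSEM(example_lock);

static void *fill_buffer_locked(size_t size)
{
	void *buf;

	down_write(&example_lock);
	/* Sleeping while holding an rwsem is allowed... */
	buf = kzalloc(size, GFP_KERNEL);
	up_write(&example_lock);

	/*
	 * ...whereas under the old DEFINE_RWLOCK()/write_lock() scheme the
	 * allocation would have had to be GFP_ATOMIC, because sleeping with
	 * a spinning lock held is a bug.
	 */
	return buf;
}

This sleeping-allowed property is presumably what the later patches in the series depend on when, as the commit message says, they "need to sleep while holding the kclist_lock".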