From mboxrd@z Thu Jan  1 00:00:00 1970
Message-Id: <20190425094802.891724020@linutronix.de>
User-Agent: quilt/0.65
Date: Thu, 25 Apr 2019 11:45:12 +0200
From: Thomas Gleixner <tglx@linutronix.de>
To: LKML
Cc: Josh Poimboeuf, x86@kernel.org, Andy Lutomirski, Steven Rostedt,
	Alexander Potapenko, Alexey Dobriyan, Andrew Morton,
	Christoph Lameter, Pekka Enberg, linux-mm@kvack.org,
	David Rientjes, Catalin Marinas, Dmitry Vyukov, Andrey Ryabinin,
	kasan-dev@googlegroups.com, Mike Rapoport, Akinobu Mita,
	Christoph Hellwig, iommu@lists.linux-foundation.org,
	Robin Murphy, Marek Szyprowski, Johannes Thumshirn, David Sterba,
	Chris Mason, Josef Bacik, linux-btrfs@vger.kernel.org,
	dm-devel@redhat.com, Mike Snitzer, Alasdair Kergon,
	Daniel Vetter, intel-gfx@lists.freedesktop.org, Joonas Lahtinen,
	Maarten Lankhorst, dri-devel@lists.freedesktop.org, David Airlie,
	Jani Nikula, Rodrigo Vivi, Tom Zanussi, Miroslav Benes,
	linux-arch@vger.kernel.org
Subject: [patch V3 19/29] lockdep: Simplify stack trace handling
References: <20190425094453.875139013@linutronix.de>
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8

Replace the indirection through struct stack_trace by using the storage
array based interfaces and storing the information in a small lockdep
specific data structure.
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
---
 include/linux/lockdep.h  |    9 +++++--
 kernel/locking/lockdep.c |   55 +++++++++++++++++++++++------------------------
 2 files changed, 35 insertions(+), 29 deletions(-)

--- a/include/linux/lockdep.h
+++ b/include/linux/lockdep.h
@@ -66,6 +66,11 @@ struct lock_class_key {
 
 extern struct lock_class_key __lockdep_no_validate__;
 
+struct lock_trace {
+	unsigned int		nr_entries;
+	unsigned int		offset;
+};
+
 #define LOCKSTAT_POINTS		4
 
 /*
@@ -100,7 +105,7 @@ struct lock_class {
 	 * IRQ/softirq usage tracking bits:
 	 */
 	unsigned long			usage_mask;
-	struct stack_trace		usage_traces[XXX_LOCK_USAGE_STATES];
+	struct lock_trace		usage_traces[XXX_LOCK_USAGE_STATES];
 
 	/*
 	 * Generation counter, when doing certain classes of graph walking,
@@ -188,7 +193,7 @@ struct lock_list {
 	struct list_head		entry;
 	struct lock_class		*class;
 	struct lock_class		*links_to;
-	struct stack_trace		trace;
+	struct lock_trace		trace;
 	int				distance;
 
 	/*
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -434,18 +434,14 @@ static void print_lockdep_off(const char
 #endif
 }
 
-static int save_trace(struct stack_trace *trace)
+static int save_trace(struct lock_trace *trace)
 {
-	trace->nr_entries = 0;
-	trace->max_entries = MAX_STACK_TRACE_ENTRIES - nr_stack_trace_entries;
-	trace->entries = stack_trace + nr_stack_trace_entries;
-
-	trace->skip = 3;
-
-	save_stack_trace(trace);
-
-	trace->max_entries = trace->nr_entries;
+	unsigned long *entries = stack_trace + nr_stack_trace_entries;
+	unsigned int max_entries;
 
+	trace->offset = nr_stack_trace_entries;
+	max_entries = MAX_STACK_TRACE_ENTRIES - nr_stack_trace_entries;
+	trace->nr_entries = stack_trace_save(entries, max_entries, 3);
 	nr_stack_trace_entries += trace->nr_entries;
 
 	if (nr_stack_trace_entries >= MAX_STACK_TRACE_ENTRIES-1) {
@@ -1196,7 +1192,7 @@ static struct lock_list *alloc_list_entr
 static int add_lock_to_list(struct lock_class *this,
 			    struct lock_class *links_to, struct list_head *head,
 			    unsigned long ip, int distance,
-			    struct stack_trace *trace)
+			    struct lock_trace *trace)
 {
 	struct lock_list *entry;
 	/*
@@ -1415,6 +1411,13 @@ static inline int __bfs_backwards(struct
  * checking.
  */
 
+static void print_lock_trace(struct lock_trace *trace, unsigned int spaces)
+{
+	unsigned long *entries = stack_trace + trace->offset;
+
+	stack_trace_print(entries, trace->nr_entries, spaces);
+}
+
 /*
  * Print a dependency chain entry (this is only done when a deadlock
  * has been detected):
@@ -1427,8 +1430,7 @@ print_circular_bug_entry(struct lock_lis
 	printk("\n-> #%u", depth);
 	print_lock_name(target->class);
 	printk(KERN_CONT ":\n");
-	print_stack_trace(&target->trace, 6);
-
+	print_lock_trace(&target->trace, 6);
 	return 0;
 }
 
@@ -1740,7 +1742,7 @@ static void print_lock_class_header(stru
 
 			len += printk("%*s   %s", depth, "", usage_str[bit]);
 			len += printk(KERN_CONT " at:\n");
-			print_stack_trace(class->usage_traces + bit, len);
+			print_lock_trace(class->usage_traces + bit, len);
 		}
 	}
 	printk("%*s }\n", depth, "");
@@ -1765,7 +1767,7 @@ print_shortest_lock_dependencies(struct
 	do {
 		print_lock_class_header(entry->class, depth);
 		printk("%*s ... acquired at:\n", depth, "");
-		print_stack_trace(&entry->trace, 2);
+		print_lock_trace(&entry->trace, 2);
 		printk("\n");
 
 		if (depth == 0 && (entry != root)) {
@@ -1878,14 +1880,14 @@ print_bad_irq_dependency(struct task_str
 	print_lock_name(backwards_entry->class);
 	pr_warn("\n... which became %s-irq-safe at:\n", irqclass);
 
-	print_stack_trace(backwards_entry->class->usage_traces + bit1, 1);
+	print_lock_trace(backwards_entry->class->usage_traces + bit1, 1);
 
 	pr_warn("\nto a %s-irq-unsafe lock:\n", irqclass);
 	print_lock_name(forwards_entry->class);
 	pr_warn("\n... which became %s-irq-unsafe at:\n", irqclass);
 	pr_warn("...");
 
-	print_stack_trace(forwards_entry->class->usage_traces + bit2, 1);
+	print_lock_trace(forwards_entry->class->usage_traces + bit2, 1);
 
 	pr_warn("\nother info that might help us debug this:\n\n");
 	print_irq_lock_scenario(backwards_entry, forwards_entry,
@@ -2158,7 +2160,7 @@ check_deadlock(struct task_struct *curr,
  */
 static int
 check_prev_add(struct task_struct *curr, struct held_lock *prev,
-	       struct held_lock *next, int distance, struct stack_trace *trace)
+	       struct held_lock *next, int distance, struct lock_trace *trace)
 {
 	struct lock_list *uninitialized_var(target_entry);
 	struct lock_list *entry;
@@ -2196,7 +2198,7 @@ check_prev_add(struct task_struct *curr,
 	this.parent = NULL;
 	ret = check_noncircular(&this, hlock_class(prev), &target_entry);
 	if (unlikely(!ret)) {
-		if (!trace->entries) {
+		if (!trace->nr_entries) {
 			/*
 			 * If save_trace fails here, the printing might
 			 * trigger a WARN but because of the !nr_entries it
@@ -2252,7 +2254,7 @@ check_prev_add(struct task_struct *curr,
 		return print_bfs_bug(ret);
 
 
-	if (!trace->entries && !save_trace(trace))
+	if (!trace->nr_entries && !save_trace(trace))
 		return 0;
 
 	/*
@@ -2284,14 +2286,9 @@ check_prev_add(struct task_struct *curr,
 static int
 check_prevs_add(struct task_struct *curr, struct held_lock *next)
 {
+	struct lock_trace trace = { .nr_entries = 0 };
 	int depth = curr->lockdep_depth;
 	struct held_lock *hlock;
-	struct stack_trace trace = {
-		.nr_entries = 0,
-		.max_entries = 0,
-		.entries = NULL,
-		.skip = 0,
-	};
 
 	/*
 	 * Debugging checks.
@@ -2719,6 +2716,10 @@ static inline int validate_chain(struct
 {
 	return 1;
 }
+
+static void print_lock_trace(struct lock_trace *trace, unsigned int spaces)
+{
+}
 #endif
 
 /*
@@ -2815,7 +2816,7 @@ print_usage_bug(struct task_struct *curr
 	print_lock(this);
 
 	pr_warn("{%s} state was registered at:\n", usage_str[prev_bit]);
-	print_stack_trace(hlock_class(this)->usage_traces + prev_bit, 1);
+	print_lock_trace(hlock_class(this)->usage_traces + prev_bit, 1);
 
 	print_irqtrace_events(curr);
 	pr_warn("\nother info that might help us debug this:\n");
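
[Editor's sketch, not part of the patch] The following stand-alone sketch illustrates the storage scheme the patch moves lockdep to: one flat entry array shared by all traces, plus a two-word struct lock_trace that records only an offset into that array and an entry count, instead of a struct stack_trace carrying a pointer, max_entries and skip fields. It is a user-space approximation under stated assumptions: fake_stack_trace_save() and fake_stack_trace_print() are hypothetical stand-ins for the kernel's stack_trace_save()/stack_trace_print(), and the array size and sample addresses are made up.

#include <stdio.h>

#define MAX_STACK_TRACE_ENTRIES	64

struct lock_trace {
	unsigned int	nr_entries;
	unsigned int	offset;
};

static unsigned long stack_trace[MAX_STACK_TRACE_ENTRIES];
static unsigned int nr_stack_trace_entries;

/*
 * Hypothetical stand-in for stack_trace_save(): pretend three frames were
 * captured; skipnr is accepted only to mirror the kernel signature.
 */
static unsigned int fake_stack_trace_save(unsigned long *store, unsigned int size,
					  unsigned int skipnr)
{
	static const unsigned long frames[] = {
		0xffffffff81001000UL, 0xffffffff81002000UL, 0xffffffff81003000UL,
	};
	unsigned int i, n = size < 3 ? size : 3;

	(void)skipnr;
	for (i = 0; i < n; i++)
		store[i] = frames[i];
	return n;
}

/* Hypothetical stand-in for stack_trace_print(): dump entries, indented. */
static void fake_stack_trace_print(const unsigned long *entries,
				   unsigned int nr, unsigned int spaces)
{
	unsigned int i;

	for (i = 0; i < nr; i++)
		printf("%*s[<%016lx>]\n", (int)spaces, "", entries[i]);
}

/* Mirrors the reworked save_trace(): record offset + count, no pointer. */
static int save_trace(struct lock_trace *trace)
{
	unsigned long *entries = stack_trace + nr_stack_trace_entries;
	unsigned int max_entries = MAX_STACK_TRACE_ENTRIES - nr_stack_trace_entries;

	trace->offset = nr_stack_trace_entries;
	trace->nr_entries = fake_stack_trace_save(entries, max_entries, 3);
	nr_stack_trace_entries += trace->nr_entries;

	return nr_stack_trace_entries < MAX_STACK_TRACE_ENTRIES - 1;
}

/* Mirrors print_lock_trace(): rebuild the pointer from the stored offset. */
static void print_lock_trace(const struct lock_trace *trace, unsigned int spaces)
{
	fake_stack_trace_print(stack_trace + trace->offset, trace->nr_entries, spaces);
}

int main(void)
{
	struct lock_trace trace = { .nr_entries = 0 };

	if (save_trace(&trace))
		print_lock_trace(&trace, 4);
	return 0;
}

Keeping only {nr_entries, offset} per trace is what lets the patch drop the per-trace pointer bookkeeping and shrink struct lock_class and struct lock_list, as the diffstat above reflects.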