From mboxrd@z Thu Jan 1 00:00:00 1970 Return-Path: Received: (majordomo@vger.kernel.org) by vger.kernel.org via listexpand id S1753574Ab3CEHFY (ORCPT ); Tue, 5 Mar 2013 02:05:24 -0500 Received: from fgwmail5.fujitsu.co.jp ([192.51.44.35]:44506 "EHLO fgwmail5.fujitsu.co.jp" rhost-flags-OK-OK-OK-OK) by vger.kernel.org with ESMTP id S1752467Ab3CEHFT (ORCPT ); Tue, 5 Mar 2013 02:05:19 -0500 From: HATAYAMA Daisuke Subject: [PATCH v2 04/20] vmcore: allocate buffer for ELF headers on page-size alignment To: vgoyal@redhat.com, ebiederm@xmission.com, cpw@sgi.com, kumagai-atsushi@mxc.nes.nec.co.jp, lisa.mitchell@hp.com, heiko.carstens@de.ibm.com, akpm@linux-foundation.org Cc: kexec@lists.infradead.org, linux-kernel@vger.kernel.org Date: Sat, 02 Mar 2013 17:36:11 +0900 Message-ID: <20130302083610.31252.18601.stgit@localhost6.localdomain6> In-Reply-To: <20130302083447.31252.93914.stgit@localhost6.localdomain6> References: <20130302083447.31252.93914.stgit@localhost6.localdomain6> User-Agent: StGIT/0.14.3 MIME-Version: 1.0 Content-Type: text/plain; charset="utf-8" Content-Transfer-Encoding: 7bit Sender: linux-kernel-owner@vger.kernel.org List-ID: X-Mailing-List: linux-kernel@vger.kernel.org Allocate buffer for ELF headers on page-size aligned boundary to satisfy mmap() requirement. For this, __get_free_pages() is used instead of kmalloc(). Also, later patch will decrease actually used buffer size for ELF headers, so it's necessary to keep original buffer size and actually used buffer size separately. elfcorebuf_sz_orig keeps the original one and elfcorebuf_sz the actually used one. Signed-off-by: HATAYAMA Daisuke --- fs/proc/vmcore.c | 30 +++++++++++++++++++++--------- 1 files changed, 21 insertions(+), 9 deletions(-) diff --git a/fs/proc/vmcore.c b/fs/proc/vmcore.c index b5c9e33..1b02d01 100644 --- a/fs/proc/vmcore.c +++ b/fs/proc/vmcore.c @@ -31,6 +31,7 @@ static LIST_HEAD(vmcore_list); /* Stores the pointer to the buffer containing kernel elf core headers. 
*/ static char *elfcorebuf; static size_t elfcorebuf_sz; +static size_t elfcorebuf_sz_orig; /* Total size of vmcore file. */ static u64 vmcore_size; @@ -610,26 +611,31 @@ static int __init parse_crash_elf64_headers(void) /* Read in all elf headers. */ elfcorebuf_sz = ehdr.e_phoff + ehdr.e_phnum * sizeof(Elf64_Phdr); - elfcorebuf = kmalloc(elfcorebuf_sz, GFP_KERNEL); + elfcorebuf_sz_orig = elfcorebuf_sz; + elfcorebuf = (void *) __get_free_pages(GFP_KERNEL | __GFP_ZERO, + get_order(elfcorebuf_sz_orig)); if (!elfcorebuf) return -ENOMEM; addr = elfcorehdr_addr; rc = read_from_oldmem(elfcorebuf, elfcorebuf_sz, &addr, 0); if (rc < 0) { - kfree(elfcorebuf); + free_pages((unsigned long)elfcorebuf, + get_order(elfcorebuf_sz_orig)); return rc; } /* Merge all PT_NOTE headers into one. */ rc = merge_note_headers_elf64(elfcorebuf, &elfcorebuf_sz, &vmcore_list); if (rc) { - kfree(elfcorebuf); + free_pages((unsigned long)elfcorebuf, + get_order(elfcorebuf_sz_orig)); return rc; } rc = process_ptload_program_headers_elf64(elfcorebuf, elfcorebuf_sz, &vmcore_list); if (rc) { - kfree(elfcorebuf); + free_pages((unsigned long)elfcorebuf, + get_order(elfcorebuf_sz_orig)); return rc; } set_vmcore_list_offsets_elf64(elfcorebuf, &vmcore_list); @@ -665,26 +671,31 @@ static int __init parse_crash_elf32_headers(void) /* Read in all elf headers. */ elfcorebuf_sz = ehdr.e_phoff + ehdr.e_phnum * sizeof(Elf32_Phdr); - elfcorebuf = kmalloc(elfcorebuf_sz, GFP_KERNEL); + elfcorebuf_sz_orig = elfcorebuf_sz; + elfcorebuf = (void *) __get_free_pages(GFP_KERNEL | __GFP_ZERO, + get_order(elfcorebuf_sz)); if (!elfcorebuf) return -ENOMEM; addr = elfcorehdr_addr; rc = read_from_oldmem(elfcorebuf, elfcorebuf_sz, &addr, 0); if (rc < 0) { - kfree(elfcorebuf); + free_pages((unsigned long)elfcorebuf, + get_order(elfcorebuf_sz_orig)); return rc; } /* Merge all PT_NOTE headers into one. 
*/ rc = merge_note_headers_elf32(elfcorebuf, &elfcorebuf_sz, &vmcore_list); if (rc) { - kfree(elfcorebuf); + free_pages((unsigned long)elfcorebuf, + get_order(elfcorebuf_sz_orig)); return rc; } rc = process_ptload_program_headers_elf32(elfcorebuf, elfcorebuf_sz, &vmcore_list); if (rc) { - kfree(elfcorebuf); + free_pages((unsigned long)elfcorebuf, + get_order(elfcorebuf_sz_orig)); return rc; } set_vmcore_list_offsets_elf32(elfcorebuf, &vmcore_list); @@ -766,7 +777,8 @@ void vmcore_cleanup(void) list_del(&m->list); kfree(m); } - kfree(elfcorebuf); + free_pages((unsigned long)elfcorebuf, + get_order(elfcorebuf_sz_orig)); elfcorebuf = NULL; } EXPORT_SYMBOL_GPL(vmcore_cleanup); From mboxrd@z Thu Jan 1 00:00:00 1970 Return-path: Received: from fgwmail6.fujitsu.co.jp ([192.51.44.36]) by merlin.infradead.org with esmtps (Exim 4.80.1 #2 (Red Hat Linux)) id 1UClw7-0006ee-Tt for kexec@lists.infradead.org; Tue, 05 Mar 2013 07:05:21 +0000 Received: from m2.gw.fujitsu.co.jp (unknown [10.0.50.72]) by fgwmail6.fujitsu.co.jp (Postfix) with ESMTP id 6122F3EE0CB for ; Tue, 5 Mar 2013 16:05:18 +0900 (JST) Received: from smail (m2 [127.0.0.1]) by outgoing.m2.gw.fujitsu.co.jp (Postfix) with ESMTP id 3B7CB45DE5B for ; Tue, 5 Mar 2013 16:05:18 +0900 (JST) Received: from s2.gw.fujitsu.co.jp (s2.gw.fujitsu.co.jp [10.0.50.92]) by m2.gw.fujitsu.co.jp (Postfix) with ESMTP id 7831945DE52 for ; Tue, 5 Mar 2013 16:05:17 +0900 (JST) Received: from s2.gw.fujitsu.co.jp (localhost.localdomain [127.0.0.1]) by s2.gw.fujitsu.co.jp (Postfix) with ESMTP id 648F9E08003 for ; Tue, 5 Mar 2013 16:05:17 +0900 (JST) Received: from ml14.s.css.fujitsu.com (ml14.s.css.fujitsu.com [10.240.81.134]) by s2.gw.fujitsu.co.jp (Postfix) with ESMTP id 0B3831DB8038 for ; Tue, 5 Mar 2013 16:05:17 +0900 (JST) From: HATAYAMA Daisuke Subject: [PATCH v2 04/20] vmcore: allocate buffer for ELF headers on page-size alignment Date: Sat, 02 Mar 2013 17:36:11 +0900 Message-ID: <20130302083610.31252.18601.stgit@localhost6.localdomain6> 
In-Reply-To: <20130302083447.31252.93914.stgit@localhost6.localdomain6> References: <20130302083447.31252.93914.stgit@localhost6.localdomain6> MIME-Version: 1.0 List-Id: List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , Content-Type: text/plain; charset="us-ascii" Content-Transfer-Encoding: 7bit Sender: "kexec" Errors-To: kexec-bounces+dwmw2=infradead.org@lists.infradead.org To: vgoyal@redhat.com, ebiederm@xmission.com, cpw@sgi.com, kumagai-atsushi@mxc.nes.nec.co.jp, lisa.mitchell@hp.com, heiko.carstens@de.ibm.com, akpm@linux-foundation.org Cc: kexec@lists.infradead.org, linux-kernel@vger.kernel.org Allocate buffer for ELF headers on page-size aligned boundary to satisfy mmap() requirement. For this, __get_free_pages() is used instead of kmalloc(). Also, later patch will decrease actually used buffer size for ELF headers, so it's necessary to keep original buffer size and actually used buffer size separately. elfcorebuf_sz_orig keeps the original one and elfcorebuf_sz the actually used one. Signed-off-by: HATAYAMA Daisuke --- fs/proc/vmcore.c | 30 +++++++++++++++++++++--------- 1 files changed, 21 insertions(+), 9 deletions(-) diff --git a/fs/proc/vmcore.c b/fs/proc/vmcore.c index b5c9e33..1b02d01 100644 --- a/fs/proc/vmcore.c +++ b/fs/proc/vmcore.c @@ -31,6 +31,7 @@ static LIST_HEAD(vmcore_list); /* Stores the pointer to the buffer containing kernel elf core headers. */ static char *elfcorebuf; static size_t elfcorebuf_sz; +static size_t elfcorebuf_sz_orig; /* Total size of vmcore file. */ static u64 vmcore_size; @@ -610,26 +611,31 @@ static int __init parse_crash_elf64_headers(void) /* Read in all elf headers. 
*/ elfcorebuf_sz = ehdr.e_phoff + ehdr.e_phnum * sizeof(Elf64_Phdr); - elfcorebuf = kmalloc(elfcorebuf_sz, GFP_KERNEL); + elfcorebuf_sz_orig = elfcorebuf_sz; + elfcorebuf = (void *) __get_free_pages(GFP_KERNEL | __GFP_ZERO, + get_order(elfcorebuf_sz_orig)); if (!elfcorebuf) return -ENOMEM; addr = elfcorehdr_addr; rc = read_from_oldmem(elfcorebuf, elfcorebuf_sz, &addr, 0); if (rc < 0) { - kfree(elfcorebuf); + free_pages((unsigned long)elfcorebuf, + get_order(elfcorebuf_sz_orig)); return rc; } /* Merge all PT_NOTE headers into one. */ rc = merge_note_headers_elf64(elfcorebuf, &elfcorebuf_sz, &vmcore_list); if (rc) { - kfree(elfcorebuf); + free_pages((unsigned long)elfcorebuf, + get_order(elfcorebuf_sz_orig)); return rc; } rc = process_ptload_program_headers_elf64(elfcorebuf, elfcorebuf_sz, &vmcore_list); if (rc) { - kfree(elfcorebuf); + free_pages((unsigned long)elfcorebuf, + get_order(elfcorebuf_sz_orig)); return rc; } set_vmcore_list_offsets_elf64(elfcorebuf, &vmcore_list); @@ -665,26 +671,31 @@ static int __init parse_crash_elf32_headers(void) /* Read in all elf headers. */ elfcorebuf_sz = ehdr.e_phoff + ehdr.e_phnum * sizeof(Elf32_Phdr); - elfcorebuf = kmalloc(elfcorebuf_sz, GFP_KERNEL); + elfcorebuf_sz_orig = elfcorebuf_sz; + elfcorebuf = (void *) __get_free_pages(GFP_KERNEL | __GFP_ZERO, + get_order(elfcorebuf_sz)); if (!elfcorebuf) return -ENOMEM; addr = elfcorehdr_addr; rc = read_from_oldmem(elfcorebuf, elfcorebuf_sz, &addr, 0); if (rc < 0) { - kfree(elfcorebuf); + free_pages((unsigned long)elfcorebuf, + get_order(elfcorebuf_sz_orig)); return rc; } /* Merge all PT_NOTE headers into one. 
*/ rc = merge_note_headers_elf32(elfcorebuf, &elfcorebuf_sz, &vmcore_list); if (rc) { - kfree(elfcorebuf); + free_pages((unsigned long)elfcorebuf, + get_order(elfcorebuf_sz_orig)); return rc; } rc = process_ptload_program_headers_elf32(elfcorebuf, elfcorebuf_sz, &vmcore_list); if (rc) { - kfree(elfcorebuf); + free_pages((unsigned long)elfcorebuf, + get_order(elfcorebuf_sz_orig)); return rc; } set_vmcore_list_offsets_elf32(elfcorebuf, &vmcore_list); @@ -766,7 +777,8 @@ void vmcore_cleanup(void) list_del(&m->list); kfree(m); } - kfree(elfcorebuf); + free_pages((unsigned long)elfcorebuf, + get_order(elfcorebuf_sz_orig)); elfcorebuf = NULL; } EXPORT_SYMBOL_GPL(vmcore_cleanup); _______________________________________________ kexec mailing list kexec@lists.infradead.org http://lists.infradead.org/mailman/listinfo/kexec