From: Zhang Yanfei <zhangyanfei@cn.fujitsu.com>
To: HATAYAMA Daisuke <d.hatayama@jp.fujitsu.com>
Cc: vgoyal@redhat.com, ebiederm@xmission.com, cpw@sgi.com,
	kumagai-atsushi@mxc.nes.nec.co.jp, lisa.mitchell@hp.com,
	heiko.carstens@de.ibm.com, akpm@linux-foundation.org,
	kexec@lists.infradead.org, linux-kernel@vger.kernel.org
Subject: Re: [PATCH v2 07/20] vmcore: copy non page-size aligned head and tail pages in 2nd kernel
Date: Sun, 10 Mar 2013 14:16:51 +0800
Message-ID: <513C2553.5050402@cn.fujitsu.com>
In-Reply-To: <20130302083627.31252.41277.stgit@localhost6.localdomain6>

On 2013/03/02 16:36, HATAYAMA Daisuke wrote:
> Due to the mmap() requirement, pages that do not start or end on a
> page-size aligned address need to be copied into the 2nd kernel and
> mapped to user-space from there.
> 
> For example, see the map below:
> 
>     00000000-0000ffff : reserved
>     00010000-0009f7ff : System RAM
>     0009f800-0009ffff : reserved
> 
> where the System RAM region ends at 0x9f800, which is not page-size
> aligned. This region is divided into two parts:
> 
>     00010000-0009dfff

00010000-0009efff

>     0009f000-0009f7ff
> 
> The first part is kept in old memory and the second part is copied
> into a buffer in the 2nd kernel.
> 
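
As a quick cross-check of the corrected split, here is a small
stand-alone user-space sketch; the rounddown/roundup macros and the
4K PAGE_SIZE below are simplified assumptions for illustration, not
the kernel definitions:

    #include <stdio.h>

    #define PAGE_SIZE 0x1000ULL
    #define rounddown(x, y) ((x) & ~((y) - 1))
    #define roundup(x, y)   (((x) + (y) - 1) & ~((y) - 1))

    int main(void)
    {
            /* System RAM region from the example map above */
            unsigned long long start = 0x10000, end = 0x9f800;

            /* non-aligned head, copied into a 2nd kernel buffer
             * (absent here since 0x10000 is already aligned) */
            if (start & (PAGE_SIZE - 1))
                    printf("head:   %#llx-%#llx\n",
                           start, roundup(start, PAGE_SIZE) - 1);

            /* page-aligned middle part, kept in old memory */
            printf("middle: %#llx-%#llx\n",
                   roundup(start, PAGE_SIZE),
                   rounddown(end, PAGE_SIZE) - 1);

            /* non-aligned tail, copied into a 2nd kernel buffer */
            if (end & (PAGE_SIZE - 1))
                    printf("tail:   %#llx-%#llx\n",
                           rounddown(end, PAGE_SIZE), end - 1);
            return 0;
    }

This prints "middle: 0x10000-0x9efff" and "tail: 0x9f000-0x9f7ff",
matching the split above.
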
> This kind of non-page-size-aligned area can always occur, since any
> part of System RAM can be converted into a reserved area at runtime.
> 
> Without this copying, i.e. if non page-size aligned pages in old
> memory were remapped directly, mmap() would have to export memory
> that is not a dump target to user-space. In the above example, this
> is the reserved area 0x9f800-0xa0000.
> 
> Signed-off-by: HATAYAMA Daisuke <d.hatayama@jp.fujitsu.com>
> ---
> 
>  fs/proc/vmcore.c |  192 ++++++++++++++++++++++++++++++++++++++++++++++++------
>  1 files changed, 172 insertions(+), 20 deletions(-)
> 
> diff --git a/fs/proc/vmcore.c b/fs/proc/vmcore.c
> index c511cf4..6b071b4 100644
> --- a/fs/proc/vmcore.c
> +++ b/fs/proc/vmcore.c
> @@ -474,11 +474,10 @@ static int __init process_ptload_program_headers_elf64(char *elfptr,
>  						size_t elfsz,
>  						struct list_head *vc_list)
>  {
> -	int i;
> +	int i, rc;
>  	Elf64_Ehdr *ehdr_ptr;
>  	Elf64_Phdr *phdr_ptr;
>  	loff_t vmcore_off;
> -	struct vmcore *new;
>  
>  	ehdr_ptr = (Elf64_Ehdr *)elfptr;
>  	phdr_ptr = (Elf64_Phdr*)(elfptr + ehdr_ptr->e_phoff); /* PT_NOTE hdr */
> @@ -488,20 +487,97 @@ static int __init process_ptload_program_headers_elf64(char *elfptr,
>  						  PAGE_SIZE);
>  
>  	for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) {
> +		u64 start, end, rest;
> +
>  		if (phdr_ptr->p_type != PT_LOAD)
>  			continue;
>  
> -		/* Add this contiguous chunk of memory to vmcore list.*/
> -		new = get_new_element();
> -		if (!new)
> -			return -ENOMEM;
> -		new->paddr = phdr_ptr->p_offset;
> -		new->size = phdr_ptr->p_memsz;
> -		list_add_tail(&new->list, vc_list);
> +		start = phdr_ptr->p_offset;
> +		end = phdr_ptr->p_offset + phdr_ptr->p_memsz;
> +		rest = phdr_ptr->p_memsz;
> +
> +		if (start & ~PAGE_MASK) {
> +			u64 paddr, len;
> +			char *buf;
> +			struct vmcore *new;
> +
> +			paddr = start;
> +			len = min(roundup(start,PAGE_SIZE), end) - start;
> +
> +			buf = (char *)get_zeroed_page(GFP_KERNEL);
> +			if (!buf)
> +				return -ENOMEM;
> +			rc = read_from_oldmem(buf + (start & ~PAGE_MASK), len,
> +					      &paddr, 0);
> +			if (rc < 0) {
> +				free_pages((unsigned long)buf, 0);
> +				return rc;
> +			}
> +
> +			new = get_new_element();
> +			if (!new) {
> +				free_pages((unsigned long)buf, 0);
> +				return -ENOMEM;
> +			}
> +			new->flag |= MEM_TYPE_CURRENT_KERNEL;
> +			new->size = PAGE_SIZE;
> +			new->buf = buf;
> +			list_add_tail(&new->list, vc_list);
> +
> +			rest -= len;
> +		}
> +
> +		if (rest > 0 &&
> +		    roundup(start, PAGE_SIZE) < rounddown(end, PAGE_SIZE)) {
> +			u64 paddr, len;
> +			struct vmcore *new;
> +
> +			paddr = roundup(start, PAGE_SIZE);
> +			len =rounddown(end,PAGE_SIZE)-roundup(start,PAGE_SIZE);
> +
> +			new = get_new_element();
> +			if (!new)
> +				return -ENOMEM;
> +			new->paddr = paddr;
> +			new->size = len;
> +			list_add_tail(&new->list, vc_list);
> +
> +			rest -= len;
> +		}
> +
> +		if (rest > 0) {
> +			u64 paddr, len;
> +			char *buf;
> +			struct vmcore *new;
> +
> +			paddr = rounddown(end, PAGE_SIZE);
> +			len = end - rounddown(end, PAGE_SIZE);
> +
> +			buf = (char *)get_zeroed_page(GFP_KERNEL);
> +			if (!buf)
> +				return -ENOMEM;
> +			rc = read_from_oldmem(buf, len, &paddr, 0);
> +			if (rc < 0) {
> +				free_pages((unsigned long)buf, 0);
> +				return rc;
> +			}
> +
> +			new = get_new_element();
> +			if (!new) {
> +				free_pages((unsigned long)buf, 0);
> +				return -ENOMEM;
> +			}
> +			new->flag |= MEM_TYPE_CURRENT_KERNEL;
> +			new->size = PAGE_SIZE;
> +			new->buf = buf;
> +			list_add_tail(&new->list, vc_list);
> +
> +			rest -= len;
> +		}
>  
>  		/* Update the program header offset. */
>  		phdr_ptr->p_offset = vmcore_off;
> -		vmcore_off = vmcore_off + phdr_ptr->p_memsz;
> +		vmcore_off +=roundup(end,PAGE_SIZE)-rounddown(start,PAGE_SIZE);

Here the code changes phdr_ptr->p_offset to a new page-size aligned
offset, but it seems phdr_ptr->p_paddr is still the non page-size
aligned physical address. Doesn't this create a mismatch between the
PT_LOAD segment and the physical memory it describes?

Or will makedumpfile later check whether phdr_ptr->p_paddr is
page-size aligned, and use phdr_ptr->p_memsz to get the real memory
size, excluding the padding?
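
(To make the numbers concrete, assuming p_memsz stays 0x8f800 for the
example region: vmcore_off now advances by roundup(0x9f800, PAGE_SIZE)
- rounddown(0x10000, PAGE_SIZE) = 0xa0000 - 0x10000 = 0x90000 bytes,
which is 0x800 bytes more than p_memsz, so the file view of this
segment ends with 0x800 bytes of zero padding.)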

>  	}
>  	return 0;
>  }
> @@ -510,11 +586,10 @@ static int __init process_ptload_program_headers_elf32(char *elfptr,
>  						size_t elfsz,
>  						struct list_head *vc_list)
>  {
> -	int i;
> +	int i, rc;
>  	Elf32_Ehdr *ehdr_ptr;
>  	Elf32_Phdr *phdr_ptr;
>  	loff_t vmcore_off;
> -	struct vmcore *new;
>  
>  	ehdr_ptr = (Elf32_Ehdr *)elfptr;
>  	phdr_ptr = (Elf32_Phdr*)(elfptr + ehdr_ptr->e_phoff); /* PT_NOTE hdr */
> @@ -524,20 +599,97 @@ static int __init process_ptload_program_headers_elf32(char *elfptr,
>  						 PAGE_SIZE);
>  
>  	for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) {
> +		u64 start, end, rest;
> +
>  		if (phdr_ptr->p_type != PT_LOAD)
>  			continue;
>  
> -		/* Add this contiguous chunk of memory to vmcore list.*/
> -		new = get_new_element();
> -		if (!new)
> -			return -ENOMEM;
> -		new->paddr = phdr_ptr->p_offset;
> -		new->size = phdr_ptr->p_memsz;
> -		list_add_tail(&new->list, vc_list);
> +		start = phdr_ptr->p_offset;
> +		end = phdr_ptr->p_offset + phdr_ptr->p_memsz;
> +		rest = phdr_ptr->p_memsz;
> +
> +		if (start & ~PAGE_MASK) {
> +			u64 paddr, len;
> +			char *buf;
> +			struct vmcore *new;
> +
> +			paddr = start;
> +			len = min(roundup(start,PAGE_SIZE), end) - start;
> +
> +			buf = (char *)get_zeroed_page(GFP_KERNEL);
> +			if (!buf)
> +				return -ENOMEM;
> +			rc = read_from_oldmem(buf + (start & ~PAGE_MASK), len,
> +					      &paddr, 0);
> +			if (rc < 0) {
> +				free_pages((unsigned long)buf, 0);
> +				return rc;
> +			}
> +
> +			new = get_new_element();
> +			if (!new) {
> +				free_pages((unsigned long)buf, 0);
> +				return -ENOMEM;
> +			}
> +			new->flag |= MEM_TYPE_CURRENT_KERNEL;
> +			new->size = PAGE_SIZE;
> +			new->buf = buf;
> +			list_add_tail(&new->list, vc_list);
> +
> +			rest -= len;
> +		}
> +
> +		if (rest > 0 &&
> +		    roundup(start, PAGE_SIZE) < rounddown(end, PAGE_SIZE)) {
> +			u64 paddr, len;
> +			struct vmcore *new;
> +
> +			paddr = roundup(start, PAGE_SIZE);
> +			len =rounddown(end,PAGE_SIZE)-roundup(start,PAGE_SIZE);
> +
> +			new = get_new_element();
> +			if (!new)
> +				return -ENOMEM;
> +			new->paddr = paddr;
> +			new->size = len;
> +			list_add_tail(&new->list, vc_list);
> +
> +			rest -= len;
> +		}
> +
> +		if (rest > 0) {
> +			u64 paddr, len;
> +			char *buf;
> +			struct vmcore *new;
> +
> +			paddr = rounddown(end, PAGE_SIZE);
> +			len = end - rounddown(end, PAGE_SIZE);
> +
> +			buf = (char *)get_zeroed_page(GFP_KERNEL);
> +			if (!buf)
> +				return -ENOMEM;
> +			rc = read_from_oldmem(buf, len, &paddr, 0);
> +			if (rc < 0) {
> +				free_pages((unsigned long)buf, 0);
> +				return rc;
> +			}
> +
> +			new = get_new_element();
> +			if (!new) {
> +				free_pages((unsigned long)buf, 0);
> +				return -ENOMEM;
> +			}
> +			new->flag |= MEM_TYPE_CURRENT_KERNEL;
> +			new->size = PAGE_SIZE;
> +			new->buf = buf;
> +			list_add_tail(&new->list, vc_list);
> +
> +			rest -= len;
> +		}
>  
>  		/* Update the program header offset */
>  		phdr_ptr->p_offset = vmcore_off;
> -		vmcore_off = vmcore_off + phdr_ptr->p_memsz;
> +		vmcore_off +=roundup(end,PAGE_SIZE)-rounddown(start,PAGE_SIZE);
>  	}
>  	return 0;
>  }
> 
> 
> _______________________________________________
> kexec mailing list
> kexec@lists.infradead.org
> http://lists.infradead.org/mailman/listinfo/kexec
> 

