From: "Darrick J. Wong" <djwong@kernel.org>
To: Shiyang Ruan <ruansy.fnst@fujitsu.com>
Cc: linux-kernel@vger.kernel.org, linux-xfs@vger.kernel.org,
	nvdimm@lists.linux.dev, linux-mm@kvack.org,
	linux-fsdevel@vger.kernel.org, dan.j.williams@intel.com,
	david@fromorbit.com, hch@infradead.org, jane.chu@oracle.com,
	Christoph Hellwig <hch@lst.de>
Subject: Re: [PATCH v13 5/7] mm: Introduce mf_dax_kill_procs() for fsdax case
Date: Wed, 20 Apr 2022 10:58:04 -0700
Message-ID: <20220420175804.GY17025@magnolia>
In-Reply-To: <20220419045045.1664996-6-ruansy.fnst@fujitsu.com>

On Tue, Apr 19, 2022 at 12:50:43PM +0800, Shiyang Ruan wrote:
> This new function is a variant of mf_generic_kill_procs that accepts a
> file, offset pair instead of a struct to support multiple files sharing
> a DAX mapping.  It is intended to be called by the file systems as part
> of the memory_failure handler after the file system performed a reverse
> mapping from the storage address to the file and file offset.
> 
> Signed-off-by: Shiyang Ruan <ruansy.fnst@fujitsu.com>
> Reviewed-by: Dan Williams <dan.j.williams@intel.com>
> Reviewed-by: Christoph Hellwig <hch@lst.de>
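
Just to restate my understanding of how this gets used, since the commit
message describes the call chain only in prose: once the filesystem's rmap
code has turned the failed storage range into a (mapping, pgoff, page
count) triple, it hands that straight to this helper.  Roughly, with
completely made-up names (the real consumer is the XFS ->notify_failure
code in patch 6):

	/* Illustrative sketch only, not actual fs code. */
	static int example_handle_media_error(struct super_block *sb,
					      u64 daddr, u64 bcount,
					      int mf_flags)
	{
		struct example_file_range fr;	/* hypothetical rmap result */
		int error;

		/* fs-specific reverse mapping: storage range -> file range */
		error = example_rmap_lookup(sb, daddr, bcount, &fr);
		if (error)
			return error;

		/* fr describes the affected (mapping, pgoff, nr_pages) */
		return mf_dax_kill_procs(fr.mapping, fr.pgoff, fr.nr_pages,
					 mf_flags);
	}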

Looks ok,
Reviewed-by: Darrick J. Wong <djwong@kernel.org>

--D

> ---
>  include/linux/mm.h  |  2 +
>  mm/memory-failure.c | 96 ++++++++++++++++++++++++++++++++++++++++-----
>  2 files changed, 88 insertions(+), 10 deletions(-)
> 
> diff --git a/include/linux/mm.h b/include/linux/mm.h
> index ad4b6c15c814..52208d743546 100644
> --- a/include/linux/mm.h
> +++ b/include/linux/mm.h
> @@ -3233,6 +3233,8 @@ enum mf_flags {
>  	MF_SOFT_OFFLINE = 1 << 3,
>  	MF_UNPOISON = 1 << 4,
>  };
> +int mf_dax_kill_procs(struct address_space *mapping, pgoff_t index,
> +		      unsigned long count, int mf_flags);
>  extern int memory_failure(unsigned long pfn, int flags);
>  extern void memory_failure_queue(unsigned long pfn, int flags);
>  extern void memory_failure_queue_kick(int cpu);
> diff --git a/mm/memory-failure.c b/mm/memory-failure.c
> index a40e79e634a4..dc47c5f83d85 100644
> --- a/mm/memory-failure.c
> +++ b/mm/memory-failure.c
> @@ -295,10 +295,9 @@ void shake_page(struct page *p)
>  }
>  EXPORT_SYMBOL_GPL(shake_page);
>  
> -static unsigned long dev_pagemap_mapping_shift(struct page *page,
> -		struct vm_area_struct *vma)
> +static unsigned long dev_pagemap_mapping_shift(struct vm_area_struct *vma,
> +		unsigned long address)
>  {
> -	unsigned long address = vma_address(page, vma);
>  	unsigned long ret = 0;
>  	pgd_t *pgd;
>  	p4d_t *p4d;
> @@ -338,10 +337,14 @@ static unsigned long dev_pagemap_mapping_shift(struct page *page,
>  /*
>   * Schedule a process for later kill.
>   * Uses GFP_ATOMIC allocations to avoid potential recursions in the VM.
> + *
> + * Notice: @fsdax_pgoff is used only when @p is an fsdax page.
> + *   In other cases, such as anonymous and file-backed pages, the address to be
> + *   killed can be calculated by @p itself.
>   */
>  static void add_to_kill(struct task_struct *tsk, struct page *p,
> -		       struct vm_area_struct *vma,
> -		       struct list_head *to_kill)
> +			pgoff_t fsdax_pgoff, struct vm_area_struct *vma,
> +			struct list_head *to_kill)
>  {
>  	struct to_kill *tk;
>  
> @@ -352,9 +355,15 @@ static void add_to_kill(struct task_struct *tsk, struct page *p,
>  	}
>  
>  	tk->addr = page_address_in_vma(p, vma);
> -	if (is_zone_device_page(p))
> -		tk->size_shift = dev_pagemap_mapping_shift(p, vma);
> -	else
> +	if (is_zone_device_page(p)) {
> +		/*
> +		 * Since page->mapping is not used for fsdax, we need to
> +		 * calculate the address based on the vma.
> +		 */
> +		if (p->pgmap->type == MEMORY_DEVICE_FS_DAX)
> +			tk->addr = vma_pgoff_address(fsdax_pgoff, 1, vma);
> +		tk->size_shift = dev_pagemap_mapping_shift(vma, tk->addr);
> +	} else
>  		tk->size_shift = page_shift(compound_head(p));
>  
>  	/*
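
The vma_pgoff_address() call is the piece I stared at the longest: a
reflinked fsdax page can be mapped into more than one file, so the page
itself can't tell us which offset it's mapped at, and the address has to
be derived from the pgoff the filesystem passed down.  From memory that
helper boils down to roughly this (paraphrased, not the exact source):

	/* rough paraphrase of vma_pgoff_address() */
	if (pgoff >= vma->vm_pgoff) {
		addr = vma->vm_start +
			((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
		if (addr < vma->vm_start || addr >= vma->vm_end)
			addr = -EFAULT;	/* pgoff not covered by this vma */
	}

i.e. the same math page_address_in_vma() does for the ordinary case, just
keyed on the caller-supplied offset instead of the page's own index.
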
> @@ -503,7 +512,7 @@ static void collect_procs_anon(struct page *page, struct list_head *to_kill,
>  			if (!page_mapped_in_vma(page, vma))
>  				continue;
>  			if (vma->vm_mm == t->mm)
> -				add_to_kill(t, page, vma, to_kill);
> +				add_to_kill(t, page, 0, vma, to_kill);
>  		}
>  	}
>  	read_unlock(&tasklist_lock);
> @@ -539,13 +548,41 @@ static void collect_procs_file(struct page *page, struct list_head *to_kill,
>  			 * to be informed of all such data corruptions.
>  			 */
>  			if (vma->vm_mm == t->mm)
> -				add_to_kill(t, page, vma, to_kill);
> +				add_to_kill(t, page, 0, vma, to_kill);
>  		}
>  	}
>  	read_unlock(&tasklist_lock);
>  	i_mmap_unlock_read(mapping);
>  }
>  
> +#if IS_ENABLED(CONFIG_FS_DAX)
> +/*
> + * Collect processes when the error hits an fsdax page.
> + */
> +static void collect_procs_fsdax(struct page *page,
> +		struct address_space *mapping, pgoff_t pgoff,
> +		struct list_head *to_kill)
> +{
> +	struct vm_area_struct *vma;
> +	struct task_struct *tsk;
> +
> +	i_mmap_lock_read(mapping);
> +	read_lock(&tasklist_lock);
> +	for_each_process(tsk) {
> +		struct task_struct *t = task_early_kill(tsk, true);
> +
> +		if (!t)
> +			continue;
> +		vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff, pgoff) {
> +			if (vma->vm_mm == t->mm)
> +				add_to_kill(t, page, pgoff, vma, to_kill);
> +		}
> +	}
> +	read_unlock(&tasklist_lock);
> +	i_mmap_unlock_read(mapping);
> +}
> +#endif /* CONFIG_FS_DAX */
> +
>  /*
>   * Collect the processes who have the corrupted page mapped to kill.
>   */
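
collect_procs_fsdax() reads as a near-copy of collect_procs_file(); the
one meaningful difference is that the interval-tree walk is keyed on the
pgoff handed in by the filesystem rather than on the page.  For
comparison, collect_procs_file() does roughly:

	pgoff_t pgoff = page_to_pgoff(page);

	vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff, pgoff) {
		...
	}

which can't work for a reflinked fsdax page, since the pfn no longer maps
back to a single (mapping, index) pair.  Keying on the fs-supplied pgoff
is exactly what's needed here.
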
> @@ -1582,6 +1619,45 @@ static int mf_generic_kill_procs(unsigned long long pfn, int flags,
>  	return rc;
>  }
>  
> +#ifdef CONFIG_FS_DAX
> +/**
> + * mf_dax_kill_procs - Collect and kill processes who are using this file range
> + * @mapping:	the file in use
> + * @index:	start pgoff of the range within the file
> + * @count:	length of the range, in units of PAGE_SIZE
> + * @mf_flags:	memory failure flags
> + */
> +int mf_dax_kill_procs(struct address_space *mapping, pgoff_t index,
> +		unsigned long count, int mf_flags)
> +{
> +	LIST_HEAD(to_kill);
> +	dax_entry_t cookie;
> +	struct page *page;
> +	size_t end = index + count;
> +
> +	mf_flags |= MF_ACTION_REQUIRED | MF_MUST_KILL;
> +
> +	for (; index < end; index++) {
> +		page = NULL;
> +		cookie = dax_lock_mapping_entry(mapping, index, &page);
> +		if (!cookie)
> +			return -EBUSY;
> +		if (!page)
> +			goto unlock;
> +
> +		SetPageHWPoison(page);
> +
> +		collect_procs_fsdax(page, mapping, index, &to_kill);
> +		unmap_and_kill(&to_kill, page_to_pfn(page), mapping,
> +				index, mf_flags);
> +unlock:
> +		dax_unlock_mapping_entry(mapping, index, cookie);
> +	}
> +	return 0;
> +}
> +EXPORT_SYMBOL_GPL(mf_dax_kill_procs);
> +#endif /* CONFIG_FS_DAX */
> +
>  /*
>   * Called from hugetlb code with hugetlb_lock held.
>   *
> -- 
> 2.35.1
> 
> 
> 

Thread overview: 36+ messages
2022-04-19  4:50 [PATCH v13 0/7] fsdax: introduce fs query to support reflink Shiyang Ruan
2022-04-19  4:50 ` [PATCH v13 1/7] dax: Introduce holder for dax_device Shiyang Ruan
2022-04-20 17:42   ` Darrick J. Wong
2022-04-19  4:50 ` [PATCH v13 2/7] mm: factor helpers for memory_failure_dev_pagemap Shiyang Ruan
2022-04-21  6:13   ` HORIGUCHI NAOYA(堀口 直也)
2022-04-21  8:10     ` Shiyang Ruan
2022-04-21  8:12     ` Miaohe Lin
2022-04-19  4:50 ` [PATCH v13 3/7] pagemap,pmem: Introduce ->memory_failure() Shiyang Ruan
2022-04-20 17:45   ` Darrick J. Wong
2022-04-21  6:54   ` HORIGUCHI NAOYA(堀口 直也)
2022-04-21  8:24   ` Miaohe Lin
2022-04-22  7:06     ` Shiyang Ruan
2022-04-24  2:00       ` Miaohe Lin
2022-04-19  4:50 ` [PATCH v13 4/7] fsdax: Introduce dax_lock_mapping_entry() Shiyang Ruan
2022-04-20 17:53   ` Darrick J. Wong
2022-04-19  4:50 ` [PATCH v13 5/7] mm: Introduce mf_dax_kill_procs() for fsdax case Shiyang Ruan
2022-04-20 17:58   ` Darrick J. Wong [this message]
2022-04-21  8:47   ` Miaohe Lin
2022-04-21 12:50   ` HORIGUCHI NAOYA(堀口 直也)
2022-04-19  4:50 ` [PATCH v13 6/7] xfs: Implement ->notify_failure() for XFS Shiyang Ruan
2022-04-19 15:38   ` Darrick J. Wong
2022-04-20  7:33     ` [PATCH v13.1 " Shiyang Ruan
2022-04-20 17:30       ` Darrick J. Wong
2022-04-19  4:50 ` [PATCH v13 7/7] fsdax: set a CoW flag when associate reflink mappings Shiyang Ruan
2022-04-19  7:27   ` Christoph Hellwig
2022-04-20 17:35   ` Darrick J. Wong
2022-04-21  1:20 ` [PATCH v13 0/7] fsdax: introduce fs query to support reflink Dave Chinner
2022-04-21  1:48   ` Shiyang Ruan
2022-04-21  2:20     ` Dan Williams
2022-04-21  4:35       ` Dave Chinner
2022-04-21  5:47         ` HORIGUCHI NAOYA(堀口 直也)
2022-04-21  5:54         ` Christoph Hellwig
2022-04-21  7:46           ` Dave Chinner
2022-04-22 21:27             ` Dan Williams
2022-04-23  0:01               ` Dave Chinner
2022-04-23 17:32                 ` Dan Williams
