From: John Hubbard <jhubbard@nvidia.com> To: ira.weiny@intel.com, Andrew Morton <akpm@linux-foundation.org> Cc: Jason Gunthorpe <jgg@ziepe.ca>, Dan Williams <dan.j.williams@intel.com>, Matthew Wilcox <willy@infradead.org>, Jan Kara <jack@suse.cz>, Theodore Ts'o <tytso@mit.edu>, Michal Hocko <mhocko@suse.com>, Dave Chinner <david@fromorbit.com>, linux-xfs@vger.kernel.org, linux-rdma@vger.kernel.org, linux-kernel@vger.kernel.org, linux-fsdevel@vger.kernel.org, linux-nvdimm@lists.01.org, linux-ext4@vger.kernel.org, linux-mm@kvack.org Subject: Re: [RFC PATCH v2 09/19] mm/gup: Introduce vaddr_pin structure Date: Fri, 9 Aug 2019 17:06:44 -0700 [thread overview] Message-ID: <e92723cf-97a1-9860-9482-8466ff2feaa8@nvidia.com> (raw) In-Reply-To: <20190809225833.6657-10-ira.weiny@intel.com> On 8/9/19 3:58 PM, ira.weiny@intel.com wrote: > From: Ira Weiny <ira.weiny@intel.com> > > Some subsystems need to pass owning file information to GUP calls to > allow for GUP to associate the "owning file" to any files being pinned > within the GUP call. > > Introduce an object to specify this information and pass it down through > some of the GUP call stack. > > Signed-off-by: Ira Weiny <ira.weiny@intel.com> > --- > include/linux/mm.h | 9 +++++++++ > mm/gup.c | 36 ++++++++++++++++++++++-------------- > 2 files changed, 31 insertions(+), 14 deletions(-) > Looks good, although you may want to combine it with the next patch. Otherwise it feels like a "to be continued" when you're reading them. 
Either way, though: Reviewed-by: John Hubbard <jhubbard@nvidia.com> thanks, -- John Hubbard NVIDIA > diff --git a/include/linux/mm.h b/include/linux/mm.h > index 04f22722b374..befe150d17be 100644 > --- a/include/linux/mm.h > +++ b/include/linux/mm.h > @@ -971,6 +971,15 @@ static inline bool is_zone_device_page(const struct page *page) > } > #endif > > +/** > + * @f_owner The file who "owns this GUP" > + * @mm The mm who "owns this GUP" > + */ > +struct vaddr_pin { > + struct file *f_owner; > + struct mm_struct *mm; > +}; > + > #ifdef CONFIG_DEV_PAGEMAP_OPS > void __put_devmap_managed_page(struct page *page); > DECLARE_STATIC_KEY_FALSE(devmap_managed_key); > diff --git a/mm/gup.c b/mm/gup.c > index 0b05e22ac05f..7a449500f0a6 100644 > --- a/mm/gup.c > +++ b/mm/gup.c > @@ -1005,7 +1005,8 @@ static __always_inline long __get_user_pages_locked(struct task_struct *tsk, > struct page **pages, > struct vm_area_struct **vmas, > int *locked, > - unsigned int flags) > + unsigned int flags, > + struct vaddr_pin *vaddr_pin) > { > long ret, pages_done; > bool lock_dropped; > @@ -1165,7 +1166,8 @@ long get_user_pages_remote(struct task_struct *tsk, struct mm_struct *mm, > > return __get_user_pages_locked(tsk, mm, start, nr_pages, pages, vmas, > locked, > - gup_flags | FOLL_TOUCH | FOLL_REMOTE); > + gup_flags | FOLL_TOUCH | FOLL_REMOTE, > + NULL); > } > EXPORT_SYMBOL(get_user_pages_remote); > > @@ -1320,7 +1322,8 @@ static long __get_user_pages_locked(struct task_struct *tsk, > struct mm_struct *mm, unsigned long start, > unsigned long nr_pages, struct page **pages, > struct vm_area_struct **vmas, int *locked, > - unsigned int foll_flags) > + unsigned int foll_flags, > + struct vaddr_pin *vaddr_pin) > { > struct vm_area_struct *vma; > unsigned long vm_flags; > @@ -1504,7 +1507,7 @@ static long check_and_migrate_cma_pages(struct task_struct *tsk, > */ > nr_pages = __get_user_pages_locked(tsk, mm, start, nr_pages, > pages, vmas, NULL, > - gup_flags); > + gup_flags, NULL); > > if 
((nr_pages > 0) && migrate_allow) { > drain_allow = true; > @@ -1537,7 +1540,8 @@ static long __gup_longterm_locked(struct task_struct *tsk, > unsigned long nr_pages, > struct page **pages, > struct vm_area_struct **vmas, > - unsigned int gup_flags) > + unsigned int gup_flags, > + struct vaddr_pin *vaddr_pin) > { > struct vm_area_struct **vmas_tmp = vmas; > unsigned long flags = 0; > @@ -1558,7 +1562,7 @@ static long __gup_longterm_locked(struct task_struct *tsk, > } > > rc = __get_user_pages_locked(tsk, mm, start, nr_pages, pages, > - vmas_tmp, NULL, gup_flags); > + vmas_tmp, NULL, gup_flags, vaddr_pin); > > if (gup_flags & FOLL_LONGTERM) { > memalloc_nocma_restore(flags); > @@ -1588,10 +1592,11 @@ static __always_inline long __gup_longterm_locked(struct task_struct *tsk, > unsigned long nr_pages, > struct page **pages, > struct vm_area_struct **vmas, > - unsigned int flags) > + unsigned int flags, > + struct vaddr_pin *vaddr_pin) > { > return __get_user_pages_locked(tsk, mm, start, nr_pages, pages, vmas, > - NULL, flags); > + NULL, flags, vaddr_pin); > } > #endif /* CONFIG_FS_DAX || CONFIG_CMA */ > > @@ -1607,7 +1612,8 @@ long get_user_pages(unsigned long start, unsigned long nr_pages, > struct vm_area_struct **vmas) > { > return __gup_longterm_locked(current, current->mm, start, nr_pages, > - pages, vmas, gup_flags | FOLL_TOUCH); > + pages, vmas, gup_flags | FOLL_TOUCH, > + NULL); > } > EXPORT_SYMBOL(get_user_pages); > > @@ -1647,7 +1653,7 @@ long get_user_pages_locked(unsigned long start, unsigned long nr_pages, > > return __get_user_pages_locked(current, current->mm, start, nr_pages, > pages, NULL, locked, > - gup_flags | FOLL_TOUCH); > + gup_flags | FOLL_TOUCH, NULL); > } > EXPORT_SYMBOL(get_user_pages_locked); > > @@ -1684,7 +1690,7 @@ long get_user_pages_unlocked(unsigned long start, unsigned long nr_pages, > > down_read(&mm->mmap_sem); > ret = __get_user_pages_locked(current, mm, start, nr_pages, pages, NULL, > - &locked, gup_flags | FOLL_TOUCH); > + 
&locked, gup_flags | FOLL_TOUCH, NULL); > if (locked) > up_read(&mm->mmap_sem); > return ret; > @@ -2377,7 +2383,8 @@ int __get_user_pages_fast(unsigned long start, int nr_pages, int write, > EXPORT_SYMBOL_GPL(__get_user_pages_fast); > > static int __gup_longterm_unlocked(unsigned long start, int nr_pages, > - unsigned int gup_flags, struct page **pages) > + unsigned int gup_flags, struct page **pages, > + struct vaddr_pin *vaddr_pin) > { > int ret; > > @@ -2389,7 +2396,8 @@ static int __gup_longterm_unlocked(unsigned long start, int nr_pages, > down_read(¤t->mm->mmap_sem); > ret = __gup_longterm_locked(current, current->mm, > start, nr_pages, > - pages, NULL, gup_flags); > + pages, NULL, gup_flags, > + vaddr_pin); > up_read(¤t->mm->mmap_sem); > } else { > ret = get_user_pages_unlocked(start, nr_pages, > @@ -2448,7 +2456,7 @@ int get_user_pages_fast(unsigned long start, int nr_pages, > pages += nr; > > ret = __gup_longterm_unlocked(start, nr_pages - nr, > - gup_flags, pages); > + gup_flags, pages, NULL); > > /* Have to be a bit careful with return values */ > if (nr > 0) { >
WARNING: multiple messages have this Message-ID (diff)
From: John Hubbard <jhubbard@nvidia.com> To: <ira.weiny@intel.com>, Andrew Morton <akpm@linux-foundation.org> Cc: Jason Gunthorpe <jgg@ziepe.ca>, Dan Williams <dan.j.williams@intel.com>, Matthew Wilcox <willy@infradead.org>, Jan Kara <jack@suse.cz>, Theodore Ts'o <tytso@mit.edu>, Michal Hocko <mhocko@suse.com>, Dave Chinner <david@fromorbit.com>, <linux-xfs@vger.kernel.org>, <linux-rdma@vger.kernel.org>, <linux-kernel@vger.kernel.org>, <linux-fsdevel@vger.kernel.org>, <linux-nvdimm@lists.01.org>, <linux-ext4@vger.kernel.org>, <linux-mm@kvack.org> Subject: Re: [RFC PATCH v2 09/19] mm/gup: Introduce vaddr_pin structure Date: Fri, 9 Aug 2019 17:06:44 -0700 [thread overview] Message-ID: <e92723cf-97a1-9860-9482-8466ff2feaa8@nvidia.com> (raw) Message-ID: <20190810000644.3jb0eJsCvMdDqsPcYP1Zh__z0xuBZvVo5EBixdZ07hs@z> (raw) In-Reply-To: <20190809225833.6657-10-ira.weiny@intel.com> On 8/9/19 3:58 PM, ira.weiny@intel.com wrote: > From: Ira Weiny <ira.weiny@intel.com> > > Some subsystems need to pass owning file information to GUP calls to > allow for GUP to associate the "owning file" to any files being pinned > within the GUP call. > > Introduce an object to specify this information and pass it down through > some of the GUP call stack. > > Signed-off-by: Ira Weiny <ira.weiny@intel.com> > --- > include/linux/mm.h | 9 +++++++++ > mm/gup.c | 36 ++++++++++++++++++++++-------------- > 2 files changed, 31 insertions(+), 14 deletions(-) > Looks good, although you may want to combine it with the next patch. Otherwise it feels like a "to be continued" when you're reading them. 
Either way, though: Reviewed-by: John Hubbard <jhubbard@nvidia.com> thanks, -- John Hubbard NVIDIA > diff --git a/include/linux/mm.h b/include/linux/mm.h > index 04f22722b374..befe150d17be 100644 > --- a/include/linux/mm.h > +++ b/include/linux/mm.h > @@ -971,6 +971,15 @@ static inline bool is_zone_device_page(const struct page *page) > } > #endif > > +/** > + * @f_owner The file who "owns this GUP" > + * @mm The mm who "owns this GUP" > + */ > +struct vaddr_pin { > + struct file *f_owner; > + struct mm_struct *mm; > +}; > + > #ifdef CONFIG_DEV_PAGEMAP_OPS > void __put_devmap_managed_page(struct page *page); > DECLARE_STATIC_KEY_FALSE(devmap_managed_key); > diff --git a/mm/gup.c b/mm/gup.c > index 0b05e22ac05f..7a449500f0a6 100644 > --- a/mm/gup.c > +++ b/mm/gup.c > @@ -1005,7 +1005,8 @@ static __always_inline long __get_user_pages_locked(struct task_struct *tsk, > struct page **pages, > struct vm_area_struct **vmas, > int *locked, > - unsigned int flags) > + unsigned int flags, > + struct vaddr_pin *vaddr_pin) > { > long ret, pages_done; > bool lock_dropped; > @@ -1165,7 +1166,8 @@ long get_user_pages_remote(struct task_struct *tsk, struct mm_struct *mm, > > return __get_user_pages_locked(tsk, mm, start, nr_pages, pages, vmas, > locked, > - gup_flags | FOLL_TOUCH | FOLL_REMOTE); > + gup_flags | FOLL_TOUCH | FOLL_REMOTE, > + NULL); > } > EXPORT_SYMBOL(get_user_pages_remote); > > @@ -1320,7 +1322,8 @@ static long __get_user_pages_locked(struct task_struct *tsk, > struct mm_struct *mm, unsigned long start, > unsigned long nr_pages, struct page **pages, > struct vm_area_struct **vmas, int *locked, > - unsigned int foll_flags) > + unsigned int foll_flags, > + struct vaddr_pin *vaddr_pin) > { > struct vm_area_struct *vma; > unsigned long vm_flags; > @@ -1504,7 +1507,7 @@ static long check_and_migrate_cma_pages(struct task_struct *tsk, > */ > nr_pages = __get_user_pages_locked(tsk, mm, start, nr_pages, > pages, vmas, NULL, > - gup_flags); > + gup_flags, NULL); > > if 
((nr_pages > 0) && migrate_allow) { > drain_allow = true; > @@ -1537,7 +1540,8 @@ static long __gup_longterm_locked(struct task_struct *tsk, > unsigned long nr_pages, > struct page **pages, > struct vm_area_struct **vmas, > - unsigned int gup_flags) > + unsigned int gup_flags, > + struct vaddr_pin *vaddr_pin) > { > struct vm_area_struct **vmas_tmp = vmas; > unsigned long flags = 0; > @@ -1558,7 +1562,7 @@ static long __gup_longterm_locked(struct task_struct *tsk, > } > > rc = __get_user_pages_locked(tsk, mm, start, nr_pages, pages, > - vmas_tmp, NULL, gup_flags); > + vmas_tmp, NULL, gup_flags, vaddr_pin); > > if (gup_flags & FOLL_LONGTERM) { > memalloc_nocma_restore(flags); > @@ -1588,10 +1592,11 @@ static __always_inline long __gup_longterm_locked(struct task_struct *tsk, > unsigned long nr_pages, > struct page **pages, > struct vm_area_struct **vmas, > - unsigned int flags) > + unsigned int flags, > + struct vaddr_pin *vaddr_pin) > { > return __get_user_pages_locked(tsk, mm, start, nr_pages, pages, vmas, > - NULL, flags); > + NULL, flags, vaddr_pin); > } > #endif /* CONFIG_FS_DAX || CONFIG_CMA */ > > @@ -1607,7 +1612,8 @@ long get_user_pages(unsigned long start, unsigned long nr_pages, > struct vm_area_struct **vmas) > { > return __gup_longterm_locked(current, current->mm, start, nr_pages, > - pages, vmas, gup_flags | FOLL_TOUCH); > + pages, vmas, gup_flags | FOLL_TOUCH, > + NULL); > } > EXPORT_SYMBOL(get_user_pages); > > @@ -1647,7 +1653,7 @@ long get_user_pages_locked(unsigned long start, unsigned long nr_pages, > > return __get_user_pages_locked(current, current->mm, start, nr_pages, > pages, NULL, locked, > - gup_flags | FOLL_TOUCH); > + gup_flags | FOLL_TOUCH, NULL); > } > EXPORT_SYMBOL(get_user_pages_locked); > > @@ -1684,7 +1690,7 @@ long get_user_pages_unlocked(unsigned long start, unsigned long nr_pages, > > down_read(&mm->mmap_sem); > ret = __get_user_pages_locked(current, mm, start, nr_pages, pages, NULL, > - &locked, gup_flags | FOLL_TOUCH); > + 
&locked, gup_flags | FOLL_TOUCH, NULL); > if (locked) > up_read(&mm->mmap_sem); > return ret; > @@ -2377,7 +2383,8 @@ int __get_user_pages_fast(unsigned long start, int nr_pages, int write, > EXPORT_SYMBOL_GPL(__get_user_pages_fast); > > static int __gup_longterm_unlocked(unsigned long start, int nr_pages, > - unsigned int gup_flags, struct page **pages) > + unsigned int gup_flags, struct page **pages, > + struct vaddr_pin *vaddr_pin) > { > int ret; > > @@ -2389,7 +2396,8 @@ static int __gup_longterm_unlocked(unsigned long start, int nr_pages, > down_read(¤t->mm->mmap_sem); > ret = __gup_longterm_locked(current, current->mm, > start, nr_pages, > - pages, NULL, gup_flags); > + pages, NULL, gup_flags, > + vaddr_pin); > up_read(¤t->mm->mmap_sem); > } else { > ret = get_user_pages_unlocked(start, nr_pages, > @@ -2448,7 +2456,7 @@ int get_user_pages_fast(unsigned long start, int nr_pages, > pages += nr; > > ret = __gup_longterm_unlocked(start, nr_pages - nr, > - gup_flags, pages); > + gup_flags, pages, NULL); > > /* Have to be a bit careful with return values */ > if (nr > 0) { >
next prev parent reply other threads:[~2019-08-10 0:06 UTC|newest] Thread overview: 118+ messages / expand[flat|nested] mbox.gz Atom feed top 2019-08-09 22:58 [RFC PATCH v2 00/19] RDMA/FS DAX truncate proposal V1,000,002 ;-) ira.weiny 2019-08-09 22:58 ` [RFC PATCH v2 01/19] fs/locks: Export F_LAYOUT lease to user space ira.weiny 2019-08-09 23:52 ` Dave Chinner 2019-08-12 17:36 ` Ira Weiny 2019-08-14 8:05 ` Dave Chinner 2019-08-14 11:21 ` Jeff Layton 2019-08-14 11:38 ` Dave Chinner 2019-08-09 22:58 ` [RFC PATCH v2 02/19] fs/locks: Add Exclusive flag to user Layout lease ira.weiny 2019-08-14 14:15 ` Jeff Layton 2019-08-14 21:56 ` Dave Chinner 2019-08-26 10:41 ` Jeff Layton 2019-08-29 23:34 ` Ira Weiny 2019-09-04 12:52 ` Jeff Layton 2019-09-04 23:12 ` John Hubbard 2019-08-09 22:58 ` [RFC PATCH v2 03/19] mm/gup: Pass flags down to __gup_device_huge* calls ira.weiny 2019-08-09 22:58 ` [RFC PATCH v2 04/19] mm/gup: Ensure F_LAYOUT lease is held prior to GUP'ing pages ira.weiny 2019-08-09 22:58 ` [RFC PATCH v2 05/19] fs/ext4: Teach ext4 to break layout leases ira.weiny 2019-08-09 22:58 ` [RFC PATCH v2 06/19] fs/ext4: Teach dax_layout_busy_page() to operate on a sub-range ira.weiny 2019-08-23 15:18 ` Vivek Goyal 2019-08-29 18:52 ` Ira Weiny 2019-08-09 22:58 ` [RFC PATCH v2 07/19] fs/xfs: Teach xfs to use new dax_layout_busy_page() ira.weiny 2019-08-09 23:30 ` Dave Chinner 2019-08-12 18:05 ` Ira Weiny 2019-08-14 8:04 ` Dave Chinner 2019-08-09 22:58 ` [RFC PATCH v2 08/19] fs/xfs: Fail truncate if page lease can't be broken ira.weiny 2019-08-09 23:22 ` Dave Chinner 2019-08-12 18:08 ` Ira Weiny 2019-08-09 22:58 ` [RFC PATCH v2 09/19] mm/gup: Introduce vaddr_pin structure ira.weiny 2019-08-10 0:06 ` John Hubbard [this message] 2019-08-10 0:06 ` John Hubbard 2019-08-09 22:58 ` [RFC PATCH v2 10/19] mm/gup: Pass a NULL vaddr_pin through GUP fast ira.weiny 2019-08-10 0:06 ` John Hubbard 2019-08-10 0:06 ` John Hubbard 2019-08-09 22:58 ` [RFC PATCH v2 11/19] mm/gup: Pass 
follow_page_context further down the call stack ira.weiny 2019-08-10 0:18 ` John Hubbard 2019-08-10 0:18 ` John Hubbard 2019-08-12 19:01 ` Ira Weiny 2019-08-09 22:58 ` [RFC PATCH v2 12/19] mm/gup: Prep put_user_pages() to take an vaddr_pin struct ira.weiny 2019-08-10 0:30 ` John Hubbard 2019-08-10 0:30 ` John Hubbard 2019-08-12 20:46 ` Ira Weiny 2019-08-09 22:58 ` [RFC PATCH v2 13/19] {mm,file}: Add file_pins objects ira.weiny 2019-08-09 22:58 ` [RFC PATCH v2 14/19] fs/locks: Associate file pins while performing GUP ira.weiny 2019-08-09 22:58 ` [RFC PATCH v2 15/19] mm/gup: Introduce vaddr_pin_pages() ira.weiny 2019-08-10 0:09 ` John Hubbard 2019-08-10 0:09 ` John Hubbard 2019-08-12 21:00 ` Ira Weiny 2019-08-12 21:20 ` John Hubbard 2019-08-12 21:20 ` John Hubbard 2019-08-11 23:07 ` John Hubbard 2019-08-11 23:07 ` John Hubbard 2019-08-12 21:01 ` Ira Weiny 2019-08-12 12:28 ` Jason Gunthorpe 2019-08-12 21:48 ` Ira Weiny 2019-08-13 11:47 ` Jason Gunthorpe 2019-08-13 17:46 ` Ira Weiny 2019-08-13 17:56 ` John Hubbard 2019-08-13 17:56 ` John Hubbard 2019-08-09 22:58 ` [RFC PATCH v2 16/19] RDMA/uverbs: Add back pointer to system file object ira.weiny 2019-08-12 13:00 ` Jason Gunthorpe 2019-08-12 17:28 ` Ira Weiny 2019-08-12 17:56 ` Jason Gunthorpe 2019-08-12 21:15 ` Ira Weiny 2019-08-13 11:48 ` Jason Gunthorpe 2019-08-13 17:41 ` Ira Weiny 2019-08-13 18:00 ` Jason Gunthorpe 2019-08-13 20:38 ` Ira Weiny 2019-08-14 12:23 ` Jason Gunthorpe 2019-08-14 17:50 ` Ira Weiny 2019-08-14 18:15 ` Jason Gunthorpe 2019-09-04 22:25 ` Ira Weiny 2019-09-11 8:19 ` Jason Gunthorpe 2019-08-09 22:58 ` [RFC PATCH v2 17/19] RDMA/umem: Convert to vaddr_[pin|unpin]* operations ira.weiny 2019-08-09 22:58 ` [RFC PATCH v2 18/19] {mm,procfs}: Add display file_pins proc ira.weiny 2019-08-09 22:58 ` [RFC PATCH v2 19/19] mm/gup: Remove FOLL_LONGTERM DAX exclusion ira.weiny 2019-08-14 10:17 ` [RFC PATCH v2 00/19] RDMA/FS DAX truncate proposal V1,000,002 ;-) Jan Kara 2019-08-14 18:08 ` Ira Weiny 2019-08-15 
13:05 ` Jan Kara 2019-08-16 19:05 ` Ira Weiny 2019-08-16 23:20 ` [RFC PATCH v2 00/19] RDMA/FS DAX truncate proposal V1,000,002 ; -) Ira Weiny 2019-08-19 6:36 ` Jan Kara 2019-08-17 2:26 ` [RFC PATCH v2 00/19] RDMA/FS DAX truncate proposal V1,000,002 ;-) Dave Chinner 2019-08-19 6:34 ` Jan Kara 2019-08-19 9:24 ` Dave Chinner 2019-08-19 12:38 ` Jason Gunthorpe 2019-08-19 21:53 ` Ira Weiny 2019-08-20 1:12 ` Dave Chinner 2019-08-20 11:55 ` Jason Gunthorpe 2019-08-21 18:02 ` Ira Weiny 2019-08-21 18:13 ` Jason Gunthorpe 2019-08-21 18:22 ` John Hubbard 2019-08-21 18:57 ` Ira Weiny 2019-08-21 19:06 ` Ira Weiny 2019-08-21 19:48 ` Jason Gunthorpe 2019-08-21 20:44 ` Ira Weiny 2019-08-21 23:49 ` Jason Gunthorpe 2019-08-23 3:23 ` Dave Chinner 2019-08-23 12:04 ` Jason Gunthorpe 2019-08-24 0:11 ` Dave Chinner 2019-08-24 5:08 ` Ira Weiny 2019-08-26 5:55 ` Dave Chinner 2019-08-29 2:02 ` Ira Weiny 2019-08-29 3:27 ` John Hubbard 2019-08-29 16:16 ` Ira Weiny 2019-09-02 22:26 ` Dave Chinner 2019-09-04 16:54 ` Ira Weiny 2019-08-25 19:39 ` Jason Gunthorpe 2019-08-24 4:49 ` Ira Weiny 2019-08-25 19:40 ` Jason Gunthorpe 2019-08-23 0:59 ` Dave Chinner 2019-08-23 17:15 ` Ira Weiny 2019-08-24 0:18 ` Dave Chinner 2019-08-20 0:05 ` John Hubbard 2019-08-20 1:20 ` Dave Chinner 2019-08-20 3:09 ` John Hubbard 2019-08-20 3:36 ` Dave Chinner 2019-08-21 18:43 ` John Hubbard 2019-08-21 19:09 ` Ira Weiny
Reply instructions: You may reply publicly to this message via plain-text email using any one of the following methods: * Save the following mbox file, import it into your mail client, and reply-to-all from there: mbox Avoid top-posting and favor interleaved quoting: https://en.wikipedia.org/wiki/Posting_style#Interleaved_style * Reply using the --to, --cc, and --in-reply-to switches of git-send-email(1): git send-email \ --in-reply-to=e92723cf-97a1-9860-9482-8466ff2feaa8@nvidia.com \ --to=jhubbard@nvidia.com \ --cc=akpm@linux-foundation.org \ --cc=dan.j.williams@intel.com \ --cc=david@fromorbit.com \ --cc=ira.weiny@intel.com \ --cc=jack@suse.cz \ --cc=jgg@ziepe.ca \ --cc=linux-ext4@vger.kernel.org \ --cc=linux-fsdevel@vger.kernel.org \ --cc=linux-kernel@vger.kernel.org \ --cc=linux-mm@kvack.org \ --cc=linux-nvdimm@lists.01.org \ --cc=linux-rdma@vger.kernel.org \ --cc=linux-xfs@vger.kernel.org \ --cc=mhocko@suse.com \ --cc=tytso@mit.edu \ --cc=willy@infradead.org \ /path/to/YOUR_REPLY https://kernel.org/pub/software/scm/git/docs/git-send-email.html * If your mail client supports setting the In-Reply-To header via mailto: links, try the mailto: link. Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is a public inbox, see mirroring instructions for how to clone and mirror all data and code used for this inbox; as well as URLs for NNTP newsgroup(s).