From: Matthew Wilcox <willy@infradead.org>
To: linux-mm@kvack.org, linux-fsdevel@vger.kernel.org,
	linux-kernel@vger.kernel.org
Cc: Matthew Wilcox <willy@infradead.org>, Jan Kara <jack@suse.cz>,
	Jeff Layton <jlayton@redhat.com>,
	Lukas Czerner <lczerner@redhat.com>,
	Ross Zwisler <ross.zwisler@linux.intel.com>,
	Christoph Hellwig <hch@lst.de>,
	Goldwyn Rodrigues <rgoldwyn@suse.com>,
	Nicholas Piggin <npiggin@gmail.com>,
	Ryusuke Konishi <konishi.ryusuke@lab.ntt.co.jp>,
	linux-nilfs@vger.kernel.org, Jaegeuk Kim <jaegeuk@kernel.org>,
	Chao Yu <yuchao0@huawei.com>,
	linux-f2fs-devel@lists.sourceforge.net
Subject: [PATCH v14 62/74] dax: Rename some functions
Date: Sat, 16 Jun 2018 19:00:40 -0700
Message-ID: <20180617020052.4759-63-willy@infradead.org>
In-Reply-To: <20180617020052.4759-1-willy@infradead.org>

Remove mentions of 'radix' and 'radix tree'.  Simplify some names by
dropping the word 'mapping'.

Signed-off-by: Matthew Wilcox <willy@infradead.org>
---
 fs/dax.c | 86 +++++++++++++++++++++++++++-----------------------------
 1 file changed, 42 insertions(+), 44 deletions(-)
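
[Editorial note, not part of the patch: the renames below are dax_radix_pfn() ->
dax_to_pfn(), dax_radix_locked_entry() -> dax_make_locked(), dax_radix_order() ->
dax_entry_order(), dax_radix_end_pfn() -> dax_end_pfn(),
__dax_invalidate_mapping_entry() -> __dax_invalidate_entry(),
dax_insert_mapping_entry() -> dax_insert_entry() and dax_mapping_entry_mkclean() ->
dax_entry_mkclean().  For reference, here is a minimal userspace sketch of the value
encoding that the renamed dax_make_locked()/dax_to_pfn() helpers operate on, based on
the hunks below.  xa_mk_value()/xa_to_value() are re-implemented purely for
illustration, and DAX_SHIFT = 4 plus the DAX_LOCKED/DAX_PMD bit positions are
assumptions consistent with the two flag bits visible in the patch.

#include <assert.h>
#include <stdio.h>

#define DAX_LOCKED	(1UL << 0)	/* assumed bit position */
#define DAX_PMD		(1UL << 1)	/* assumed bit position */
#define DAX_ZERO_PAGE	(1UL << 2)	/* as in the hunk below */
#define DAX_EMPTY	(1UL << 3)	/* as in the hunk below */
#define DAX_SHIFT	4		/* assumed: first bit above the flags */

/* XArray value entries: shift left by one and tag the low bit. */
static void *xa_mk_value(unsigned long v)
{
	return (void *)((v << 1) | 1);
}

static unsigned long xa_to_value(const void *entry)
{
	return (unsigned long)entry >> 1;
}

/* Pack a pfn and flags into a locked DAX entry (new name in this patch). */
static void *dax_make_locked(unsigned long pfn, unsigned long flags)
{
	return xa_mk_value(flags | (pfn << DAX_SHIFT) | DAX_LOCKED);
}

/* Recover the pfn from an entry (new name in this patch). */
static unsigned long dax_to_pfn(void *entry)
{
	return xa_to_value(entry) >> DAX_SHIFT;
}

int main(void)
{
	void *entry = dax_make_locked(0x1234, DAX_ZERO_PAGE);

	assert(dax_to_pfn(entry) == 0x1234);
	printf("pfn %#lx survives the round trip\n", dax_to_pfn(entry));
	return 0;
}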

diff --git a/fs/dax.c b/fs/dax.c
index 704059ecfa88..157762fe2ba1 100644
--- a/fs/dax.c
+++ b/fs/dax.c
@@ -74,18 +74,18 @@ fs_initcall(init_dax_wait_table);
 #define DAX_ZERO_PAGE	(1UL << 2)
 #define DAX_EMPTY	(1UL << 3)
 
-static unsigned long dax_radix_pfn(void *entry)
+static unsigned long dax_to_pfn(void *entry)
 {
 	return xa_to_value(entry) >> DAX_SHIFT;
 }
 
-static void *dax_radix_locked_entry(unsigned long pfn, unsigned long flags)
+static void *dax_make_locked(unsigned long pfn, unsigned long flags)
 {
 	return xa_mk_value(flags | ((unsigned long)pfn << DAX_SHIFT) |
 			DAX_LOCKED);
 }
 
-static unsigned int dax_radix_order(void *entry)
+static unsigned int dax_entry_order(void *entry)
 {
 	if (xa_to_value(entry) & DAX_PMD)
 		return PMD_SHIFT - PAGE_SHIFT;
@@ -113,7 +113,7 @@ static int dax_is_empty_entry(void *entry)
 }
 
 /*
- * DAX radix tree locking
+ * DAX page cache entry locking
  */
 struct exceptional_entry_key {
 	struct address_space *mapping;
@@ -217,7 +217,7 @@ static inline void *unlock_slot(struct address_space *mapping, void **slot)
 }
 
 /*
- * Lookup entry in radix tree, wait for it to become unlocked if it is
+ * Lookup entry in page cache, wait for it to become unlocked if it is
  * a DAX entry and return it. The caller must call
  * put_unlocked_mapping_entry() when he decided not to lock the entry or
  * put_locked_mapping_entry() when he locked the entry and now wants to
@@ -280,7 +280,7 @@ static void put_locked_mapping_entry(struct address_space *mapping,
 }
 
 /*
- * Called when we are done with radix tree entry we looked up via
+ * Called when we are done with page cache entry we looked up via
  * get_unlocked_mapping_entry() and which we didn't lock in the end.
  */
 static void put_unlocked_mapping_entry(struct address_space *mapping,
@@ -289,7 +289,7 @@ static void put_unlocked_mapping_entry(struct address_space *mapping,
 	if (!entry)
 		return;
 
-	/* We have to wake up next waiter for the radix tree entry lock */
+	/* We have to wake up next waiter for the page cache entry lock */
 	dax_wake_mapping_entry_waiter(mapping, index, entry, false);
 }
 
@@ -305,9 +305,9 @@ static unsigned long dax_entry_size(void *entry)
 		return PAGE_SIZE;
 }
 
-static unsigned long dax_radix_end_pfn(void *entry)
+static unsigned long dax_end_pfn(void *entry)
 {
-	return dax_radix_pfn(entry) + dax_entry_size(entry) / PAGE_SIZE;
+	return dax_to_pfn(entry) + dax_entry_size(entry) / PAGE_SIZE;
 }
 
 /*
@@ -315,8 +315,8 @@ static unsigned long dax_radix_end_pfn(void *entry)
  * 'empty' and 'zero' entries.
  */
 #define for_each_mapped_pfn(entry, pfn) \
-	for (pfn = dax_radix_pfn(entry); \
-			pfn < dax_radix_end_pfn(entry); pfn++)
+	for (pfn = dax_to_pfn(entry); \
+			pfn < dax_end_pfn(entry); pfn++)
 
 /*
  * TODO: for reflink+dax we need a way to associate a single page with
@@ -449,9 +449,9 @@ void dax_unlock_page(struct page *page)
 }
 
 /*
- * Find radix tree entry at given index. If it is a DAX entry, return it
- * with the radix tree entry locked. If the radix tree doesn't contain the
- * given index, create an empty entry for the index and return with it locked.
+ * Find page cache entry at given index. If it is a DAX entry, return it
+ * with the entry locked. If the page cache doesn't contain an entry at
+ * that index, add a locked empty entry.
  *
  * When requesting an entry with size DAX_PMD, grab_mapping_entry() will
  * either return that locked entry or will return an error.  This error will
@@ -560,10 +560,10 @@ static void *grab_mapping_entry(struct address_space *mapping, pgoff_t index,
 					true);
 		}
 
-		entry = dax_radix_locked_entry(0, size_flag | DAX_EMPTY);
+		entry = dax_make_locked(0, size_flag | DAX_EMPTY);
 
 		err = __radix_tree_insert(&mapping->i_pages, index,
-				dax_radix_order(entry), entry);
+				dax_entry_order(entry), entry);
 		radix_tree_preload_end();
 		if (err) {
 			xa_unlock_irq(&mapping->i_pages);
@@ -672,7 +672,7 @@ struct page *dax_layout_busy_page(struct address_space *mapping)
 }
 EXPORT_SYMBOL_GPL(dax_layout_busy_page);
 
-static int __dax_invalidate_mapping_entry(struct address_space *mapping,
+static int __dax_invalidate_entry(struct address_space *mapping,
 					  pgoff_t index, bool trunc)
 {
 	int ret = 0;
@@ -702,12 +702,12 @@ static int __dax_invalidate_mapping_entry(struct address_space *mapping,
  */
 int dax_delete_mapping_entry(struct address_space *mapping, pgoff_t index)
 {
-	int ret = __dax_invalidate_mapping_entry(mapping, index, true);
+	int ret = __dax_invalidate_entry(mapping, index, true);
 
 	/*
 	 * This gets called from truncate / punch_hole path. As such, the caller
 	 * must hold locks protecting against concurrent modifications of the
-	 * radix tree (usually fs-private i_mmap_sem for writing). Since the
+	 * page cache (usually fs-private i_mmap_sem for writing). Since the
 	 * caller has seen a DAX entry for this index, we better find it
 	 * at that index as well...
 	 */
@@ -721,7 +721,7 @@ int dax_delete_mapping_entry(struct address_space *mapping, pgoff_t index)
 int dax_invalidate_mapping_entry_sync(struct address_space *mapping,
 				      pgoff_t index)
 {
-	return __dax_invalidate_mapping_entry(mapping, index, false);
+	return __dax_invalidate_entry(mapping, index, false);
 }
 
 static int copy_user_dax(struct block_device *bdev, struct dax_device *dax_dev,
@@ -758,10 +758,9 @@ static int copy_user_dax(struct block_device *bdev, struct dax_device *dax_dev,
  * already in the tree, we will skip the insertion and just dirty the PMD as
  * appropriate.
  */
-static void *dax_insert_mapping_entry(struct address_space *mapping,
-				      struct vm_fault *vmf,
-				      void *entry, pfn_t pfn_t,
-				      unsigned long flags, bool dirty)
+static void *dax_insert_entry(struct address_space *mapping,
+		struct vm_fault *vmf,
+		void *entry, pfn_t pfn_t, unsigned long flags, bool dirty)
 {
 	struct radix_tree_root *pages = &mapping->i_pages;
 	unsigned long pfn = pfn_t_to_pfn(pfn_t);
@@ -781,7 +780,7 @@ static void *dax_insert_mapping_entry(struct address_space *mapping,
 	}
 
 	xa_lock_irq(pages);
-	new_entry = dax_radix_locked_entry(pfn, flags);
+	new_entry = dax_make_locked(pfn, flags);
 	if (dax_entry_size(entry) != dax_entry_size(new_entry)) {
 		dax_disassociate_entry(entry, mapping, false);
 		dax_associate_entry(new_entry, mapping, vmf->vma, vmf->address);
@@ -789,9 +788,9 @@ static void *dax_insert_mapping_entry(struct address_space *mapping,
 
 	if (dax_is_zero_entry(entry) || dax_is_empty_entry(entry)) {
 		/*
-		 * Only swap our new entry into the radix tree if the current
+		 * Only swap our new entry into the page cache if the current
 		 * entry is a zero page or an empty entry.  If a normal PTE or
-		 * PMD entry is already in the tree, we leave it alone.  This
+		 * PMD entry is already in the cache, we leave it alone.  This
 		 * means that if we are trying to insert a PTE and the
 		 * existing entry is a PMD, we will just leave the PMD in the
 		 * tree and dirty it if necessary.
@@ -814,8 +813,8 @@ static void *dax_insert_mapping_entry(struct address_space *mapping,
 	return entry;
 }
 
-static inline unsigned long
-pgoff_address(pgoff_t pgoff, struct vm_area_struct *vma)
+static inline
+unsigned long pgoff_address(pgoff_t pgoff, struct vm_area_struct *vma)
 {
 	unsigned long address;
 
@@ -825,8 +824,8 @@ pgoff_address(pgoff_t pgoff, struct vm_area_struct *vma)
 }
 
 /* Walk all mappings of a given index of a file and writeprotect them */
-static void dax_mapping_entry_mkclean(struct address_space *mapping,
-				      pgoff_t index, unsigned long pfn)
+static void dax_entry_mkclean(struct address_space *mapping, pgoff_t index,
+		unsigned long pfn)
 {
 	struct vm_area_struct *vma;
 	pte_t pte, *ptep = NULL;
@@ -922,7 +921,7 @@ static int dax_writeback_one(struct dax_device *dax_dev,
 	 * compare pfns as we must not bail out due to difference in lockbit
 	 * or entry type.
 	 */
-	if (dax_radix_pfn(entry2) != dax_radix_pfn(entry))
+	if (dax_to_pfn(entry2) != dax_to_pfn(entry))
 		goto put_unlocked;
 	if (WARN_ON_ONCE(dax_is_empty_entry(entry) ||
 				dax_is_zero_entry(entry))) {
@@ -952,10 +951,10 @@ static int dax_writeback_one(struct dax_device *dax_dev,
 	 * This allows us to flush for PMD_SIZE and not have to worry about
 	 * partial PMD writebacks.
 	 */
-	pfn = dax_radix_pfn(entry);
-	size = PAGE_SIZE << dax_radix_order(entry);
+	pfn = dax_to_pfn(entry);
+	size = PAGE_SIZE << dax_entry_order(entry);
 
-	dax_mapping_entry_mkclean(mapping, index, pfn);
+	dax_entry_mkclean(mapping, index, pfn);
 	dax_flush(dax_dev, page_address(pfn_to_page(pfn)), size);
 	/*
 	 * After we have flushed the cache, we can clear the dirty tag. There
@@ -1093,7 +1092,7 @@ static vm_fault_t dax_load_hole(struct address_space *mapping, void *entry,
 	pfn_t pfn = pfn_to_pfn_t(my_zero_pfn(vaddr));
 	vm_fault_t ret;
 
-	dax_insert_mapping_entry(mapping, vmf, entry, pfn,
+	dax_insert_entry(mapping, vmf, entry, pfn,
 			DAX_ZERO_PAGE, false);
 
 	ret = vmf_insert_mixed(vmf->vma, vaddr, pfn);
@@ -1407,7 +1406,7 @@ static vm_fault_t dax_iomap_pte_fault(struct vm_fault *vmf, pfn_t *pfnp,
 		if (error < 0)
 			goto error_finish_iomap;
 
-		entry = dax_insert_mapping_entry(mapping, vmf, entry, pfn,
+		entry = dax_insert_entry(mapping, vmf, entry, pfn,
 						 0, write && !sync);
 
 		/*
@@ -1487,7 +1486,7 @@ static vm_fault_t dax_pmd_load_hole(struct vm_fault *vmf, struct iomap *iomap,
 		goto fallback;
 
 	pfn = page_to_pfn_t(zero_page);
-	ret = dax_insert_mapping_entry(mapping, vmf, entry, pfn,
+	ret = dax_insert_entry(mapping, vmf, entry, pfn,
 			DAX_PMD | DAX_ZERO_PAGE, false);
 
 	ptl = pmd_lock(vmf->vma->vm_mm, vmf->pmd);
@@ -1540,7 +1539,7 @@ static vm_fault_t dax_iomap_pmd_fault(struct vm_fault *vmf, pfn_t *pfnp,
 	 * Make sure that the faulting address's PMD offset (color) matches
 	 * the PMD offset from the start of the file.  This is necessary so
 	 * that a PMD range in the page table overlaps exactly with a PMD
-	 * range in the radix tree.
+	 * range in the page cache.
 	 */
 	if ((vmf->pgoff & PG_PMD_COLOUR) !=
 	    ((vmf->address >> PAGE_SHIFT) & PG_PMD_COLOUR))
@@ -1608,7 +1607,7 @@ static vm_fault_t dax_iomap_pmd_fault(struct vm_fault *vmf, pfn_t *pfnp,
 		if (error < 0)
 			goto finish_iomap;
 
-		entry = dax_insert_mapping_entry(mapping, vmf, entry, pfn,
+		entry = dax_insert_entry(mapping, vmf, entry, pfn,
 						DAX_PMD, write && !sync);
 
 		/*
@@ -1701,15 +1700,14 @@ vm_fault_t dax_iomap_fault(struct vm_fault *vmf, enum page_entry_size pe_size,
 }
 EXPORT_SYMBOL_GPL(dax_iomap_fault);
 
-/**
+/*
  * dax_insert_pfn_mkwrite - insert PTE or PMD entry into page tables
  * @vmf: The description of the fault
  * @pe_size: Size of entry to be inserted
  * @pfn: PFN to insert
  *
- * This function inserts writeable PTE or PMD entry into page tables for mmaped
- * DAX file.  It takes care of marking corresponding radix tree entry as dirty
- * as well.
+ * This function inserts a writeable PTE or PMD entry into the page tables
+ * for an mmaped DAX file.  It also marks the page cache entry as dirty.
  */
 static vm_fault_t dax_insert_pfn_mkwrite(struct vm_fault *vmf,
 				  enum page_entry_size pe_size,
-- 
2.17.1


Thread overview: 162+ messages
2018-06-17  1:59 [PATCH v14 00/74] Convert page cache to XArray Matthew Wilcox
2018-06-17  1:59 ` Matthew Wilcox
2018-06-17  1:59 ` [PATCH v14 01/74] Update email address Matthew Wilcox
2018-06-17  1:59   ` Matthew Wilcox
2018-06-17  1:59 ` [PATCH v14 02/74] radix tree test suite: Enable ubsan Matthew Wilcox
2018-06-17  1:59 ` [PATCH v14 03/74] dax: Fix use of zero page Matthew Wilcox
2018-06-17  1:59 ` [PATCH v14 04/74] xarray: Replace exceptional entries Matthew Wilcox
2018-06-17  1:59   ` Matthew Wilcox
2018-06-17  1:59 ` [PATCH v14 05/74] xarray: Change definition of sibling entries Matthew Wilcox
2018-06-17  1:59   ` Matthew Wilcox
2018-06-17  1:59 ` [PATCH v14 06/74] xarray: Add definition of struct xarray Matthew Wilcox
2018-06-17  1:59   ` Matthew Wilcox
2018-06-17  1:59 ` [PATCH v14 07/74] xarray: Define struct xa_node Matthew Wilcox
2018-06-17  1:59   ` Matthew Wilcox
2018-06-17  1:59 ` [PATCH v14 08/74] xarray: Add documentation Matthew Wilcox
2018-06-17  1:59   ` Matthew Wilcox
2018-06-17  1:59 ` [PATCH v14 09/74] xarray: Add XArray load operation Matthew Wilcox
2018-06-17  1:59   ` Matthew Wilcox
2018-06-17  1:59 ` [PATCH v14 10/74] xarray: Add XArray tags Matthew Wilcox
2018-06-17  1:59   ` Matthew Wilcox
2018-06-17  1:59 ` [PATCH v14 11/74] xarray: Add XArray unconditional store operations Matthew Wilcox
2018-06-17  1:59   ` Matthew Wilcox
2018-06-17  1:59 ` [PATCH v14 12/74] xarray: Add XArray conditional " Matthew Wilcox
2018-06-17  1:59   ` Matthew Wilcox
2018-06-17  1:59 ` [PATCH v14 13/74] xarray: Add XArray iterators Matthew Wilcox
2018-06-17  1:59   ` Matthew Wilcox
2018-06-17  1:59 ` [PATCH v14 14/74] xarray: Extract entries from an XArray Matthew Wilcox
2018-06-17  1:59   ` Matthew Wilcox
2018-06-17  1:59 ` [PATCH v14 15/74] xarray: Destroy " Matthew Wilcox
2018-06-17  1:59   ` Matthew Wilcox
2018-06-17  1:59 ` [PATCH v14 16/74] xarray: Step through " Matthew Wilcox
2018-06-17  1:59 ` [PATCH v14 17/74] xarray: Add xas_for_each_conflict Matthew Wilcox
2018-06-17  1:59   ` Matthew Wilcox
2018-06-17  1:59 ` [PATCH v14 18/74] xarray: Add xas_create_range Matthew Wilcox
2018-06-17  1:59   ` Matthew Wilcox
2018-06-17  1:59 ` [PATCH v14 19/74] xarray: Add MAINTAINERS entry Matthew Wilcox
2018-06-17  1:59   ` Matthew Wilcox
2018-06-17  1:59 ` [PATCH v14 20/74] page cache: Rearrange address_space Matthew Wilcox
2018-06-17  1:59   ` Matthew Wilcox
2018-06-17  1:59 ` [PATCH v14 21/74] page cache: Convert hole search to XArray Matthew Wilcox
2018-06-17  1:59   ` Matthew Wilcox
2018-06-17  2:00 ` [PATCH v14 22/74] page cache: Add and replace pages using the XArray Matthew Wilcox
2018-06-17  2:00   ` Matthew Wilcox
2018-06-17  2:00 ` [PATCH v14 23/74] page cache: Convert page deletion to XArray Matthew Wilcox
2018-06-17  2:00   ` Matthew Wilcox
2018-06-17  2:00 ` [PATCH v14 24/74] page cache: Convert find_get_entry " Matthew Wilcox
2018-06-17  2:00   ` Matthew Wilcox
2018-06-17  2:00 ` [PATCH v14 25/74] page cache: Convert find_get_entries " Matthew Wilcox
2018-06-17  2:00   ` Matthew Wilcox
2018-06-17  2:00 ` [PATCH v14 26/74] page cache: Convert find_get_pages_range " Matthew Wilcox
2018-06-17  2:00   ` Matthew Wilcox
2018-06-17  2:00 ` [PATCH v14 27/74] page cache: Convert find_get_pages_contig " Matthew Wilcox
2018-06-17  2:00   ` Matthew Wilcox
2018-06-17  2:00 ` [PATCH v14 28/74] page cache; Convert find_get_pages_range_tag " Matthew Wilcox
2018-06-17  2:00   ` Matthew Wilcox
2018-06-17  2:00 ` [PATCH v14 29/74] page cache: Convert find_get_entries_tag " Matthew Wilcox
2018-06-17  2:00   ` Matthew Wilcox
2018-06-17  2:00 ` [PATCH v14 30/74] page cache: Convert filemap_map_pages " Matthew Wilcox
2018-06-17  2:00   ` Matthew Wilcox
2018-06-17  2:00 ` [PATCH v14 31/74] radix tree test suite: Convert regression1 " Matthew Wilcox
2018-06-17  2:00   ` Matthew Wilcox
2018-06-17  2:00 ` [PATCH v14 32/74] page cache: Convert delete_batch " Matthew Wilcox
2018-06-17  2:00   ` Matthew Wilcox
2018-06-17  2:00 ` [PATCH v14 33/74] page cache: Remove stray radix comment Matthew Wilcox
2018-06-17  2:00   ` Matthew Wilcox
2018-06-17  2:00 ` [PATCH v14 34/74] page cache: Convert filemap_range_has_page to XArray Matthew Wilcox
2018-06-17  2:00   ` Matthew Wilcox
2018-06-17  2:00 ` [PATCH v14 35/74] mm: Convert page-writeback " Matthew Wilcox
2018-06-17  2:00   ` Matthew Wilcox
2018-06-17  2:00 ` [PATCH v14 36/74] mm: Convert workingset " Matthew Wilcox
2018-06-17  2:00   ` Matthew Wilcox
2018-06-17  2:00 ` [PATCH v14 37/74] mm: Convert truncate " Matthew Wilcox
2018-06-17  2:00   ` Matthew Wilcox
2018-06-17  2:00 ` [PATCH v14 38/74] mm: Convert add_to_swap_cache " Matthew Wilcox
2018-06-17  2:00   ` Matthew Wilcox
2018-06-17  2:00 ` [PATCH v14 39/74] mm: Convert delete_from_swap_cache " Matthew Wilcox
2018-06-17  2:00 ` [PATCH v14 40/74] mm: Convert __do_page_cache_readahead " Matthew Wilcox
2018-06-17  2:00   ` Matthew Wilcox
2018-06-17  2:00 ` [PATCH v14 41/74] mm: Convert page migration " Matthew Wilcox
2018-06-17  2:00   ` Matthew Wilcox
2018-06-17  2:00 ` [PATCH v14 42/74] mm: Convert huge_memory " Matthew Wilcox
2018-06-17  2:00   ` Matthew Wilcox
2018-06-17  2:00 ` [PATCH v14 43/74] mm: Convert collapse_shmem " Matthew Wilcox
2018-06-17  2:00   ` Matthew Wilcox
2018-06-17  2:00 ` [PATCH v14 44/74] mm: Convert khugepaged_scan_shmem " Matthew Wilcox
2018-06-17  2:00   ` Matthew Wilcox
2018-06-17  2:00 ` [PATCH v14 45/74] mm: Convert is_page_cache_freeable " Matthew Wilcox
2018-06-17  2:00   ` Matthew Wilcox
2018-06-17  2:00 ` [PATCH v14 46/74] pagevec: Use xa_tag_t Matthew Wilcox
2018-06-17  2:00   ` Matthew Wilcox
2018-06-17  2:00 ` [PATCH v14 47/74] shmem: Convert shmem_radix_tree_replace to XArray Matthew Wilcox
2018-06-17  2:00   ` Matthew Wilcox
2018-06-17  2:00 ` [PATCH v14 48/74] shmem: Convert shmem_confirm_swap " Matthew Wilcox
2018-06-17  2:00   ` Matthew Wilcox
2018-06-17  2:00 ` [PATCH v14 49/74] shmem: Convert find_swap_entry " Matthew Wilcox
2018-06-17  2:00   ` Matthew Wilcox
2018-06-17  2:00 ` [PATCH v14 50/74] shmem: Convert shmem_add_to_page_cache " Matthew Wilcox
2018-06-17  2:00   ` Matthew Wilcox
2018-06-17  2:00 ` [PATCH v14 51/74] shmem: Convert shmem_alloc_hugepage " Matthew Wilcox
2018-06-17  2:00   ` Matthew Wilcox
2018-06-17  2:00 ` [PATCH v14 52/74] shmem: Convert shmem_free_swap " Matthew Wilcox
2018-06-17  2:00   ` Matthew Wilcox
2018-06-17  2:00 ` [PATCH v14 53/74] shmem: Convert shmem_partial_swap_usage " Matthew Wilcox
2018-06-17  2:00   ` Matthew Wilcox
2018-06-17  2:00 ` [PATCH v14 54/74] memfd: Convert memfd_wait_for_pins " Matthew Wilcox
2018-06-17  2:00   ` Matthew Wilcox
2018-06-17  2:00 ` [PATCH v14 55/74] memfd: Convert memfd_tag_pins " Matthew Wilcox
2018-06-17  2:00   ` Matthew Wilcox
2018-06-17  2:00 ` [PATCH v14 56/74] shmem: Comment fixups Matthew Wilcox
2018-06-17  2:00   ` Matthew Wilcox
2018-06-17  2:00 ` [PATCH v14 57/74] btrfs: Convert page cache to XArray Matthew Wilcox
2018-06-17  2:00   ` Matthew Wilcox
2018-06-17  2:00 ` [PATCH v14 58/74] fs: Convert buffer " Matthew Wilcox
2018-06-17  2:00   ` Matthew Wilcox
2018-06-17  2:00 ` [PATCH v14 59/74] fs: Convert writeback " Matthew Wilcox
2018-06-17  2:00   ` Matthew Wilcox
2018-06-17  2:00 ` [PATCH v14 60/74] nilfs2: Convert " Matthew Wilcox
2018-06-17  2:00 ` [PATCH v14 61/74] f2fs: " Matthew Wilcox
2018-06-17  2:00   ` Matthew Wilcox
2018-06-17  2:00 ` Matthew Wilcox [this message]
2018-06-17  2:00   ` [PATCH v14 62/74] dax: Rename some functions Matthew Wilcox
2018-06-17  2:00 ` [PATCH v14 63/74] dax: Hash on XArray instead of mapping Matthew Wilcox
2018-06-17  2:00   ` Matthew Wilcox
2018-06-17  2:00 ` [PATCH v14 64/74] dax: Convert dax_insert_pfn_mkwrite to XArray Matthew Wilcox
2018-06-17  2:00   ` Matthew Wilcox
2018-06-17  2:00 ` [PATCH v14 65/74] dax: Convert dax_layout_busy_page " Matthew Wilcox
2018-06-17  2:00   ` Matthew Wilcox
2018-06-17  2:00 ` [PATCH v14 66/74] dax: Convert __dax_invalidate_entry " Matthew Wilcox
2018-06-17  2:00   ` Matthew Wilcox
2018-06-17  2:00 ` [PATCH v14 67/74] dax: Convert dax writeback " Matthew Wilcox
2018-06-17  2:00   ` Matthew Wilcox
2018-06-17  2:00 ` [PATCH v14 68/74] dax: Convert dax_lock_page " Matthew Wilcox
2018-06-29 17:30   ` Ross Zwisler
2018-06-29 17:30     ` Ross Zwisler
2018-07-06 11:54     ` [PATCH] fs/dax: remove unused function Anders Roxell
2018-07-06 13:04     ` [PATCH v14 68/74] dax: Convert dax_lock_page to XArray Matthew Wilcox
2018-07-06 13:04       ` Matthew Wilcox
2018-06-17  2:00 ` [PATCH v14 69/74] dax: Convert page fault handlers " Matthew Wilcox
2018-06-17  2:00   ` Matthew Wilcox
2018-06-17  2:00 ` [PATCH v14 70/74] page cache: Finish XArray conversion Matthew Wilcox
2018-06-17  2:00   ` Matthew Wilcox
2018-06-17  2:00 ` [PATCH v14 71/74] radix tree: Remove radix_tree_update_node_t Matthew Wilcox
2018-06-17  2:00   ` Matthew Wilcox
2018-06-17  2:00 ` [PATCH v14 72/74] radix tree: Remove split/join code Matthew Wilcox
2018-06-17  2:00   ` Matthew Wilcox
2018-06-17  2:00 ` [PATCH v14 73/74] radix tree: Remove radix_tree_maybe_preload_order Matthew Wilcox
2018-06-17  2:00 ` [PATCH v14 74/74] radix tree: Remove radix_tree_clear_tags Matthew Wilcox
2018-06-17  2:00   ` Matthew Wilcox
2018-06-19  3:12 ` [PATCH v14 00/74] Convert page cache to XArray Ross Zwisler
2018-06-19  9:22   ` Matthew Wilcox
2018-06-19 16:40     ` Ross Zwisler
2018-06-19 17:16       ` Matthew Wilcox
2018-06-27 11:05         ` Matthew Wilcox
2018-06-27 11:05           ` Matthew Wilcox
2018-06-27 19:44           ` Ross Zwisler
2018-06-28  8:39             ` Matthew Wilcox
2018-06-28 16:30               ` Ross Zwisler
2018-07-25 21:03             ` Matthew Wilcox
2018-07-25 21:12               ` Ross Zwisler
2018-07-27 17:20               ` Ross Zwisler
2018-07-27 17:20                 ` Ross Zwisler
2018-07-30 15:43                 ` Matthew Wilcox
