From: Goldwyn Rodrigues <rgoldwyn@suse.de>
To: linux-btrfs@vger.kernel.org
Cc: linux-fsdevel@vger.kernel.org, Goldwyn Rodrigues <rgoldwyn@suse.com>
Subject: [PATCH 09/15] btrfs: add dax mmap support
Date: Tue, 26 Mar 2019 14:02:55 -0500
Message-ID: <20190326190301.32365-10-rgoldwyn@suse.de>
In-Reply-To: <20190326190301.32365-1-rgoldwyn@suse.de>

From: Goldwyn Rodrigues <rgoldwyn@suse.com>

Add a new vm_operations_struct, btrfs_dax_vm_ops, specifically
for dax files.

Since readpages/writepages will be removed (set to NULL) for dax,
return -ENOEXEC only for non-dax files that lack ->readpage.

dax_insert_entry() looks ugly with the extra cow argument. Should we
break it into dax_insert_cow_entry() and dax_insert_entry()?
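
For reference, a minimal sketch of what that split could look like
(hypothetical, not part of this patch): dax_insert_cow_entry() would be
a thin wrapper that always passes cow = true, leaving the common path
untouched:

static void *dax_insert_cow_entry(struct xa_state *xas,
		struct address_space *mapping, struct vm_fault *vmf,
		void *entry, pfn_t pfn, unsigned long flags, bool dirty)
{
	/* Hypothetical sketch: always replace the existing entry with
	 * the new block mapping for the copied-on-write page. */
	return dax_insert_entry(xas, mapping, vmf, entry, pfn, flags,
				dirty, true);
}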

Signed-off-by: Goldwyn Rodrigues <rgoldwyn@suse.com>
---
 fs/btrfs/ctree.h |  1 +
 fs/btrfs/dax.c   | 11 +++++++++++
 fs/btrfs/file.c  | 18 ++++++++++++++++--
 fs/dax.c         | 17 ++++++++++-------
 4 files changed, 38 insertions(+), 9 deletions(-)

diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h
index 3bcd2a4959c1..0e5060933bde 100644
--- a/fs/btrfs/ctree.h
+++ b/fs/btrfs/ctree.h
@@ -3802,6 +3802,7 @@ int btree_readahead_hook(struct extent_buffer *eb, int err);
 /* dax.c */
 ssize_t btrfs_file_dax_read(struct kiocb *iocb, struct iov_iter *to);
 ssize_t btrfs_file_dax_write(struct kiocb *iocb, struct iov_iter *from);
+vm_fault_t btrfs_dax_fault(struct vm_fault *vmf);
 #else
 static inline ssize_t btrfs_file_dax_write(struct kiocb *iocb, struct iov_iter *from)
 {
diff --git a/fs/btrfs/dax.c b/fs/btrfs/dax.c
index 49619fe3f94f..927f962d1e88 100644
--- a/fs/btrfs/dax.c
+++ b/fs/btrfs/dax.c
@@ -157,4 +157,15 @@ ssize_t btrfs_file_dax_write(struct kiocb *iocb, struct iov_iter *iter)
 	}
 	return ret;
 }
+
+vm_fault_t btrfs_dax_fault(struct vm_fault *vmf)
+{
+	vm_fault_t ret;
+	pfn_t pfn;
+	ret = dax_iomap_fault(vmf, PE_SIZE_PTE, &pfn, NULL, &btrfs_iomap_ops);
+	if (ret & VM_FAULT_NEEDDSYNC)
+		ret = dax_finish_sync_fault(vmf, PE_SIZE_PTE, pfn);
+
+	return ret;
+}
 #endif /* CONFIG_FS_DAX */
diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c
index 3b320d0ab495..196c8f37ff9d 100644
--- a/fs/btrfs/file.c
+++ b/fs/btrfs/file.c
@@ -2214,15 +2214,29 @@ static const struct vm_operations_struct btrfs_file_vm_ops = {
 	.page_mkwrite	= btrfs_page_mkwrite,
 };
 
+#ifdef CONFIG_FS_DAX
+static const struct vm_operations_struct btrfs_dax_vm_ops = {
+	.fault          = btrfs_dax_fault,
+	.page_mkwrite   = btrfs_dax_fault,
+	.pfn_mkwrite    = btrfs_dax_fault,
+};
+#else
+#define btrfs_dax_vm_ops btrfs_file_vm_ops
+#endif
+
 static int btrfs_file_mmap(struct file	*filp, struct vm_area_struct *vma)
 {
 	struct address_space *mapping = filp->f_mapping;
+	struct inode *inode = file_inode(filp);
 
-	if (!mapping->a_ops->readpage)
+	if (!IS_DAX(inode) && !mapping->a_ops->readpage)
 		return -ENOEXEC;
 
 	file_accessed(filp);
-	vma->vm_ops = &btrfs_file_vm_ops;
+	if (IS_DAX(inode))
+		vma->vm_ops = &btrfs_dax_vm_ops;
+	else
+		vma->vm_ops = &btrfs_file_vm_ops;
 
 	return 0;
 }
diff --git a/fs/dax.c b/fs/dax.c
index 21ee3df6f02c..41061da42771 100644
--- a/fs/dax.c
+++ b/fs/dax.c
@@ -708,14 +708,15 @@ static int copy_user_dax(struct block_device *bdev, struct dax_device *dax_dev,
  */
 static void *dax_insert_entry(struct xa_state *xas,
 		struct address_space *mapping, struct vm_fault *vmf,
-		void *entry, pfn_t pfn, unsigned long flags, bool dirty)
+		void *entry, pfn_t pfn, unsigned long flags, bool dirty,
+		bool cow)
 {
 	void *new_entry = dax_make_entry(pfn, flags);
 
 	if (dirty)
 		__mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
 
-	if (dax_is_zero_entry(entry) && !(flags & DAX_ZERO_PAGE)) {
+	if (cow || (dax_is_zero_entry(entry) && !(flags & DAX_ZERO_PAGE))) {
 		unsigned long index = xas->xa_index;
 		/* we are replacing a zero page with block mapping */
 		if (dax_is_pmd_entry(entry))
@@ -732,7 +733,7 @@ static void *dax_insert_entry(struct xa_state *xas,
 		dax_associate_entry(new_entry, mapping, vmf->vma, vmf->address);
 	}
 
-	if (dax_is_zero_entry(entry) || dax_is_empty_entry(entry)) {
+	if (cow || dax_is_zero_entry(entry) || dax_is_empty_entry(entry)) {
 		/*
 		 * Only swap our new entry into the page cache if the current
 		 * entry is a zero page or an empty entry.  If a normal PTE or
@@ -1031,7 +1032,7 @@ static vm_fault_t dax_load_hole(struct xa_state *xas,
 	vm_fault_t ret;
 
 	*entry = dax_insert_entry(xas, mapping, vmf, *entry, pfn,
-			DAX_ZERO_PAGE, false);
+			DAX_ZERO_PAGE, false, false);
 
 	ret = vmf_insert_mixed(vmf->vma, vaddr, pfn);
 	trace_dax_load_hole(inode, vmf, ret);
@@ -1408,7 +1409,8 @@ static vm_fault_t dax_iomap_pte_fault(struct vm_fault *vmf, pfn_t *pfnp,
 			goto error_finish_iomap;
 
 		entry = dax_insert_entry(&xas, mapping, vmf, entry, pfn,
-						 0, write && !sync);
+						 0, write && !sync,
+						(iomap.flags & IOMAP_F_COW) != 0);
 
 		/*
 		 * If we are doing synchronous page fault and inode needs fsync,
@@ -1487,7 +1489,7 @@ static vm_fault_t dax_pmd_load_hole(struct xa_state *xas, struct vm_fault *vmf,
 
 	pfn = page_to_pfn_t(zero_page);
 	*entry = dax_insert_entry(xas, mapping, vmf, *entry, pfn,
-			DAX_PMD | DAX_ZERO_PAGE, false);
+			DAX_PMD | DAX_ZERO_PAGE, false, false);
 
 	ptl = pmd_lock(vmf->vma->vm_mm, vmf->pmd);
 	if (!pmd_none(*(vmf->pmd))) {
@@ -1610,7 +1612,8 @@ static vm_fault_t dax_iomap_pmd_fault(struct vm_fault *vmf, pfn_t *pfnp,
 			goto finish_iomap;
 
 		entry = dax_insert_entry(&xas, mapping, vmf, entry, pfn,
-						DAX_PMD, write && !sync);
+						DAX_PMD, write && !sync,
+						false);
 
 		/*
 		 * If we are doing synchronous page fault and inode needs fsync,
-- 
2.16.4

