All of lore.kernel.org
 help / color / mirror / Atom feed
From: Goldwyn Rodrigues <rgoldwyn-l3A5Bk7waGM@public.gmane.org>
To: linux-btrfs-u79uwXL29TY76Z2rM5mHXA@public.gmane.org
Cc: kilobyte-b9QjgO8OEXPVItvQsEIGlw@public.gmane.org,
	jack-AlSwsSmVLrQ@public.gmane.org,
	darrick.wong-QHcLZuEGTsvQT0dZR+AlfA@public.gmane.org,
	nborisov-IBi9RG/b67k@public.gmane.org,
	linux-nvdimm-hn68Rpc1hR1g9hUCZPvPmw@public.gmane.org,
	david-FqsqvQoI3Ljby3iVrkZq2A@public.gmane.org,
	dsterba-AlSwsSmVLrQ@public.gmane.org,
	willy-wEGCiKHe2LqWVfeAwA7xHQ@public.gmane.org,
	linux-fsdevel-u79uwXL29TY76Z2rM5mHXA@public.gmane.org,
	hch-jcswGhMUV9g@public.gmane.org,
	Goldwyn Rodrigues <rgoldwyn-IBi9RG/b67k@public.gmane.org>
Subject: [PATCH 07/18] btrfs: add dax write support
Date: Mon, 29 Apr 2019 12:26:38 -0500	[thread overview]
Message-ID: <20190429172649.8288-8-rgoldwyn@suse.de> (raw)
In-Reply-To: <20190429172649.8288-1-rgoldwyn-l3A5Bk7waGM@public.gmane.org>

From: Goldwyn Rodrigues <rgoldwyn-IBi9RG/b67k@public.gmane.org>

IOMAP_DAX_COW informs the dax code to first copy the data edges
that are not page-aligned before performing the write.
The responsibility for checking whether the data edges are page-aligned
lies with ->iomap_begin(), and the source address is
stored in ->inline_data.

A new struct btrfs_iomap is passed from iomap_begin() to
iomap_end(), which contains all the accounting and locking information
for CoW based writes.

For writing to a hole, iomap->inline_data is set to zero.

Signed-off-by: Goldwyn Rodrigues <rgoldwyn-IBi9RG/b67k@public.gmane.org>
---
 fs/btrfs/ctree.h |   6 ++
 fs/btrfs/dax.c   | 182 +++++++++++++++++++++++++++++++++++++++++++++++++++++--
 fs/btrfs/file.c  |   4 +-
 3 files changed, 185 insertions(+), 7 deletions(-)

diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h
index 1e3e758b83c2..eec01eb92f33 100644
--- a/fs/btrfs/ctree.h
+++ b/fs/btrfs/ctree.h
@@ -3801,6 +3801,12 @@ int btree_readahead_hook(struct extent_buffer *eb, int err);
 #ifdef CONFIG_FS_DAX
 /* dax.c */
 ssize_t btrfs_file_dax_read(struct kiocb *iocb, struct iov_iter *to);
+ssize_t btrfs_file_dax_write(struct kiocb *iocb, struct iov_iter *from);
+#else
+static inline ssize_t btrfs_file_dax_write(struct kiocb *iocb, struct iov_iter *from)
+{
+	return 0;
+}
 #endif /* CONFIG_FS_DAX */
 
 static inline int is_fstree(u64 rootid)
diff --git a/fs/btrfs/dax.c b/fs/btrfs/dax.c
index bf3d46b0acb6..f5cc9bcdbf14 100644
--- a/fs/btrfs/dax.c
+++ b/fs/btrfs/dax.c
@@ -9,30 +9,184 @@
 #ifdef CONFIG_FS_DAX
 #include <linux/dax.h>
 #include <linux/iomap.h>
+#include <linux/uio.h>
 #include "ctree.h"
 #include "btrfs_inode.h"
 
+struct btrfs_iomap {
+	u64 start;
+	u64 end;
+	bool nocow;
+	struct extent_changeset *data_reserved;
+	struct extent_state *cached_state;
+};
+
+static struct btrfs_iomap *btrfs_iomap_init(struct inode *inode,
+				     struct extent_map **em,
+				     loff_t pos, loff_t length)
+{
+	int ret = 0;
+	struct extent_map *map = *em;
+	struct btrfs_iomap *bi;
+
+	bi = kzalloc(sizeof(struct btrfs_iomap), GFP_NOFS);
+	if (!bi)
+		return ERR_PTR(-ENOMEM);
+
+	bi->start = round_down(pos, PAGE_SIZE);
+	bi->end = PAGE_ALIGN(pos + length);
+
+	/* Wait for existing ordered extents in range to finish */
+	btrfs_wait_ordered_range(inode, bi->start, bi->end - bi->start);
+
+	lock_extent_bits(&BTRFS_I(inode)->io_tree, bi->start, bi->end, &bi->cached_state);
+
+	ret = btrfs_delalloc_reserve_space(inode, &bi->data_reserved,
+			bi->start, bi->end - bi->start);
+	if (ret) {
+		unlock_extent_cached(&BTRFS_I(inode)->io_tree, bi->start, bi->end,
+				&bi->cached_state);
+		kfree(bi);
+		return ERR_PTR(ret);
+	}
+
+	refcount_inc(&map->refs);
+	ret = btrfs_get_extent_map_write(em, NULL,
+			inode, bi->start, bi->end - bi->start, &bi->nocow);
+	if (ret) {
+		unlock_extent_cached(&BTRFS_I(inode)->io_tree, bi->start, bi->end,
+				&bi->cached_state);
+		btrfs_delalloc_release_space(inode,
+				bi->data_reserved, bi->start,
+				bi->end - bi->start, true);
+		extent_changeset_free(bi->data_reserved);
+		kfree(bi);
+		return ERR_PTR(ret);
+	}
+	free_extent_map(map);
+	return bi;
+}
+
+static void *dax_address(struct block_device *bdev, struct dax_device *dax_dev,
+			 sector_t sector, loff_t pos, loff_t length)
+{
+	size_t size = ALIGN(pos + length, PAGE_SIZE);
+	int id, ret = 0;
+	void *kaddr = NULL;
+	pgoff_t pgoff;
+	long map_len;
+
+	id = dax_read_lock();
+
+	ret = bdev_dax_pgoff(bdev, sector, size, &pgoff);
+	if (ret)
+		goto out;
+
+	map_len = dax_direct_access(dax_dev, pgoff, PHYS_PFN(size),
+			&kaddr, NULL);
+	if (map_len < 0)
+		ret = map_len;
+
+out:
+	dax_read_unlock(id);
+	if (ret)
+		return ERR_PTR(ret);
+	return kaddr;
+}
+
 static int btrfs_iomap_begin(struct inode *inode, loff_t pos,
 		loff_t length, unsigned flags, struct iomap *iomap)
 {
 	struct extent_map *em;
 	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
+	struct btrfs_iomap *bi = NULL;
+	unsigned offset = pos & (PAGE_SIZE - 1);
+	u64 srcblk = 0;
+	loff_t diff;
+
 	em = btrfs_get_extent(BTRFS_I(inode), NULL, 0, pos, length, 0);
+
+	iomap->type = IOMAP_MAPPED;
+
+	if (flags & IOMAP_WRITE) {
+		if (em->block_start != EXTENT_MAP_HOLE)
+			srcblk = em->block_start + pos - em->start - offset;
+
+		bi = btrfs_iomap_init(inode, &em, pos, length);
+		if (IS_ERR(bi))
+			return PTR_ERR(bi);
+
+	}
+
+	/*
+	 * Advance the difference between pos and start, to align well with
+	 * inline_data in case of writes
+	 */
+	diff = round_down(pos - em->start, PAGE_SIZE);
+	iomap->offset = em->start + diff;
+	iomap->length = em->len - diff;
+	iomap->bdev = em->bdev;
+	iomap->dax_dev = fs_info->dax_dev;
+
+	/*
+	 * This will be true for reads only since we have already
+	 * allocated em
+	 */
 	if (em->block_start == EXTENT_MAP_HOLE) {
 		iomap->type = IOMAP_HOLE;
 		return 0;
 	}
-	iomap->type = IOMAP_MAPPED;
-	iomap->bdev = em->bdev;
-	iomap->dax_dev = fs_info->dax_dev;
-	iomap->offset = em->start;
-	iomap->length = em->len;
-	iomap->addr = em->block_start;
+
+	iomap->addr = em->block_start + diff;
+	/* Check if we really need to copy data from old extent */
+	if (bi && !bi->nocow && (offset || pos + length < bi->end)) {
+		iomap->type = IOMAP_DAX_COW;
+		if (srcblk) {
+			sector_t sector = (srcblk + (pos & PAGE_MASK) -
+					  iomap->offset) >> 9;
+			iomap->inline_data = dax_address(em->bdev,
+					fs_info->dax_dev, sector, pos, length);
+			if (IS_ERR(iomap->inline_data)) {
+				kfree(bi);
+				return PTR_ERR(iomap->inline_data);
+			}
+		}
+	}
+
+	iomap->private = bi;
+	return 0;
+}
+
+static int btrfs_iomap_end(struct inode *inode, loff_t pos,
+		loff_t length, ssize_t written, unsigned flags,
+		struct iomap *iomap)
+{
+	struct btrfs_iomap *bi = iomap->private;
+	u64 wend;
+
+	if (!bi)
+		return 0;
+
+	unlock_extent_cached(&BTRFS_I(inode)->io_tree, bi->start, bi->end,
+			&bi->cached_state);
+
+	wend = PAGE_ALIGN(pos + written);
+	if (wend < bi->end) {
+		btrfs_delalloc_release_space(inode,
+				bi->data_reserved, wend,
+				bi->end - wend, true);
+	}
+
+	btrfs_update_ordered_extent(inode, bi->start, wend - bi->start, true);
+	btrfs_delalloc_release_extents(BTRFS_I(inode), wend - bi->start, false);
+	extent_changeset_free(bi->data_reserved);
+	kfree(bi);
 	return 0;
 }
 
 static const struct iomap_ops btrfs_iomap_ops = {
 	.iomap_begin		= btrfs_iomap_begin,
+	.iomap_end		= btrfs_iomap_end,
 };
 
 ssize_t btrfs_file_dax_read(struct kiocb *iocb, struct iov_iter *to)
@@ -46,4 +200,20 @@ ssize_t btrfs_file_dax_read(struct kiocb *iocb, struct iov_iter *to)
 
 	return ret;
 }
+
+ssize_t btrfs_file_dax_write(struct kiocb *iocb, struct iov_iter *iter)
+{
+	ssize_t ret = 0;
+	u64 pos = iocb->ki_pos;
+	struct inode *inode = file_inode(iocb->ki_filp);
+	ret = dax_iomap_rw(iocb, iter, &btrfs_iomap_ops);
+
+	if (ret > 0) {
+		pos += ret;
+		if (pos > i_size_read(inode))
+			i_size_write(inode, pos);
+		iocb->ki_pos = pos;
+	}
+	return ret;
+}
 #endif /* CONFIG_FS_DAX */
diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c
index 9194591f9eea..a795023e26ca 100644
--- a/fs/btrfs/file.c
+++ b/fs/btrfs/file.c
@@ -1964,7 +1964,9 @@ static ssize_t btrfs_file_write_iter(struct kiocb *iocb,
 	if (sync)
 		atomic_inc(&BTRFS_I(inode)->sync_writers);
 
-	if (iocb->ki_flags & IOCB_DIRECT) {
+	if (IS_DAX(inode)) {
+		num_written = btrfs_file_dax_write(iocb, from);
+	} else if (iocb->ki_flags & IOCB_DIRECT) {
 		num_written = __btrfs_direct_write(iocb, from);
 	} else {
 		num_written = btrfs_buffered_write(iocb, from);
-- 
2.16.4

WARNING: multiple messages have this Message-ID (diff)
From: Goldwyn Rodrigues <rgoldwyn@suse.de>
To: linux-btrfs@vger.kernel.org
Cc: kilobyte@angband.pl, linux-fsdevel@vger.kernel.org, jack@suse.cz,
	david@fromorbit.com, willy@infradead.org, hch@lst.de,
	darrick.wong@oracle.com, dsterba@suse.cz, nborisov@suse.com,
	linux-nvdimm@lists.01.org, Goldwyn Rodrigues <rgoldwyn@suse.com>
Subject: [PATCH 07/18] btrfs: add dax write support
Date: Mon, 29 Apr 2019 12:26:38 -0500	[thread overview]
Message-ID: <20190429172649.8288-8-rgoldwyn@suse.de> (raw)
In-Reply-To: <20190429172649.8288-1-rgoldwyn@suse.de>

From: Goldwyn Rodrigues <rgoldwyn@suse.com>

IOMAP_DAX_COW informs the dax code to first copy the data edges
that are not page-aligned before performing the write.
The responsibility for checking whether the data edges are page-aligned
lies with ->iomap_begin(), and the source address is
stored in ->inline_data.

A new struct btrfs_iomap is passed from iomap_begin() to
iomap_end(), which contains all the accounting and locking information
for CoW based writes.

For writing to a hole, iomap->inline_data is set to zero.

Signed-off-by: Goldwyn Rodrigues <rgoldwyn@suse.com>
---
 fs/btrfs/ctree.h |   6 ++
 fs/btrfs/dax.c   | 182 +++++++++++++++++++++++++++++++++++++++++++++++++++++--
 fs/btrfs/file.c  |   4 +-
 3 files changed, 185 insertions(+), 7 deletions(-)

diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h
index 1e3e758b83c2..eec01eb92f33 100644
--- a/fs/btrfs/ctree.h
+++ b/fs/btrfs/ctree.h
@@ -3801,6 +3801,12 @@ int btree_readahead_hook(struct extent_buffer *eb, int err);
 #ifdef CONFIG_FS_DAX
 /* dax.c */
 ssize_t btrfs_file_dax_read(struct kiocb *iocb, struct iov_iter *to);
+ssize_t btrfs_file_dax_write(struct kiocb *iocb, struct iov_iter *from);
+#else
+static inline ssize_t btrfs_file_dax_write(struct kiocb *iocb, struct iov_iter *from)
+{
+	return 0;
+}
 #endif /* CONFIG_FS_DAX */
 
 static inline int is_fstree(u64 rootid)
diff --git a/fs/btrfs/dax.c b/fs/btrfs/dax.c
index bf3d46b0acb6..f5cc9bcdbf14 100644
--- a/fs/btrfs/dax.c
+++ b/fs/btrfs/dax.c
@@ -9,30 +9,184 @@
 #ifdef CONFIG_FS_DAX
 #include <linux/dax.h>
 #include <linux/iomap.h>
+#include <linux/uio.h>
 #include "ctree.h"
 #include "btrfs_inode.h"
 
+struct btrfs_iomap {
+	u64 start;
+	u64 end;
+	bool nocow;
+	struct extent_changeset *data_reserved;
+	struct extent_state *cached_state;
+};
+
+static struct btrfs_iomap *btrfs_iomap_init(struct inode *inode,
+				     struct extent_map **em,
+				     loff_t pos, loff_t length)
+{
+	int ret = 0;
+	struct extent_map *map = *em;
+	struct btrfs_iomap *bi;
+
+	bi = kzalloc(sizeof(struct btrfs_iomap), GFP_NOFS);
+	if (!bi)
+		return ERR_PTR(-ENOMEM);
+
+	bi->start = round_down(pos, PAGE_SIZE);
+	bi->end = PAGE_ALIGN(pos + length);
+
+	/* Wait for existing ordered extents in range to finish */
+	btrfs_wait_ordered_range(inode, bi->start, bi->end - bi->start);
+
+	lock_extent_bits(&BTRFS_I(inode)->io_tree, bi->start, bi->end, &bi->cached_state);
+
+	ret = btrfs_delalloc_reserve_space(inode, &bi->data_reserved,
+			bi->start, bi->end - bi->start);
+	if (ret) {
+		unlock_extent_cached(&BTRFS_I(inode)->io_tree, bi->start, bi->end,
+				&bi->cached_state);
+		kfree(bi);
+		return ERR_PTR(ret);
+	}
+
+	refcount_inc(&map->refs);
+	ret = btrfs_get_extent_map_write(em, NULL,
+			inode, bi->start, bi->end - bi->start, &bi->nocow);
+	if (ret) {
+		unlock_extent_cached(&BTRFS_I(inode)->io_tree, bi->start, bi->end,
+				&bi->cached_state);
+		btrfs_delalloc_release_space(inode,
+				bi->data_reserved, bi->start,
+				bi->end - bi->start, true);
+		extent_changeset_free(bi->data_reserved);
+		kfree(bi);
+		return ERR_PTR(ret);
+	}
+	free_extent_map(map);
+	return bi;
+}
+
+static void *dax_address(struct block_device *bdev, struct dax_device *dax_dev,
+			 sector_t sector, loff_t pos, loff_t length)
+{
+	size_t size = ALIGN(pos + length, PAGE_SIZE);
+	int id, ret = 0;
+	void *kaddr = NULL;
+	pgoff_t pgoff;
+	long map_len;
+
+	id = dax_read_lock();
+
+	ret = bdev_dax_pgoff(bdev, sector, size, &pgoff);
+	if (ret)
+		goto out;
+
+	map_len = dax_direct_access(dax_dev, pgoff, PHYS_PFN(size),
+			&kaddr, NULL);
+	if (map_len < 0)
+		ret = map_len;
+
+out:
+	dax_read_unlock(id);
+	if (ret)
+		return ERR_PTR(ret);
+	return kaddr;
+}
+
 static int btrfs_iomap_begin(struct inode *inode, loff_t pos,
 		loff_t length, unsigned flags, struct iomap *iomap)
 {
 	struct extent_map *em;
 	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
+	struct btrfs_iomap *bi = NULL;
+	unsigned offset = pos & (PAGE_SIZE - 1);
+	u64 srcblk = 0;
+	loff_t diff;
+
 	em = btrfs_get_extent(BTRFS_I(inode), NULL, 0, pos, length, 0);
+
+	iomap->type = IOMAP_MAPPED;
+
+	if (flags & IOMAP_WRITE) {
+		if (em->block_start != EXTENT_MAP_HOLE)
+			srcblk = em->block_start + pos - em->start - offset;
+
+		bi = btrfs_iomap_init(inode, &em, pos, length);
+		if (IS_ERR(bi))
+			return PTR_ERR(bi);
+
+	}
+
+	/*
+	 * Advance the difference between pos and start, to align well with
+	 * inline_data in case of writes
+	 */
+	diff = round_down(pos - em->start, PAGE_SIZE);
+	iomap->offset = em->start + diff;
+	iomap->length = em->len - diff;
+	iomap->bdev = em->bdev;
+	iomap->dax_dev = fs_info->dax_dev;
+
+	/*
+	 * This will be true for reads only since we have already
+	 * allocated em
+	 */
 	if (em->block_start == EXTENT_MAP_HOLE) {
 		iomap->type = IOMAP_HOLE;
 		return 0;
 	}
-	iomap->type = IOMAP_MAPPED;
-	iomap->bdev = em->bdev;
-	iomap->dax_dev = fs_info->dax_dev;
-	iomap->offset = em->start;
-	iomap->length = em->len;
-	iomap->addr = em->block_start;
+
+	iomap->addr = em->block_start + diff;
+	/* Check if we really need to copy data from old extent */
+	if (bi && !bi->nocow && (offset || pos + length < bi->end)) {
+		iomap->type = IOMAP_DAX_COW;
+		if (srcblk) {
+			sector_t sector = (srcblk + (pos & PAGE_MASK) -
+					  iomap->offset) >> 9;
+			iomap->inline_data = dax_address(em->bdev,
+					fs_info->dax_dev, sector, pos, length);
+			if (IS_ERR(iomap->inline_data)) {
+				kfree(bi);
+				return PTR_ERR(iomap->inline_data);
+			}
+		}
+	}
+
+	iomap->private = bi;
+	return 0;
+}
+
+static int btrfs_iomap_end(struct inode *inode, loff_t pos,
+		loff_t length, ssize_t written, unsigned flags,
+		struct iomap *iomap)
+{
+	struct btrfs_iomap *bi = iomap->private;
+	u64 wend;
+
+	if (!bi)
+		return 0;
+
+	unlock_extent_cached(&BTRFS_I(inode)->io_tree, bi->start, bi->end,
+			&bi->cached_state);
+
+	wend = PAGE_ALIGN(pos + written);
+	if (wend < bi->end) {
+		btrfs_delalloc_release_space(inode,
+				bi->data_reserved, wend,
+				bi->end - wend, true);
+	}
+
+	btrfs_update_ordered_extent(inode, bi->start, wend - bi->start, true);
+	btrfs_delalloc_release_extents(BTRFS_I(inode), wend - bi->start, false);
+	extent_changeset_free(bi->data_reserved);
+	kfree(bi);
 	return 0;
 }
 
 static const struct iomap_ops btrfs_iomap_ops = {
 	.iomap_begin		= btrfs_iomap_begin,
+	.iomap_end		= btrfs_iomap_end,
 };
 
 ssize_t btrfs_file_dax_read(struct kiocb *iocb, struct iov_iter *to)
@@ -46,4 +200,20 @@ ssize_t btrfs_file_dax_read(struct kiocb *iocb, struct iov_iter *to)
 
 	return ret;
 }
+
+ssize_t btrfs_file_dax_write(struct kiocb *iocb, struct iov_iter *iter)
+{
+	ssize_t ret = 0;
+	u64 pos = iocb->ki_pos;
+	struct inode *inode = file_inode(iocb->ki_filp);
+	ret = dax_iomap_rw(iocb, iter, &btrfs_iomap_ops);
+
+	if (ret > 0) {
+		pos += ret;
+		if (pos > i_size_read(inode))
+			i_size_write(inode, pos);
+		iocb->ki_pos = pos;
+	}
+	return ret;
+}
 #endif /* CONFIG_FS_DAX */
diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c
index 9194591f9eea..a795023e26ca 100644
--- a/fs/btrfs/file.c
+++ b/fs/btrfs/file.c
@@ -1964,7 +1964,9 @@ static ssize_t btrfs_file_write_iter(struct kiocb *iocb,
 	if (sync)
 		atomic_inc(&BTRFS_I(inode)->sync_writers);
 
-	if (iocb->ki_flags & IOCB_DIRECT) {
+	if (IS_DAX(inode)) {
+		num_written = btrfs_file_dax_write(iocb, from);
+	} else if (iocb->ki_flags & IOCB_DIRECT) {
 		num_written = __btrfs_direct_write(iocb, from);
 	} else {
 		num_written = btrfs_buffered_write(iocb, from);
-- 
2.16.4


  parent reply	other threads:[~2019-04-29 17:26 UTC|newest]

Thread overview: 111+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2019-04-29 17:26 [PATCH v4 00/18] btrfs dax support Goldwyn Rodrigues
2019-04-29 17:26 ` Goldwyn Rodrigues
2019-04-29 17:26 ` [PATCH 01/18] btrfs: create a mount option for dax Goldwyn Rodrigues
2019-04-29 17:26   ` Goldwyn Rodrigues
2019-05-21 18:02   ` Darrick J. Wong
2019-05-21 18:02     ` Darrick J. Wong
2019-04-29 17:26 ` [PATCH 02/18] btrfs: Carve out btrfs_get_extent_map_write() out of btrfs_get_blocks_write() Goldwyn Rodrigues
2019-04-29 17:26   ` Goldwyn Rodrigues
2019-04-29 17:26 ` [PATCH 04/18] dax: Introduce IOMAP_DAX_COW to CoW edges during writes Goldwyn Rodrigues
2019-04-29 17:26   ` Goldwyn Rodrigues
2019-05-21 16:51   ` Darrick J. Wong
2019-05-22 20:14     ` Goldwyn Rodrigues
2019-05-22 20:14       ` Goldwyn Rodrigues
2019-05-23  2:10       ` Dave Chinner
2019-05-23  2:10         ` Dave Chinner
2019-05-23  9:05     ` Shiyang Ruan
2019-05-23  9:05       ` Shiyang Ruan
2019-05-23 11:51       ` Goldwyn Rodrigues
2019-05-23 11:51         ` Goldwyn Rodrigues
2019-05-27  8:25         ` Shiyang Ruan
2019-05-27  8:25           ` Shiyang Ruan
2019-05-28  9:17           ` Jan Kara
2019-05-28  9:17             ` Jan Kara
2019-05-29  2:01             ` Shiyang Ruan
2019-05-29  2:01               ` Shiyang Ruan
2019-05-29  2:47               ` Dave Chinner
2019-05-29  2:47                 ` Dave Chinner
2019-05-29  4:02                 ` Shiyang Ruan
2019-05-29  4:02                   ` Shiyang Ruan
2019-05-29  4:07                   ` Darrick J. Wong
2019-05-29  4:07                     ` Darrick J. Wong
2019-05-29  4:46                     ` Dave Chinner
2019-05-29  4:46                       ` Dave Chinner
2019-05-29 13:46                       ` Jan Kara
2019-05-29 13:46                         ` Jan Kara
2019-05-29 22:14                         ` Dave Chinner
2019-05-29 22:14                           ` Dave Chinner
2019-05-30 11:16                           ` Jan Kara
2019-05-30 11:16                             ` Jan Kara
2019-05-30 22:59                             ` Dave Chinner
2019-05-30 22:59                               ` Dave Chinner
2019-04-29 17:26 ` [PATCH 05/18] btrfs: return whether extent is nocow or not Goldwyn Rodrigues
2019-04-29 17:26   ` Goldwyn Rodrigues
2019-04-29 17:26 ` [PATCH 06/18] btrfs: Rename __endio_write_update_ordered() to btrfs_update_ordered_extent() Goldwyn Rodrigues
2019-04-29 17:26   ` Goldwyn Rodrigues
     [not found] ` <20190429172649.8288-1-rgoldwyn-l3A5Bk7waGM@public.gmane.org>
2019-04-29 17:26   ` [PATCH 03/18] btrfs: basic dax read Goldwyn Rodrigues
2019-04-29 17:26     ` Goldwyn Rodrigues
     [not found]     ` <20190429172649.8288-4-rgoldwyn-l3A5Bk7waGM@public.gmane.org>
2019-05-21 15:14       ` Darrick J. Wong
2019-05-21 15:14         ` Darrick J. Wong
2019-05-22 21:50         ` Goldwyn Rodrigues
2019-04-29 17:26   ` Goldwyn Rodrigues [this message]
2019-04-29 17:26     ` [PATCH 07/18] btrfs: add dax write support Goldwyn Rodrigues
     [not found]     ` <20190429172649.8288-8-rgoldwyn-l3A5Bk7waGM@public.gmane.org>
2019-05-21 17:08       ` Darrick J. Wong
2019-05-21 17:08         ` Darrick J. Wong
2019-04-29 17:26   ` [PATCH 13/18] fs: dedup file range to use a compare function Goldwyn Rodrigues
2019-04-29 17:26     ` Goldwyn Rodrigues
     [not found]     ` <20190429172649.8288-14-rgoldwyn-l3A5Bk7waGM@public.gmane.org>
2019-05-21 18:17       ` Darrick J. Wong
2019-05-21 18:17         ` Darrick J. Wong
2019-04-29 17:26   ` [PATCH 18/18] btrfs: trace functions for btrfs_iomap_begin/end Goldwyn Rodrigues
2019-04-29 17:26     ` Goldwyn Rodrigues
2019-04-29 17:26 ` [PATCH 08/18] dax: memcpy page in case of IOMAP_DAX_COW for mmap faults Goldwyn Rodrigues
2019-04-29 17:26   ` Goldwyn Rodrigues
2019-05-21 17:46   ` Darrick J. Wong
2019-05-21 17:46     ` Darrick J. Wong
2019-05-22 19:11     ` Goldwyn Rodrigues
2019-05-22 19:11       ` Goldwyn Rodrigues
2019-05-23  4:02       ` Darrick J. Wong
2019-05-23  4:02         ` Darrick J. Wong
2019-05-23 12:10     ` Jan Kara
2019-05-23 12:10       ` Jan Kara
2019-04-29 17:26 ` [PATCH 09/18] btrfs: Add dax specific address_space_operations Goldwyn Rodrigues
2019-04-29 17:26   ` Goldwyn Rodrigues
2019-04-29 17:26 ` [PATCH 10/18] dax: replace mmap entry in case of CoW Goldwyn Rodrigues
2019-04-29 17:26   ` Goldwyn Rodrigues
2019-05-21 17:35   ` Darrick J. Wong
2019-05-21 17:35     ` Darrick J. Wong
2019-05-23 13:38   ` Jan Kara
2019-05-23 13:38     ` Jan Kara
2019-04-29 17:26 ` [PATCH 11/18] btrfs: add dax mmap support Goldwyn Rodrigues
2019-04-29 17:26   ` Goldwyn Rodrigues
2019-04-29 17:26 ` [PATCH 12/18] btrfs: allow MAP_SYNC mmap Goldwyn Rodrigues
2019-04-29 17:26   ` Goldwyn Rodrigues
2019-05-10 15:32   ` [PATCH for-goldwyn] btrfs: disallow MAP_SYNC outside of DAX mounts Adam Borowski
2019-05-10 15:32     ` Adam Borowski
2019-05-10 15:41     ` Dan Williams
2019-05-10 15:41       ` Dan Williams
2019-05-10 15:59       ` Pankaj Gupta
2019-05-10 15:59         ` Pankaj Gupta
2019-05-23 13:44   ` [PATCH 12/18] btrfs: allow MAP_SYNC mmap Jan Kara
2019-05-23 13:44     ` Jan Kara
2019-05-23 16:19     ` Adam Borowski
2019-05-23 16:19       ` Adam Borowski
2019-04-29 17:26 ` [PATCH 14/18] dax: memcpy before zeroing range Goldwyn Rodrigues
2019-04-29 17:26   ` Goldwyn Rodrigues
2019-05-21 17:27   ` Darrick J. Wong
2019-05-21 17:27     ` Darrick J. Wong
2019-04-29 17:26 ` [PATCH 15/18] btrfs: handle dax page zeroing Goldwyn Rodrigues
2019-04-29 17:26 ` [PATCH 16/18] btrfs: Writeprotect mmap pages on snapshot Goldwyn Rodrigues
2019-04-29 17:26   ` Goldwyn Rodrigues
2019-05-23 14:04   ` Jan Kara
2019-05-23 14:04     ` Jan Kara
2019-05-23 15:27     ` Goldwyn Rodrigues
2019-05-23 15:27       ` Goldwyn Rodrigues
2019-05-23 19:07       ` Jan Kara
2019-05-23 19:07         ` Jan Kara
2019-05-23 21:22         ` Goldwyn Rodrigues
2019-05-23 21:22           ` Goldwyn Rodrigues
2019-04-29 17:26 ` [PATCH 17/18] btrfs: Disable dax-based defrag and send Goldwyn Rodrigues
2019-04-29 17:26   ` Goldwyn Rodrigues
  -- strict thread matches above, loose matches on Subject: below --
2019-04-16 16:41 [PATCH v3 00/18] btrfs dax support Goldwyn Rodrigues
2019-04-16 16:41 ` [PATCH 07/18] btrfs: add dax write support Goldwyn Rodrigues
2019-04-16 16:41   ` Goldwyn Rodrigues

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=20190429172649.8288-8-rgoldwyn@suse.de \
    --to=rgoldwyn-l3a5bk7wagm@public.gmane.org \
    --cc=darrick.wong-QHcLZuEGTsvQT0dZR+AlfA@public.gmane.org \
    --cc=david-FqsqvQoI3Ljby3iVrkZq2A@public.gmane.org \
    --cc=dsterba-AlSwsSmVLrQ@public.gmane.org \
    --cc=hch-jcswGhMUV9g@public.gmane.org \
    --cc=jack-AlSwsSmVLrQ@public.gmane.org \
    --cc=kilobyte-b9QjgO8OEXPVItvQsEIGlw@public.gmane.org \
    --cc=linux-btrfs-u79uwXL29TY76Z2rM5mHXA@public.gmane.org \
    --cc=linux-fsdevel-u79uwXL29TY76Z2rM5mHXA@public.gmane.org \
    --cc=linux-nvdimm-hn68Rpc1hR1g9hUCZPvPmw@public.gmane.org \
    --cc=nborisov-IBi9RG/b67k@public.gmane.org \
    --cc=rgoldwyn-IBi9RG/b67k@public.gmane.org \
    --cc=willy-wEGCiKHe2LqWVfeAwA7xHQ@public.gmane.org \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.