From: David Howells <dhowells@redhat.com>
To: Trond Myklebust <trondmy@hammerspace.com>,
	Anna Schumaker <anna.schumaker@netapp.com>,
	Steve French <sfrench@samba.org>,
	Jeff Layton <jlayton@redhat.com>
Cc: dhowells@redhat.com, Matthew Wilcox <willy@infradead.org>,
	Alexander Viro <viro@zeniv.linux.org.uk>,
	linux-afs@lists.infradead.org, linux-nfs@vger.kernel.org,
	linux-cifs@vger.kernel.org, ceph-devel@vger.kernel.org,
	v9fs-developer@lists.sourceforge.net,
	linux-fsdevel@vger.kernel.org, linux-kernel@vger.kernel.org
Subject: [RFC PATCH 33/61] cachefiles: Implement a content-present indicator and bitmap
Date: Mon, 04 May 2020 18:12:27 +0100
Message-ID: <158861234745.340223.12641191192775836182.stgit@warthog.procyon.org.uk>
In-Reply-To: <158861203563.340223.7585359869938129395.stgit@warthog.procyon.org.uk>

Implement a content indicator that records the presence or absence of
content, and a bitmap that records which blocks of content are present in a
granular file.  The indicator is added to the xattr that stores the netfs
coherency data, along with the file size and the file zero point (the point
beyond which it can be assumed that the server holds no data).

In the content bitmap, if present, each bit indicates whether the
corresponding 256KiB granule of the cache file is present.  The bitmap is
stored in a separate xattr, which is loaded when the first I/O handle is
created on the cache object and saved when the object is discarded from
memory.
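
For illustration only (not part of the patch; content_map_bytes() is a
made-up name), the sizing rule the map follows in content-map.c below, one
bit per 256KiB granule rounded up to a power-of-two number of bytes with an
8-byte minimum, works out like this:

    /* Illustrative sketch: how big a content map a given object size needs.
     * A 1GiB file needs 4096 bits, i.e. a 512-byte map.
     */
    #include <stddef.h>

    #define GRAN_SIZE (256 * 1024)  /* CACHEFILES_GRAN_SIZE in the patch */

    static size_t content_map_bytes(long long object_size)
    {
            size_t bits, bytes, pow2 = 8;

            if (object_size <= 0)
                    return 8;
            bits  = (object_size + GRAN_SIZE - 1) / GRAN_SIZE; /* one bit per granule */
            bytes = (bits + 7) / 8;                            /* bits to bytes */
            while (pow2 < bytes)                               /* round up to a power of two */
                    pow2 <<= 1;
            return pow2;
    }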

Non-index objects in the cache can be monolithic or granular.  The content
map isn't used for monolithic objects (FSCACHE_ADV_SINGLE_CHUNK) as they are
expected to be all-or-nothing, so the content indicator alone suffices.
Examples of this would be AFS directory or symlink content.
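
To make the monolithic/granular split concrete, here is a sketch of how
presence might be decided (illustrative only; data_is_present() is a made-up
name, the other identifiers are the ones used in the diff below):

    /* Illustrative sketch: single-chunk objects need only the content
     * indicator, granular objects consult the bitmap.
     */
    static bool data_is_present(struct cachefiles_object *object, loff_t pos)
    {
            if (object->fscache.cookie->advice & FSCACHE_ADV_SINGLE_CHUNK)
                    return object->content_info == CACHEFILES_CONTENT_SINGLE;

            return cachefiles_granule_is_present(object,
                                                 pos / CACHEFILES_GRAN_SIZE);
    }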

Signed-off-by: David Howells <dhowells@redhat.com>
---

 fs/cachefiles/Makefile            |    1 
 fs/cachefiles/bind.c              |    1 
 fs/cachefiles/content-map.c       |  251 +++++++++++++++++++++++++++++++++++++
 fs/cachefiles/interface.c         |    5 +
 fs/cachefiles/internal.h          |   31 +++++
 fs/cachefiles/io.c                |    4 +
 fs/cachefiles/xattr.c             |   24 +++-
 include/trace/events/cachefiles.h |    4 -
 8 files changed, 313 insertions(+), 8 deletions(-)
 create mode 100644 fs/cachefiles/content-map.c
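
As a hypothetical aside (not part of the patch): the map lives in a plain
user xattr named "user.CacheFiles.content" (see content-map.c below), so a
small userspace tool could dump which granules a cache file claims to hold,
roughly like this:

    /* Hypothetical userspace sketch: list the 256KiB granules recorded as
     * present in a cache file's "user.CacheFiles.content" xattr.  Bit
     * numbering is little-endian within each byte, matching the
     * set_bit_le()/test_bit_le() usage in the patch.
     */
    #include <stdio.h>
    #include <sys/types.h>
    #include <sys/xattr.h>

    int main(int argc, char *argv[])
    {
            unsigned char map[4096];
            ssize_t len;

            if (argc != 2) {
                    fprintf(stderr, "usage: %s <cachefile>\n", argv[0]);
                    return 2;
            }

            len = getxattr(argv[1], "user.CacheFiles.content", map, sizeof(map));
            if (len < 0) {
                    perror("getxattr");
                    return 1;
            }

            for (ssize_t i = 0; i < len * 8; i++)
                    if (map[i / 8] & (1 << (i % 8)))
                            printf("granule %zd present (offset %lld)\n",
                                   i, (long long)i * 256 * 1024);
            return 0;
    }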

diff --git a/fs/cachefiles/Makefile b/fs/cachefiles/Makefile
index d894d317d6e7..84615aca866a 100644
--- a/fs/cachefiles/Makefile
+++ b/fs/cachefiles/Makefile
@@ -5,6 +5,7 @@
 
 cachefiles-y := \
 	bind.o \
+	content-map.o \
 	daemon.o \
 	interface.o \
 	io.o \
diff --git a/fs/cachefiles/bind.c b/fs/cachefiles/bind.c
index 84fe89d5999e..40377633e3d9 100644
--- a/fs/cachefiles/bind.c
+++ b/fs/cachefiles/bind.c
@@ -102,6 +102,7 @@ static int cachefiles_daemon_add_cache(struct cachefiles_cache *cache)
 		goto error_root_object;
 
 	atomic_set(&fsdef->usage, 1);
+	rwlock_init(&fsdef->content_map_lock);
 	fsdef->type = FSCACHE_COOKIE_TYPE_INDEX;
 
 	_debug("- fsdef %p", fsdef);
diff --git a/fs/cachefiles/content-map.c b/fs/cachefiles/content-map.c
new file mode 100644
index 000000000000..594624cb1cb9
--- /dev/null
+++ b/fs/cachefiles/content-map.c
@@ -0,0 +1,251 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/* Datafile content management
+ *
+ * Copyright (C) 2020 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ */
+
+#include <linux/mount.h>
+#include <linux/slab.h>
+#include <linux/file.h>
+#include <linux/swap.h>
+#include <linux/xattr.h>
+#include "internal.h"
+
+static const char cachefiles_xattr_content_map[] =
+	XATTR_USER_PREFIX "CacheFiles.content";
+
+static bool cachefiles_granule_is_present(struct cachefiles_object *object,
+					  size_t granule)
+{
+	bool res;
+
+	if (granule / 8 >= object->content_map_size)
+		return false;
+	read_lock_bh(&object->content_map_lock);
+	res = test_bit_le(granule, object->content_map);
+	read_unlock_bh(&object->content_map_lock);
+	return res;
+}
+
+/*
+ * Mark the content map to indicate stored granule.
+ */
+void cachefiles_mark_content_map(struct fscache_io_request *req)
+{
+	struct cachefiles_object *object =
+		container_of(req->object, struct cachefiles_object, fscache);
+	loff_t pos = req->pos;
+
+	_enter("%llx", pos);
+
+	read_lock_bh(&object->content_map_lock);
+
+	if (object->fscache.cookie->advice & FSCACHE_ADV_SINGLE_CHUNK) {
+		if (pos == 0) {
+			object->content_info = CACHEFILES_CONTENT_SINGLE;
+			set_bit(FSCACHE_OBJECT_NEEDS_UPDATE, &object->fscache.flags);
+		}
+	} else {
+		pgoff_t granule;
+		loff_t end = pos + req->len;
+
+		pos = round_down(pos, CACHEFILES_GRAN_SIZE);
+		do {
+			granule = pos / CACHEFILES_GRAN_SIZE;
+			if (granule / 8 >= object->content_map_size)
+				break;
+
+			set_bit_le(granule, object->content_map);
+			object->content_map_changed = true;
+			pos += CACHEFILES_GRAN_SIZE;
+
+		} while (pos < end);
+
+		if (object->content_info != CACHEFILES_CONTENT_MAP) {
+			object->content_info = CACHEFILES_CONTENT_MAP;
+			set_bit(FSCACHE_OBJECT_NEEDS_UPDATE, &object->fscache.flags);
+		}
+	}
+
+	read_unlock_bh(&object->content_map_lock);
+}
+
+/*
+ * Expand the content map to a larger file size.
+ */
+void cachefiles_expand_content_map(struct cachefiles_object *object, loff_t size)
+{
+	u8 *map, *zap;
+
+	/* Determine the size.  There's one bit per granule.  We size it in
+	 * terms of 8-byte chunks, where a 64-bit span of 256KiB granules
+	 * covers 16MiB of file space.  At that rate, 512 bytes covers 1GiB.
+	 */
+	if (size > 0) {
+		size += CACHEFILES_GRAN_SIZE - 1;
+		size /= CACHEFILES_GRAN_SIZE;
+		size += 8 - 1;
+		size /= 8;
+		size = roundup_pow_of_two(size);
+	} else {
+		size = 8;
+	}
+
+	if (size <= object->content_map_size)
+		return;
+
+	map = kzalloc(size, GFP_KERNEL);
+	if (!map)
+		return;
+
+	write_lock_bh(&object->content_map_lock);
+	if (size > object->content_map_size) {
+		zap = object->content_map;
+		memcpy(map, zap, object->content_map_size);
+		object->content_map = map;
+		object->content_map_size = size;
+	} else {
+		zap = map;
+	}
+	write_unlock_bh(&object->content_map_lock);
+
+	kfree(zap);
+}
+
+/*
+ * Adjust the content map when we shorten a backing object.
+ *
+ * We need to unmark any granules that are going to be discarded.
+ */
+void cachefiles_shorten_content_map(struct cachefiles_object *object,
+				    loff_t new_size)
+{
+	struct fscache_cookie *cookie = object->fscache.cookie;
+	loff_t granule, o_granule;
+
+	if (object->fscache.cookie->advice & FSCACHE_ADV_SINGLE_CHUNK)
+		return;
+
+	write_lock_bh(&object->content_map_lock);
+
+	if (object->content_info == CACHEFILES_CONTENT_MAP) {
+		if (cookie->zero_point > new_size)
+			cookie->zero_point = new_size;
+
+		granule = new_size;
+		granule += CACHEFILES_GRAN_SIZE - 1;
+		granule /= CACHEFILES_GRAN_SIZE;
+
+		o_granule = cookie->object_size;
+		o_granule += CACHEFILES_GRAN_SIZE - 1;
+		o_granule /= CACHEFILES_GRAN_SIZE;
+
+		for (; o_granule > granule; o_granule--)
+			clear_bit_le(o_granule, object->content_map);
+	}
+
+	write_unlock_bh(&object->content_map_lock);
+}
+
+/*
+ * Load the content map.
+ */
+bool cachefiles_load_content_map(struct cachefiles_object *object)
+{
+	struct cachefiles_cache *cache = container_of(object->fscache.cache,
+						      struct cachefiles_cache, cache);
+	const struct cred *saved_cred;
+	ssize_t got;
+	loff_t size;
+	u8 *map = NULL;
+
+	_enter("c=%08x,%llx",
+	       object->fscache.cookie->debug_id,
+	       object->fscache.cookie->object_size);
+
+	object->content_info = CACHEFILES_CONTENT_NO_DATA;
+	if (object->fscache.cookie->advice & FSCACHE_ADV_SINGLE_CHUNK) {
+		/* Single-chunk object.  The presence or absence of the content
+		 * map xattr is sufficient indication.
+		 */
+		size = 0;
+	} else {
+		/* Granulated object.  There's one bit per granule.  We size it
+		 * in terms of 8-byte chunks, where a 64-bit span of 256KiB
+		 * granules covers 16MiB of file space.  At that rate, 512
+		 * bytes covers 1GiB.
+		 */
+		size = object->fscache.cookie->object_size;
+		if (size > 0) {
+			size += CACHEFILES_GRAN_SIZE - 1;
+			size /= CACHEFILES_GRAN_SIZE;
+			size += 8 - 1;
+			size /= 8;
+			if (size < 8)
+				size = 8;
+			size = roundup_pow_of_two(size);
+		} else {
+			size = 8;
+		}
+
+		map = kzalloc(size, GFP_KERNEL);
+		if (!map)
+			return false;
+	}
+
+	cachefiles_begin_secure(cache, &saved_cred);
+	got = vfs_getxattr(object->dentry, cachefiles_xattr_content_map,
+			   map, size);
+	cachefiles_end_secure(cache, saved_cred);
+	if (got < 0 && got != -ENODATA) {
+		kfree(map);
+		_leave(" = f [%zd]", got);
+		return false;
+	}
+
+	if (size == 0) {
+		if (got != -ENODATA)
+			object->content_info = CACHEFILES_CONTENT_SINGLE;
+		_leave(" = t [%zd]", got);
+	} else {
+		object->content_map = map;
+		object->content_map_size = size;
+		object->content_info = CACHEFILES_CONTENT_MAP;
+		_leave(" = t [%zd/%llu %*phN]", got, size, (int)size, map);
+	}
+
+	return true;
+}
+
+/*
+ * Save the content map.
+ */
+void cachefiles_save_content_map(struct cachefiles_object *object)
+{
+	ssize_t ret;
+	size_t size;
+	u8 *map;
+
+	_enter("c=%08x", object->fscache.cookie->debug_id);
+
+	if (object->content_info != CACHEFILES_CONTENT_MAP)
+		return;
+
+	size = object->content_map_size;
+	map = object->content_map;
+
+	/* Don't save trailing zeros, but do save at least one byte */
+	for (; size > 0; size--)
+		if (map[size - 1])
+			break;
+
+	ret = vfs_setxattr(object->dentry, cachefiles_xattr_content_map,
+			   map, size, 0);
+	if (ret < 0) {
+		cachefiles_io_error_obj(object, "Unable to set xattr");
+		return;
+	}
+
+	_leave(" = %zd", ret);
+}
diff --git a/fs/cachefiles/interface.c b/fs/cachefiles/interface.c
index bb68318fcc1c..d820051a9960 100644
--- a/fs/cachefiles/interface.c
+++ b/fs/cachefiles/interface.c
@@ -37,6 +37,7 @@ struct fscache_object *cachefiles_alloc_object(struct fscache_cookie *cookie,
 		return NULL;
 	}
 
+	rwlock_init(&object->content_map_lock);
 	fscache_object_init(&object->fscache, cookie, &cache->cache);
 	object->fscache.parent = parent;
 	object->fscache.stage = FSCACHE_OBJECT_STAGE_LOOKING_UP;
@@ -198,6 +199,8 @@ static void cachefiles_update_object(struct fscache_object *_object)
 static void cachefiles_commit_object(struct cachefiles_object *object,
 				     struct cachefiles_cache *cache)
 {
+	if (object->content_map_changed)
+		cachefiles_save_content_map(object);
 }
 
 /*
@@ -302,6 +305,8 @@ static void cachefiles_put_object(struct fscache_object *_object,
 		ASSERTCMP(object->dentry, ==, NULL);
 		ASSERTCMP(object->fscache.n_children, ==, 0);
 
+		kfree(object->content_map);
+
 		cache = object->fscache.cache;
 		fscache_object_destroy(&object->fscache);
 		kmem_cache_free(cachefiles_object_jar, object);
diff --git a/fs/cachefiles/internal.h b/fs/cachefiles/internal.h
index e605cffc284d..c7a2a3442061 100644
--- a/fs/cachefiles/internal.h
+++ b/fs/cachefiles/internal.h
@@ -19,6 +19,11 @@
 #include <linux/workqueue.h>
 #include <linux/security.h>
 
+/* Cachefile granularity */
+#define CACHEFILES_GRAN_SIZE	(256 * 1024)
+#define CACHEFILES_GRAN_PAGES	(CACHEFILES_GRAN_SIZE / PAGE_SIZE)
+#define CACHEFILES_DIO_BLOCK_SIZE 4096
+
 struct cachefiles_cache;
 struct cachefiles_object;
 
@@ -29,6 +34,16 @@ extern unsigned cachefiles_debug;
 
 #define cachefiles_gfp (__GFP_RECLAIM | __GFP_NORETRY | __GFP_NOMEMALLOC)
 
+enum cachefiles_content {
+	/* These values are saved on disk */
+	CACHEFILES_CONTENT_NO_DATA	= 0, /* No content stored */
+	CACHEFILES_CONTENT_SINGLE	= 1, /* Content is monolithic, all is present */
+	CACHEFILES_CONTENT_ALL		= 2, /* Content is all present, no map */
+	CACHEFILES_CONTENT_MAP		= 3, /* Content is piecemeal, map in use */
+	CACHEFILES_CONTENT_DIRTY	= 4, /* Content is dirty (only seen on disk) */
+	nr__cachefiles_content
+};
+
 /*
  * node records
  */
@@ -41,6 +56,13 @@ struct cachefiles_object {
 	atomic_t			usage;		/* object usage count */
 	uint8_t				type;		/* object type */
 	bool				new;		/* T if object new */
+
+	/* Map of the content blocks in the object */
+	enum cachefiles_content		content_info:8;	/* Info about content presence */
+	bool				content_map_changed;
+	u8				*content_map;		/* Content present bitmap */
+	unsigned int			content_map_size;	/* Size of buffer */
+	rwlock_t			content_map_lock;
 };
 
 extern struct kmem_cache *cachefiles_object_jar;
@@ -100,6 +122,15 @@ static inline void cachefiles_state_changed(struct cachefiles_cache *cache)
 extern int cachefiles_daemon_bind(struct cachefiles_cache *cache, char *args);
 extern void cachefiles_daemon_unbind(struct cachefiles_cache *cache);
 
+/*
+ * content-map.c
+ */
+extern void cachefiles_mark_content_map(struct fscache_io_request *req);
+extern void cachefiles_expand_content_map(struct cachefiles_object *object, loff_t size);
+extern void cachefiles_shorten_content_map(struct cachefiles_object *object, loff_t new_size);
+extern bool cachefiles_load_content_map(struct cachefiles_object *object);
+extern void cachefiles_save_content_map(struct cachefiles_object *object);
+
 /*
  * daemon.c
  */
diff --git a/fs/cachefiles/io.c b/fs/cachefiles/io.c
index 2384067e716d..642c3fd34809 100644
--- a/fs/cachefiles/io.c
+++ b/fs/cachefiles/io.c
@@ -62,6 +62,10 @@ bool cachefiles_open_object(struct cachefiles_object *object)
 	path.mnt = cache->mnt;
 	path.dentry = object->dentry;
 
+	if (object->content_info == CACHEFILES_CONTENT_MAP &&
+	    !cachefiles_load_content_map(object))
+		goto error;
+
 	file = open_with_fake_path(&path,
 				   O_RDWR | O_LARGEFILE | O_DIRECT,
 				   d_backing_inode(object->dentry),
diff --git a/fs/cachefiles/xattr.c b/fs/cachefiles/xattr.c
index 17c16c2bd07e..a1d4a3d1db69 100644
--- a/fs/cachefiles/xattr.c
+++ b/fs/cachefiles/xattr.c
@@ -16,8 +16,11 @@
 #include "internal.h"
 
 struct cachefiles_xattr {
-	uint8_t				type;
-	uint8_t				data[];
+	__be64	object_size;	/* Actual size of the object */
+	__be64	zero_point;	/* Size after which server has no data not written by us */
+	__u8	type;		/* Type of object */
+	__u8	content;	/* Content presence (enum cachefiles_content) */
+	__u8	data[];		/* netfs coherency data */
 } __packed;
 
 static const char cachefiles_xattr_cache[] =
@@ -118,7 +121,10 @@ int cachefiles_set_object_xattr(struct cachefiles_object *object,
 	if (!buf)
 		return -ENOMEM;
 
-	buf->type = object->fscache.cookie->type;
+	buf->object_size	= cpu_to_be64(object->fscache.cookie->object_size);
+	buf->zero_point		= cpu_to_be64(object->fscache.cookie->zero_point);
+	buf->type		= object->fscache.cookie->type;
+	buf->content		= object->content_info;
 	if (len > 0)
 		memcpy(buf->data, fscache_get_aux(object->fscache.cookie), len);
 
@@ -127,7 +133,7 @@ int cachefiles_set_object_xattr(struct cachefiles_object *object,
 			   xattr_flags);
 	if (ret < 0) {
 		trace_cachefiles_coherency(object, d_inode(dentry)->i_ino,
-					   0,
+					   buf->content,
 					   cachefiles_coherency_set_fail);
 		if (ret != -ENOMEM)
 			cachefiles_io_error_obj(
@@ -135,7 +141,7 @@ int cachefiles_set_object_xattr(struct cachefiles_object *object,
 				"Failed to set xattr with error %d", ret);
 	} else {
 		trace_cachefiles_coherency(object, d_inode(dentry)->i_ino,
-					   0,
+					   buf->content,
 					   cachefiles_coherency_set_ok);
 	}
 
@@ -174,15 +180,21 @@ int cachefiles_check_auxdata(struct cachefiles_object *object)
 		why = cachefiles_coherency_check_xattr;
 	} else if (buf->type != object->fscache.cookie->type) {
 		why = cachefiles_coherency_check_type;
+	} else if (buf->content >= nr__cachefiles_content) {
+		why = cachefiles_coherency_check_content;
 	} else if (memcmp(buf->data, p, len) != 0) {
 		why = cachefiles_coherency_check_aux;
+	} else if (be64_to_cpu(buf->object_size) != object->fscache.cookie->object_size) {
+		why = cachefiles_coherency_check_objsize;
 	} else {
+		object->fscache.cookie->zero_point = be64_to_cpu(buf->zero_point);
+		object->content_info = buf->content;
 		why = cachefiles_coherency_check_ok;
 		ret = 0;
 	}
 
 	trace_cachefiles_coherency(object, d_inode(dentry)->i_ino,
-				   0, why);
+				   buf->content, why);
 	kfree(buf);
 	return ret;
 }
diff --git a/include/trace/events/cachefiles.h b/include/trace/events/cachefiles.h
index bf588c3f4a07..e7af1d683009 100644
--- a/include/trace/events/cachefiles.h
+++ b/include/trace/events/cachefiles.h
@@ -324,7 +324,7 @@ TRACE_EVENT(cachefiles_mark_buried,
 TRACE_EVENT(cachefiles_coherency,
 	    TP_PROTO(struct cachefiles_object *obj,
 		     ino_t ino,
-		     int content,
+		     enum cachefiles_content content,
 		     enum cachefiles_coherency_trace why),
 
 	    TP_ARGS(obj, ino, content, why),
@@ -333,7 +333,7 @@ TRACE_EVENT(cachefiles_coherency,
 	    TP_STRUCT__entry(
 		    __field(unsigned int,			obj	)
 		    __field(enum cachefiles_coherency_trace,	why	)
-		    __field(int,				content	)
+		    __field(enum cachefiles_content,		content	)
 		    __field(u64,				ino	)
 			     ),
 


