ceph-devel.vger.kernel.org archive mirror
From: xiubli@redhat.com
To: idryomov@gmail.com, ceph-devel@vger.kernel.org
Cc: jlayton@kernel.org, lhenriques@suse.de, vshankar@redhat.com,
	mchangir@redhat.com, Xiubo Li <xiubli@redhat.com>
Subject: [PATCH v16 04/68] libceph: add sparse read support to OSD client
Date: Mon, 27 Feb 2023 11:27:09 +0800
Message-ID: <20230227032813.337906-5-xiubli@redhat.com>
In-Reply-To: <20230227032813.337906-1-xiubli@redhat.com>

From: Jeff Layton <jlayton@kernel.org>

Have get_reply check for the presence of sparse read ops in the
request and set the sparse_read boolean in the msg. That will cue the
messenger layer to use the sparse read codepath instead of the normal
data receive.
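
As a rough illustration of that decision (using stand-in types instead of
the real ceph_osd_request/ceph_msg structs, so this is only a sketch of the
logic, not the kernel code):

#include <stdbool.h>
#include <stdint.h>

/* Stand-ins for the kernel types and CEPH_OSD_OP_* values. */
enum { OP_READ, OP_SPARSE_READ, OP_WRITE };

struct req_op { int op; uint64_t length; };
struct request { int num_ops; struct req_op ops[16]; };
struct msg { bool sparse_read; };

/* Total length asked for by sparse read ops (cf. sparse_data_requested()). */
static uint64_t sparse_len(const struct request *req)
{
	uint64_t len = 0;

	for (int i = 0; i < req->num_ops; i++)
		if (req->ops[i].op == OP_SPARSE_READ)
			len += req->ops[i].length;
	return len;
}

/* get_reply() then just flags the reply message before handing it back. */
static void flag_reply(struct msg *m, const struct request *req)
{
	m->sparse_read = sparse_len(req) != 0;
}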

Add a new sparse_read operation for the OSD client, driven by its own
state machine. The messenger will repeatedly call the sparse_read
operation, which passes back the information needed to set up the read
of the next extent of data, zero-filling the sparse regions as it goes.

The state machine will stop at the end of the last extent, and will
attach the extent map buffer to the ceph_osd_req_op so that the caller
can use it.
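
For reference, the reply layout handled by that state machine (a __le32
extent count, then that many __le64 offset/length pairs, then a __le32 data
length, then the extent data back to back) can be decoded in user space
along these lines. This is a minimal sketch under that assumed layout;
decode_sparse_reply() and its parameters are illustrative names only, not
anything added by this patch:

#include <stdint.h>
#include <string.h>
#include <endian.h>

/*
 * Decode one SPARSE_READ reply into dst, which covers the requested
 * range [req_off, req_off + req_len). Holes come back as zeroes.
 */
static int decode_sparse_reply(const uint8_t *p, size_t left,
			       uint64_t req_off, uint64_t req_len,
			       uint8_t *dst)
{
	uint32_t count, datalen;
	const uint8_t *ext, *data_end;
	uint64_t pos = req_off;

	if (left < sizeof(count))
		return -1;
	memcpy(&count, p, sizeof(count));	/* number of extents */
	count = le32toh(count);
	p += sizeof(count);
	left -= sizeof(count);

	if (left < (uint64_t)count * 16 + sizeof(datalen))
		return -1;
	ext = p;				/* extent array */
	p += (uint64_t)count * 16;
	left -= (uint64_t)count * 16;

	memcpy(&datalen, p, sizeof(datalen));	/* length of the data blob */
	datalen = le32toh(datalen);
	p += sizeof(datalen);
	left -= sizeof(datalen);
	if (left < datalen)
		return -1;
	data_end = p + datalen;

	memset(dst, 0, req_len);		/* unwritten regions are holes */

	for (uint32_t i = 0; i < count; i++) {
		uint64_t off, len;

		memcpy(&off, ext + i * 16, 8);
		memcpy(&len, ext + i * 16 + 8, 8);
		off = le64toh(off);
		len = le64toh(len);

		/* extents arrive in order and stay inside the request */
		if (off < pos || off + len > req_off + req_len ||
		    len > (uint64_t)(data_end - p))
			return -1;

		memcpy(dst + (off - req_off), p, len);
		p += len;
		pos = off + len;
	}
	return 0;
}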

Reviewed-by: Xiubo Li <xiubli@redhat.com>
Signed-off-by: Jeff Layton <jlayton@kernel.org>
---
 include/linux/ceph/osd_client.h |  32 ++++
 net/ceph/osd_client.c           | 255 +++++++++++++++++++++++++++++++-
 2 files changed, 283 insertions(+), 4 deletions(-)

diff --git a/include/linux/ceph/osd_client.h b/include/linux/ceph/osd_client.h
index 05da1e755b7b..460881c93f9a 100644
--- a/include/linux/ceph/osd_client.h
+++ b/include/linux/ceph/osd_client.h
@@ -40,6 +40,36 @@ struct ceph_sparse_extent {
 	u64	len;
 } __packed;
 
+/* Sparse read state machine state values */
+enum ceph_sparse_read_state {
+	CEPH_SPARSE_READ_HDR	= 0,
+	CEPH_SPARSE_READ_EXTENTS,
+	CEPH_SPARSE_READ_DATA_LEN,
+	CEPH_SPARSE_READ_DATA,
+};
+
+/*
+ * A SPARSE_READ reply is a 32-bit count of extents, followed by an array of
+ * 64-bit offset/length pairs, and then all of the actual file data
+ * concatenated after it (sans holes).
+ *
+ * Unfortunately, we don't know how long the extent array is until we've
+ * started reading the data section of the reply. The caller should send down
+ * a destination buffer for the array, but we'll alloc one if it's too small
+ * or if the caller doesn't.
+ */
+struct ceph_sparse_read {
+	enum ceph_sparse_read_state	sr_state;	/* state machine state */
+	u64				sr_req_off;	/* orig request offset */
+	u64				sr_req_len;	/* orig request length */
+	u64				sr_pos;		/* current pos in buffer */
+	int				sr_index;	/* current extent index */
+	__le32				sr_datalen;	/* length of actual data */
+	u32				sr_count;	/* extent count in reply */
+	int				sr_ext_len;	/* length of extent array */
+	struct ceph_sparse_extent	*sr_extent;	/* extent array */
+};
+
 /*
  * A given osd we're communicating with.
  *
@@ -48,6 +78,7 @@ struct ceph_sparse_extent {
  */
 struct ceph_osd {
 	refcount_t o_ref;
+	int o_sparse_op_idx;
 	struct ceph_osd_client *o_osdc;
 	int o_osd;
 	int o_incarnation;
@@ -63,6 +94,7 @@ struct ceph_osd {
 	unsigned long lru_ttl;
 	struct list_head o_keepalive_item;
 	struct mutex lock;
+	struct ceph_sparse_read	o_sparse_read;
 };
 
 #define CEPH_OSD_SLAB_OPS	2
diff --git a/net/ceph/osd_client.c b/net/ceph/osd_client.c
index e2f1d1dcbb84..8534ca9c39b9 100644
--- a/net/ceph/osd_client.c
+++ b/net/ceph/osd_client.c
@@ -376,6 +376,7 @@ static void osd_req_op_data_release(struct ceph_osd_request *osd_req,
 
 	switch (op->op) {
 	case CEPH_OSD_OP_READ:
+	case CEPH_OSD_OP_SPARSE_READ:
 	case CEPH_OSD_OP_WRITE:
 	case CEPH_OSD_OP_WRITEFULL:
 		kfree(op->extent.sparse_ext);
@@ -670,6 +671,7 @@ static void get_num_data_items(struct ceph_osd_request *req,
 		/* reply */
 		case CEPH_OSD_OP_STAT:
 		case CEPH_OSD_OP_READ:
+		case CEPH_OSD_OP_SPARSE_READ:
 		case CEPH_OSD_OP_LIST_WATCHERS:
 			*num_reply_data_items += 1;
 			break;
@@ -739,7 +741,7 @@ void osd_req_op_extent_init(struct ceph_osd_request *osd_req,
 
 	BUG_ON(opcode != CEPH_OSD_OP_READ && opcode != CEPH_OSD_OP_WRITE &&
 	       opcode != CEPH_OSD_OP_WRITEFULL && opcode != CEPH_OSD_OP_ZERO &&
-	       opcode != CEPH_OSD_OP_TRUNCATE);
+	       opcode != CEPH_OSD_OP_TRUNCATE && opcode != CEPH_OSD_OP_SPARSE_READ);
 
 	op->extent.offset = offset;
 	op->extent.length = length;
@@ -964,6 +966,7 @@ static u32 osd_req_encode_op(struct ceph_osd_op *dst,
 	case CEPH_OSD_OP_STAT:
 		break;
 	case CEPH_OSD_OP_READ:
+	case CEPH_OSD_OP_SPARSE_READ:
 	case CEPH_OSD_OP_WRITE:
 	case CEPH_OSD_OP_WRITEFULL:
 	case CEPH_OSD_OP_ZERO:
@@ -1060,7 +1063,8 @@ struct ceph_osd_request *ceph_osdc_new_request(struct ceph_osd_client *osdc,
 
 	BUG_ON(opcode != CEPH_OSD_OP_READ && opcode != CEPH_OSD_OP_WRITE &&
 	       opcode != CEPH_OSD_OP_ZERO && opcode != CEPH_OSD_OP_TRUNCATE &&
-	       opcode != CEPH_OSD_OP_CREATE && opcode != CEPH_OSD_OP_DELETE);
+	       opcode != CEPH_OSD_OP_CREATE && opcode != CEPH_OSD_OP_DELETE &&
+	       opcode != CEPH_OSD_OP_SPARSE_READ);
 
 	req = ceph_osdc_alloc_request(osdc, snapc, num_ops, use_mempool,
 					GFP_NOFS);
@@ -1201,6 +1205,13 @@ static void osd_init(struct ceph_osd *osd)
 	mutex_init(&osd->lock);
 }
 
+static void ceph_init_sparse_read(struct ceph_sparse_read *sr)
+{
+	kfree(sr->sr_extent);
+	memset(sr, '\0', sizeof(*sr));
+	sr->sr_state = CEPH_SPARSE_READ_HDR;
+}
+
 static void osd_cleanup(struct ceph_osd *osd)
 {
 	WARN_ON(!RB_EMPTY_NODE(&osd->o_node));
@@ -1211,6 +1222,8 @@ static void osd_cleanup(struct ceph_osd *osd)
 	WARN_ON(!list_empty(&osd->o_osd_lru));
 	WARN_ON(!list_empty(&osd->o_keepalive_item));
 
+	ceph_init_sparse_read(&osd->o_sparse_read);
+
 	if (osd->o_auth.authorizer) {
 		WARN_ON(osd_homeless(osd));
 		ceph_auth_destroy_authorizer(osd->o_auth.authorizer);
@@ -1230,6 +1243,9 @@ static struct ceph_osd *create_osd(struct ceph_osd_client *osdc, int onum)
 	osd_init(osd);
 	osd->o_osdc = osdc;
 	osd->o_osd = onum;
+	osd->o_sparse_op_idx = -1;
+
+	ceph_init_sparse_read(&osd->o_sparse_read);
 
 	ceph_con_init(&osd->o_con, osd, &osd_con_ops, &osdc->client->msgr);
 
@@ -2034,6 +2050,7 @@ static void setup_request_data(struct ceph_osd_request *req)
 					       &op->raw_data_in);
 			break;
 		case CEPH_OSD_OP_READ:
+		case CEPH_OSD_OP_SPARSE_READ:
 			ceph_osdc_msg_data_add(reply_msg,
 					       &op->extent.osd_data);
 			break;
@@ -2453,8 +2470,10 @@ static void finish_request(struct ceph_osd_request *req)
 
 	req->r_end_latency = ktime_get();
 
-	if (req->r_osd)
+	if (req->r_osd) {
+		ceph_init_sparse_read(&req->r_osd->o_sparse_read);
 		unlink_request(req->r_osd, req);
+	}
 	atomic_dec(&osdc->num_requests);
 
 	/*
@@ -5358,6 +5377,24 @@ static void osd_dispatch(struct ceph_connection *con, struct ceph_msg *msg)
 	ceph_msg_put(msg);
 }
 
+/* How much sparse data was requested? */
+static u64 sparse_data_requested(struct ceph_osd_request *req)
+{
+	u64 len = 0;
+
+	if (req->r_flags & CEPH_OSD_FLAG_READ) {
+		int i;
+
+		for (i = 0; i < req->r_num_ops; ++i) {
+			struct ceph_osd_req_op *op = &req->r_ops[i];
+
+			if (op->op == CEPH_OSD_OP_SPARSE_READ)
+				len += op->extent.length;
+		}
+	}
+	return len;
+}
+
 /*
  * Lookup and return message for incoming reply.  Don't try to do
  * anything about a larger than preallocated data portion of the
@@ -5374,6 +5411,7 @@ static struct ceph_msg *get_reply(struct ceph_connection *con,
 	int front_len = le32_to_cpu(hdr->front_len);
 	int data_len = le32_to_cpu(hdr->data_len);
 	u64 tid = le64_to_cpu(hdr->tid);
+	u64 srlen;
 
 	down_read(&osdc->lock);
 	if (!osd_registered(osd)) {
@@ -5406,7 +5444,8 @@ static struct ceph_msg *get_reply(struct ceph_connection *con,
 		req->r_reply = m;
 	}
 
-	if (data_len > req->r_reply->data_length) {
+	srlen = sparse_data_requested(req);
+	if (!srlen && data_len > req->r_reply->data_length) {
 		pr_warn("%s osd%d tid %llu data %d > preallocated %zu, skipping\n",
 			__func__, osd->o_osd, req->r_tid, data_len,
 			req->r_reply->data_length);
@@ -5416,6 +5455,8 @@ static struct ceph_msg *get_reply(struct ceph_connection *con,
 	}
 
 	m = ceph_msg_get(req->r_reply);
+	m->sparse_read = (bool)srlen;
+
 	dout("get_reply tid %lld %p\n", tid, m);
 
 out_unlock_session:
@@ -5648,9 +5689,215 @@ static int osd_check_message_signature(struct ceph_msg *msg)
 	return ceph_auth_check_message_signature(auth, msg);
 }
 
+static void advance_cursor(struct ceph_msg_data_cursor *cursor, size_t len, bool zero)
+{
+	while (len) {
+		struct page *page;
+		size_t poff, plen;
+
+		page = ceph_msg_data_next(cursor, &poff, &plen);
+		if (plen > len)
+			plen = len;
+		if (zero)
+			zero_user_segment(page, poff, poff + plen);
+		len -= plen;
+		ceph_msg_data_advance(cursor, plen);
+	}
+}
+
+static int prep_next_sparse_read(struct ceph_connection *con,
+				 struct ceph_msg_data_cursor *cursor)
+{
+	struct ceph_osd *o = con->private;
+	struct ceph_sparse_read *sr = &o->o_sparse_read;
+	struct ceph_osd_request *req;
+	struct ceph_osd_req_op *op;
+
+	spin_lock(&o->o_requests_lock);
+	req = lookup_request(&o->o_requests, le64_to_cpu(con->in_msg->hdr.tid));
+	if (!req) {
+		spin_unlock(&o->o_requests_lock);
+		return -EBADR;
+	}
+
+	if (o->o_sparse_op_idx < 0) {
+		u64 srlen = sparse_data_requested(req);
+
+		dout("%s: [%d] starting new sparse read req. srlen=0x%llx\n",
+		     __func__, o->o_osd, srlen);
+		ceph_msg_data_cursor_init(cursor, con->in_msg, srlen);
+	} else {
+		u64 end;
+
+		op = &req->r_ops[o->o_sparse_op_idx];
+
+		WARN_ON_ONCE(op->extent.sparse_ext);
+
+		/* hand back buffer we took earlier */
+		op->extent.sparse_ext = sr->sr_extent;
+		sr->sr_extent = NULL;
+		op->extent.sparse_ext_cnt = sr->sr_count;
+		sr->sr_ext_len = 0;
+		dout("%s: [%d] completed extent array len %d cursor->resid %zd\n",
+		     __func__, o->o_osd, op->extent.sparse_ext_cnt, cursor->resid);
+		/* Advance to end of data for this operation */
+		end = ceph_sparse_ext_map_end(op);
+		if (end < sr->sr_req_len)
+			advance_cursor(cursor, sr->sr_req_len - end, false);
+	}
+
+	ceph_init_sparse_read(sr);
+
+	/* find next op in this request (if any) */
+	while (++o->o_sparse_op_idx < req->r_num_ops) {
+		op = &req->r_ops[o->o_sparse_op_idx];
+		if (op->op == CEPH_OSD_OP_SPARSE_READ)
+			goto found;
+	}
+
+	/* reset for next sparse read request */
+	spin_unlock(&o->o_requests_lock);
+	o->o_sparse_op_idx = -1;
+	return 0;
+found:
+	sr->sr_req_off = op->extent.offset;
+	sr->sr_req_len = op->extent.length;
+	sr->sr_pos = sr->sr_req_off;
+	dout("%s: [%d] new sparse read op at idx %d 0x%llx~0x%llx\n", __func__,
+	     o->o_osd, o->o_sparse_op_idx, sr->sr_req_off, sr->sr_req_len);
+
+	/* hand off request's sparse extent map buffer */
+	sr->sr_ext_len = op->extent.sparse_ext_cnt;
+	op->extent.sparse_ext_cnt = 0;
+	sr->sr_extent = op->extent.sparse_ext;
+	op->extent.sparse_ext = NULL;
+
+	spin_unlock(&o->o_requests_lock);
+	return 1;
+}
+
+#ifdef __BIG_ENDIAN
+static inline void convert_extent_map(struct ceph_sparse_read *sr)
+{
+	int i;
+
+	for (i = 0; i < sr->sr_count; i++) {
+		struct ceph_sparse_extent *ext = &sr->sr_extent[i];
+
+		ext->off = le64_to_cpu((__force __le64)ext->off);
+		ext->len = le64_to_cpu((__force __le64)ext->len);
+	}
+}
+#else
+static inline void convert_extent_map(struct ceph_sparse_read *sr)
+{
+}
+#endif
+
+#define MAX_EXTENTS 4096
+
+static int osd_sparse_read(struct ceph_connection *con,
+			   struct ceph_msg_data_cursor *cursor,
+			   char **pbuf)
+{
+	struct ceph_osd *o = con->private;
+	struct ceph_sparse_read *sr = &o->o_sparse_read;
+	u32 count = sr->sr_count;
+	u64 eoff, elen;
+	int ret;
+
+	switch (sr->sr_state) {
+	case CEPH_SPARSE_READ_HDR:
+next_op:
+		ret = prep_next_sparse_read(con, cursor);
+		if (ret <= 0)
+			return ret;
+
+		/* number of extents */
+		ret = sizeof(sr->sr_count);
+		*pbuf = (char *)&sr->sr_count;
+		sr->sr_state = CEPH_SPARSE_READ_EXTENTS;
+		break;
+	case CEPH_SPARSE_READ_EXTENTS:
+		/* Convert sr_count to host-endian */
+		count = le32_to_cpu((__force __le32)sr->sr_count);
+		sr->sr_count = count;
+		dout("[%d] got %u extents\n", o->o_osd, count);
+
+		if (count > 0) {
+			if (!sr->sr_extent || count > sr->sr_ext_len) {
+				/*
+				 * Apply a hard cap to the number of extents.
+				 * If we have more, assume something is wrong.
+				 */
+				if (count > MAX_EXTENTS) {
+					dout("%s: OSD returned 0x%x extents in a single reply!\n",
+						  __func__, count);
+					return -EREMOTEIO;
+				}
+
+				/* no extent array provided, or too short */
+				kfree(sr->sr_extent);
+				sr->sr_extent = kmalloc_array(count,
+							      sizeof(*sr->sr_extent),
+							      GFP_NOIO);
+				if (!sr->sr_extent)
+					return -ENOMEM;
+				sr->sr_ext_len = count;
+			}
+			ret = count * sizeof(*sr->sr_extent);
+			*pbuf = (char *)sr->sr_extent;
+			sr->sr_state = CEPH_SPARSE_READ_DATA_LEN;
+			break;
+		}
+		/* No extents? Read data len */
+		fallthrough;
+	case CEPH_SPARSE_READ_DATA_LEN:
+		convert_extent_map(sr);
+		ret = sizeof(sr->sr_datalen);
+		*pbuf = (char *)&sr->sr_datalen;
+		sr->sr_state = CEPH_SPARSE_READ_DATA;
+		break;
+	case CEPH_SPARSE_READ_DATA:
+		if (sr->sr_index >= count) {
+			sr->sr_state = CEPH_SPARSE_READ_HDR;
+			goto next_op;
+		}
+
+		eoff = sr->sr_extent[sr->sr_index].off;
+		elen = sr->sr_extent[sr->sr_index].len;
+
+		dout("[%d] ext %d off 0x%llx len 0x%llx\n",
+		     o->o_osd, sr->sr_index, eoff, elen);
+
+		if (elen > INT_MAX) {
+			dout("Sparse read extent length too long (0x%llx)\n", elen);
+			return -EREMOTEIO;
+		}
+
+		/* zero out anything from sr_pos to start of extent */
+		if (sr->sr_pos < eoff)
+			advance_cursor(cursor, eoff - sr->sr_pos, true);
+
+		/* Set position to end of extent */
+		sr->sr_pos = eoff + elen;
+
+		/* send back the new length and nullify the ptr */
+		cursor->sr_resid = elen;
+		ret = elen;
+		*pbuf = NULL;
+
+		/* Bump the array index */
+		++sr->sr_index;
+		break;
+	}
+	return ret;
+}
+
 static const struct ceph_connection_operations osd_con_ops = {
 	.get = osd_get_con,
 	.put = osd_put_con,
+	.sparse_read = osd_sparse_read,
 	.alloc_msg = osd_alloc_msg,
 	.dispatch = osd_dispatch,
 	.fault = osd_fault,
-- 
2.31.1



Thread overview: 84+ messages
2023-02-27  3:27 [PATCH v16 00/68] ceph+fscrypt: full support xiubli
2023-02-27  3:27 ` [PATCH v16 01/68] libceph: add spinlock around osd->o_requests xiubli
2023-02-27  3:27 ` [PATCH v16 02/68] libceph: define struct ceph_sparse_extent and add some helpers xiubli
2023-02-27  3:27 ` [PATCH v16 03/68] libceph: add sparse read support to msgr2 crc state machine xiubli
2023-02-27  3:27 ` xiubli [this message]
2023-02-27  3:27 ` [PATCH v16 05/68] libceph: support sparse reads on msgr2 secure codepath xiubli
2023-02-27  3:27 ` [PATCH v16 06/68] libceph: add sparse read support to msgr1 xiubli
2023-02-27  3:27 ` [PATCH v16 07/68] ceph: add new mount option to enable sparse reads xiubli
2023-02-27  3:27 ` [PATCH v16 08/68] ceph: preallocate inode for ops that may create one xiubli
2023-02-27  3:27 ` [PATCH v16 09/68] ceph: make ceph_msdc_build_path use ref-walk xiubli
2023-02-27  3:27 ` [PATCH v16 10/68] libceph: add new iov_iter-based ceph_msg_data_type and ceph_osd_data_type xiubli
2023-02-27  3:27 ` [PATCH v16 11/68] ceph: use osd_req_op_extent_osd_iter for netfs reads xiubli
2023-02-27  3:27 ` [PATCH v16 12/68] ceph: fscrypt_auth handling for ceph xiubli
2023-02-27  3:27 ` [PATCH v16 13/68] ceph: ensure that we accept a new context from MDS for new inodes xiubli
2023-02-27  3:27 ` [PATCH v16 14/68] ceph: add support for fscrypt_auth/fscrypt_file to cap messages xiubli
2023-02-27  3:27 ` [PATCH v16 15/68] ceph: implement -o test_dummy_encryption mount option xiubli
2023-02-27  3:27 ` [PATCH v16 16/68] ceph: decode alternate_name in lease info xiubli
2023-02-27  3:27 ` [PATCH v16 17/68] ceph: add fscrypt ioctls xiubli
2023-02-27  3:27 ` [PATCH v16 18/68] ceph: make the ioctl cmd more readable in debug log xiubli
2023-02-27  3:27 ` [PATCH v16 19/68] ceph: add base64 endcoding routines for encrypted names xiubli
2023-02-27  3:27 ` [PATCH v16 20/68] ceph: add encrypted fname handling to ceph_mdsc_build_path xiubli
2023-02-27  3:27 ` [PATCH v16 21/68] ceph: send altname in MClientRequest xiubli
2023-02-27  3:27 ` [PATCH v16 22/68] ceph: encode encrypted name in dentry release xiubli
2023-02-27  3:27 ` [PATCH v16 23/68] ceph: properly set DCACHE_NOKEY_NAME flag in lookup xiubli
2023-02-27  3:27 ` [PATCH v16 24/68] ceph: set DCACHE_NOKEY_NAME in atomic open xiubli
2023-02-27  3:27 ` [PATCH v16 25/68] ceph: make d_revalidate call fscrypt revalidator for encrypted dentries xiubli
2023-03-07 18:53   ` Luís Henriques
2023-03-08  1:50     ` Xiubo Li
2023-03-08  9:29       ` Luís Henriques
2023-03-08 10:42         ` Xiubo Li
2023-03-08 17:14           ` Luís Henriques
2023-03-08 17:54             ` Jeff Layton
2023-03-08 18:30               ` Luís Henriques
2023-03-08 19:32                 ` Jeff Layton
2023-03-09  9:52                   ` Luís Henriques
2023-03-09  7:06             ` Xiubo Li
2023-03-09  9:55               ` Luís Henriques
2023-03-09 11:41                 ` Xiubo Li
2023-02-27  3:27 ` [PATCH v16 26/68] ceph: add helpers for converting names for userland presentation xiubli
2023-02-27  3:27 ` [PATCH v16 27/68] ceph: fix base64 encoded name's length check in ceph_fname_to_usr() xiubli
2023-02-27  3:27 ` [PATCH v16 28/68] ceph: add fscrypt support to ceph_fill_trace xiubli
2023-02-27  3:27 ` [PATCH v16 29/68] ceph: pass the request to parse_reply_info_readdir() xiubli
2023-02-27  3:27 ` [PATCH v16 30/68] ceph: add ceph_encode_encrypted_dname() helper xiubli
2023-02-27  3:27 ` [PATCH v16 31/68] ceph: add support to readdir for encrypted filenames xiubli
2023-02-27  3:27 ` [PATCH v16 32/68] ceph: create symlinks with encrypted and base64-encoded targets xiubli
2023-02-27  3:27 ` [PATCH v16 33/68] ceph: make ceph_get_name decrypt filenames xiubli
2023-02-27  3:27 ` [PATCH v16 34/68] ceph: add a new ceph.fscrypt.auth vxattr xiubli
2023-02-27  3:27 ` [PATCH v16 35/68] ceph: add some fscrypt guardrails xiubli
2023-02-27  3:27 ` [PATCH v16 36/68] ceph: allow encrypting a directory while not having Ax caps xiubli
2023-02-27  3:27 ` [PATCH v16 37/68] ceph: mark directory as non-complete after loading key xiubli
2023-02-27  3:27 ` [PATCH v16 38/68] ceph: don't allow changing layout on encrypted files/directories xiubli
2023-02-27  3:27 ` [PATCH v16 39/68] libceph: add CEPH_OSD_OP_ASSERT_VER support xiubli
2023-02-27  3:27 ` [PATCH v16 40/68] ceph: size handling for encrypted inodes in cap updates xiubli
2023-02-27  3:27 ` [PATCH v16 41/68] ceph: fscrypt_file field handling in MClientRequest messages xiubli
2023-02-27  3:27 ` [PATCH v16 42/68] ceph: get file size from fscrypt_file when present in inode traces xiubli
2023-02-27  3:27 ` [PATCH v16 43/68] ceph: handle fscrypt fields in cap messages from MDS xiubli
2023-02-27  3:27 ` [PATCH v16 44/68] ceph: update WARN_ON message to pr_warn xiubli
2023-02-27  3:27 ` [PATCH v16 45/68] ceph: add __ceph_get_caps helper support xiubli
2023-02-27  3:27 ` [PATCH v16 46/68] ceph: add __ceph_sync_read " xiubli
2023-02-27  3:27 ` [PATCH v16 47/68] ceph: add object version support for sync read xiubli
2023-02-27  3:27 ` [PATCH v16 48/68] ceph: add infrastructure for file encryption and decryption xiubli
2023-02-27  3:27 ` [PATCH v16 49/68] ceph: add truncate size handling support for fscrypt xiubli
2023-02-27  3:27 ` [PATCH v16 50/68] libceph: allow ceph_osdc_new_request to accept a multi-op read xiubli
2023-02-27  3:27 ` [PATCH v16 51/68] ceph: disable fallocate for encrypted inodes xiubli
2023-02-27  3:27 ` [PATCH v16 52/68] ceph: disable copy offload on " xiubli
2023-02-27  3:27 ` [PATCH v16 53/68] ceph: don't use special DIO path for " xiubli
2023-02-27  3:27 ` [PATCH v16 54/68] ceph: align data in pages in ceph_sync_write xiubli
2023-02-27  3:28 ` [PATCH v16 55/68] ceph: add read/modify/write to ceph_sync_write xiubli
2023-02-27  3:28 ` [PATCH v16 56/68] ceph: plumb in decryption during sync reads xiubli
2023-02-27  3:28 ` [PATCH v16 57/68] ceph: add fscrypt decryption support to ceph_netfs_issue_op xiubli
2023-02-27  3:28 ` [PATCH v16 58/68] ceph: set i_blkbits to crypto block size for encrypted inodes xiubli
2023-02-27  3:28 ` [PATCH v16 59/68] ceph: add encryption support to writepage xiubli
2023-02-27  3:28 ` [PATCH v16 60/68] ceph: fscrypt support for writepages xiubli
2023-02-27  3:28 ` [PATCH v16 61/68] ceph: invalidate pages when doing direct/sync writes xiubli
2023-02-27  3:28 ` [PATCH v16 62/68] ceph: add support for encrypted snapshot names xiubli
2023-02-27  3:28 ` [PATCH v16 63/68] ceph: add support for handling " xiubli
2023-02-27  3:28 ` [PATCH v16 64/68] ceph: update documentation regarding snapshot naming limitations xiubli
2023-02-27  3:28 ` [PATCH v16 65/68] ceph: prevent snapshots to be created in encrypted locked directories xiubli
2023-02-27  3:28 ` [PATCH v16 66/68] ceph: report STATX_ATTR_ENCRYPTED on encrypted inodes xiubli
2023-02-27  3:28 ` [PATCH v16 67/68] libceph: defer removing the req from osdc just after req->r_callback xiubli
2023-02-27  3:28 ` [PATCH v16 68/68] ceph: drop the messages from MDS when unmounting xiubli
2023-02-27  9:27 ` [PATCH v16 00/68] ceph+fscrypt: full support Luís Henriques
2023-02-27  9:58   ` Xiubo Li
2023-02-27 10:30     ` Luís Henriques
