From: xiubli@redhat.com
To: idryomov@gmail.com, ceph-devel@vger.kernel.org
Cc: jlayton@kernel.org, vshankar@redhat.com, lhenriques@suse.de,
	mchangir@redhat.com, Xiubo Li <xiubli@redhat.com>
Subject: [PATCH v19 04/70] libceph: add sparse read support to OSD client
Date: Mon, 17 Apr 2023 11:25:48 +0800
Message-ID: <20230417032654.32352-5-xiubli@redhat.com>
In-Reply-To: <20230417032654.32352-1-xiubli@redhat.com>

From: Jeff Layton <jlayton@kernel.org>

Have get_reply() check for the presence of sparse read ops in the
request and set the sparse_read boolean in the msg. That will cue the
messenger layer to use the sparse read codepath instead of the normal
data receive path.

Add a new sparse_read operation for the OSD client, driven by its own
state machine. The messenger will repeatedly call the sparse_read
operation, which passes back the information needed to read the next
extent of data, zero-filling the sparse regions as it goes.
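
As a worked illustration (not taken from the patch; the framing follows
the comment this patch adds to osd_client.h and the state machine below),
a sparse read of 0x0~0x10000 that hits two extents would come back
roughly as:

	__le32 extent count = 2
	{ off = 0x0000, len = 0x1000 }    (extent 0)
	{ off = 0x8000, len = 0x2000 }    (extent 1)
	__le32 data length  = 0x3000
	0x3000 bytes of file data, the extent payloads back to back (no holes)

The state machine reads the count, then the extent array, then the data
length, and finally the data itself, zero-filling the hole between the
two extents in the destination buffer.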

The state machine will stop at the end of the last extent, and will
attach the extent map buffer to the ceph_osd_req_op so that the caller
can use it.
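
A minimal caller-side sketch of how that extent map might be consumed
once the request has completed. The dump_sparse_extents() helper below is
hypothetical and not part of the patch; the extent.sparse_ext fields and
ceph_sparse_ext_map_end() come from earlier patches in this series:

	#include <linux/ceph/osd_client.h>

	/*
	 * Illustrative only: walk the extent map that the reply path attached
	 * to a completed CEPH_OSD_OP_SPARSE_READ op at index "which".
	 */
	static void dump_sparse_extents(struct ceph_osd_request *req, int which)
	{
		struct ceph_osd_req_op *op = &req->r_ops[which];
		u64 total = 0;
		int i;

		for (i = 0; i < op->extent.sparse_ext_cnt; i++) {
			struct ceph_sparse_extent *ext = &op->extent.sparse_ext[i];

			/* offsets and lengths are in host byte order by now */
			pr_info("extent %d: 0x%llx~0x%llx\n", i, ext->off, ext->len);
			total += ext->len;
		}

		/* end of the mapped range and total bytes of non-hole data */
		pr_info("map end 0x%llx, 0x%llx bytes of data\n",
			ceph_sparse_ext_map_end(op), total);
	}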

Tested-by: Luís Henriques <lhenriques@suse.de>
Tested-by: Venky Shankar <vshankar@redhat.com>
Reviewed-by: Luís Henriques <lhenriques@suse.de>
Reviewed-by: Xiubo Li <xiubli@redhat.com>
Signed-off-by: Jeff Layton <jlayton@kernel.org>
---
 include/linux/ceph/osd_client.h |  32 ++++
 net/ceph/osd_client.c           | 255 +++++++++++++++++++++++++++++++-
 2 files changed, 283 insertions(+), 4 deletions(-)

diff --git a/include/linux/ceph/osd_client.h b/include/linux/ceph/osd_client.h
index 05da1e755b7b..460881c93f9a 100644
--- a/include/linux/ceph/osd_client.h
+++ b/include/linux/ceph/osd_client.h
@@ -40,6 +40,36 @@ struct ceph_sparse_extent {
 	u64	len;
 } __packed;
 
+/* Sparse read state machine state values */
+enum ceph_sparse_read_state {
+	CEPH_SPARSE_READ_HDR	= 0,
+	CEPH_SPARSE_READ_EXTENTS,
+	CEPH_SPARSE_READ_DATA_LEN,
+	CEPH_SPARSE_READ_DATA,
+};
+
+/*
+ * A SPARSE_READ reply is a 32-bit count of extents, followed by an array of
+ * 64-bit offset/length pairs, and then all of the actual file data
+ * concatenated after it (sans holes).
+ *
+ * Unfortunately, we don't know how long the extent array is until we've
+ * started reading the data section of the reply. The caller should send down
+ * a destination buffer for the array, but we'll alloc one if it's too small
+ * or if the caller doesn't.
+ */
+struct ceph_sparse_read {
+	enum ceph_sparse_read_state	sr_state;	/* state machine state */
+	u64				sr_req_off;	/* orig request offset */
+	u64				sr_req_len;	/* orig request length */
+	u64				sr_pos;		/* current pos in buffer */
+	int				sr_index;	/* current extent index */
+	__le32				sr_datalen;	/* length of actual data */
+	u32				sr_count;	/* extent count in reply */
+	int				sr_ext_len;	/* length of extent array */
+	struct ceph_sparse_extent	*sr_extent;	/* extent array */
+};
+
 /*
  * A given osd we're communicating with.
  *
@@ -48,6 +78,7 @@ struct ceph_sparse_extent {
  */
 struct ceph_osd {
 	refcount_t o_ref;
+	int o_sparse_op_idx;
 	struct ceph_osd_client *o_osdc;
 	int o_osd;
 	int o_incarnation;
@@ -63,6 +94,7 @@ struct ceph_osd {
 	unsigned long lru_ttl;
 	struct list_head o_keepalive_item;
 	struct mutex lock;
+	struct ceph_sparse_read	o_sparse_read;
 };
 
 #define CEPH_OSD_SLAB_OPS	2
diff --git a/net/ceph/osd_client.c b/net/ceph/osd_client.c
index e2f1d1dcbb84..8534ca9c39b9 100644
--- a/net/ceph/osd_client.c
+++ b/net/ceph/osd_client.c
@@ -376,6 +376,7 @@ static void osd_req_op_data_release(struct ceph_osd_request *osd_req,
 
 	switch (op->op) {
 	case CEPH_OSD_OP_READ:
+	case CEPH_OSD_OP_SPARSE_READ:
 	case CEPH_OSD_OP_WRITE:
 	case CEPH_OSD_OP_WRITEFULL:
 		kfree(op->extent.sparse_ext);
@@ -670,6 +671,7 @@ static void get_num_data_items(struct ceph_osd_request *req,
 		/* reply */
 		case CEPH_OSD_OP_STAT:
 		case CEPH_OSD_OP_READ:
+		case CEPH_OSD_OP_SPARSE_READ:
 		case CEPH_OSD_OP_LIST_WATCHERS:
 			*num_reply_data_items += 1;
 			break;
@@ -739,7 +741,7 @@ void osd_req_op_extent_init(struct ceph_osd_request *osd_req,
 
 	BUG_ON(opcode != CEPH_OSD_OP_READ && opcode != CEPH_OSD_OP_WRITE &&
 	       opcode != CEPH_OSD_OP_WRITEFULL && opcode != CEPH_OSD_OP_ZERO &&
-	       opcode != CEPH_OSD_OP_TRUNCATE);
+	       opcode != CEPH_OSD_OP_TRUNCATE && opcode != CEPH_OSD_OP_SPARSE_READ);
 
 	op->extent.offset = offset;
 	op->extent.length = length;
@@ -964,6 +966,7 @@ static u32 osd_req_encode_op(struct ceph_osd_op *dst,
 	case CEPH_OSD_OP_STAT:
 		break;
 	case CEPH_OSD_OP_READ:
+	case CEPH_OSD_OP_SPARSE_READ:
 	case CEPH_OSD_OP_WRITE:
 	case CEPH_OSD_OP_WRITEFULL:
 	case CEPH_OSD_OP_ZERO:
@@ -1060,7 +1063,8 @@ struct ceph_osd_request *ceph_osdc_new_request(struct ceph_osd_client *osdc,
 
 	BUG_ON(opcode != CEPH_OSD_OP_READ && opcode != CEPH_OSD_OP_WRITE &&
 	       opcode != CEPH_OSD_OP_ZERO && opcode != CEPH_OSD_OP_TRUNCATE &&
-	       opcode != CEPH_OSD_OP_CREATE && opcode != CEPH_OSD_OP_DELETE);
+	       opcode != CEPH_OSD_OP_CREATE && opcode != CEPH_OSD_OP_DELETE &&
+	       opcode != CEPH_OSD_OP_SPARSE_READ);
 
 	req = ceph_osdc_alloc_request(osdc, snapc, num_ops, use_mempool,
 					GFP_NOFS);
@@ -1201,6 +1205,13 @@ static void osd_init(struct ceph_osd *osd)
 	mutex_init(&osd->lock);
 }
 
+static void ceph_init_sparse_read(struct ceph_sparse_read *sr)
+{
+	kfree(sr->sr_extent);
+	memset(sr, '\0', sizeof(*sr));
+	sr->sr_state = CEPH_SPARSE_READ_HDR;
+}
+
 static void osd_cleanup(struct ceph_osd *osd)
 {
 	WARN_ON(!RB_EMPTY_NODE(&osd->o_node));
@@ -1211,6 +1222,8 @@ static void osd_cleanup(struct ceph_osd *osd)
 	WARN_ON(!list_empty(&osd->o_osd_lru));
 	WARN_ON(!list_empty(&osd->o_keepalive_item));
 
+	ceph_init_sparse_read(&osd->o_sparse_read);
+
 	if (osd->o_auth.authorizer) {
 		WARN_ON(osd_homeless(osd));
 		ceph_auth_destroy_authorizer(osd->o_auth.authorizer);
@@ -1230,6 +1243,9 @@ static struct ceph_osd *create_osd(struct ceph_osd_client *osdc, int onum)
 	osd_init(osd);
 	osd->o_osdc = osdc;
 	osd->o_osd = onum;
+	osd->o_sparse_op_idx = -1;
+
+	ceph_init_sparse_read(&osd->o_sparse_read);
 
 	ceph_con_init(&osd->o_con, osd, &osd_con_ops, &osdc->client->msgr);
 
@@ -2034,6 +2050,7 @@ static void setup_request_data(struct ceph_osd_request *req)
 					       &op->raw_data_in);
 			break;
 		case CEPH_OSD_OP_READ:
+		case CEPH_OSD_OP_SPARSE_READ:
 			ceph_osdc_msg_data_add(reply_msg,
 					       &op->extent.osd_data);
 			break;
@@ -2453,8 +2470,10 @@ static void finish_request(struct ceph_osd_request *req)
 
 	req->r_end_latency = ktime_get();
 
-	if (req->r_osd)
+	if (req->r_osd) {
+		ceph_init_sparse_read(&req->r_osd->o_sparse_read);
 		unlink_request(req->r_osd, req);
+	}
 	atomic_dec(&osdc->num_requests);
 
 	/*
@@ -5358,6 +5377,24 @@ static void osd_dispatch(struct ceph_connection *con, struct ceph_msg *msg)
 	ceph_msg_put(msg);
 }
 
+/* How much sparse data was requested? */
+static u64 sparse_data_requested(struct ceph_osd_request *req)
+{
+	u64 len = 0;
+
+	if (req->r_flags & CEPH_OSD_FLAG_READ) {
+		int i;
+
+		for (i = 0; i < req->r_num_ops; ++i) {
+			struct ceph_osd_req_op *op = &req->r_ops[i];
+
+			if (op->op == CEPH_OSD_OP_SPARSE_READ)
+				len += op->extent.length;
+		}
+	}
+	return len;
+}
+
 /*
  * Lookup and return message for incoming reply.  Don't try to do
  * anything about a larger than preallocated data portion of the
@@ -5374,6 +5411,7 @@ static struct ceph_msg *get_reply(struct ceph_connection *con,
 	int front_len = le32_to_cpu(hdr->front_len);
 	int data_len = le32_to_cpu(hdr->data_len);
 	u64 tid = le64_to_cpu(hdr->tid);
+	u64 srlen;
 
 	down_read(&osdc->lock);
 	if (!osd_registered(osd)) {
@@ -5406,7 +5444,8 @@ static struct ceph_msg *get_reply(struct ceph_connection *con,
 		req->r_reply = m;
 	}
 
-	if (data_len > req->r_reply->data_length) {
+	srlen = sparse_data_requested(req);
+	if (!srlen && data_len > req->r_reply->data_length) {
 		pr_warn("%s osd%d tid %llu data %d > preallocated %zu, skipping\n",
 			__func__, osd->o_osd, req->r_tid, data_len,
 			req->r_reply->data_length);
@@ -5416,6 +5455,8 @@ static struct ceph_msg *get_reply(struct ceph_connection *con,
 	}
 
 	m = ceph_msg_get(req->r_reply);
+	m->sparse_read = (bool)srlen;
+
 	dout("get_reply tid %lld %p\n", tid, m);
 
 out_unlock_session:
@@ -5648,9 +5689,215 @@ static int osd_check_message_signature(struct ceph_msg *msg)
 	return ceph_auth_check_message_signature(auth, msg);
 }
 
+static void advance_cursor(struct ceph_msg_data_cursor *cursor, size_t len, bool zero)
+{
+	while (len) {
+		struct page *page;
+		size_t poff, plen;
+
+		page = ceph_msg_data_next(cursor, &poff, &plen);
+		if (plen > len)
+			plen = len;
+		if (zero)
+			zero_user_segment(page, poff, poff + plen);
+		len -= plen;
+		ceph_msg_data_advance(cursor, plen);
+	}
+}
+
+static int prep_next_sparse_read(struct ceph_connection *con,
+				 struct ceph_msg_data_cursor *cursor)
+{
+	struct ceph_osd *o = con->private;
+	struct ceph_sparse_read *sr = &o->o_sparse_read;
+	struct ceph_osd_request *req;
+	struct ceph_osd_req_op *op;
+
+	spin_lock(&o->o_requests_lock);
+	req = lookup_request(&o->o_requests, le64_to_cpu(con->in_msg->hdr.tid));
+	if (!req) {
+		spin_unlock(&o->o_requests_lock);
+		return -EBADR;
+	}
+
+	if (o->o_sparse_op_idx < 0) {
+		u64 srlen = sparse_data_requested(req);
+
+		dout("%s: [%d] starting new sparse read req. srlen=0x%llx\n",
+		     __func__, o->o_osd, srlen);
+		ceph_msg_data_cursor_init(cursor, con->in_msg, srlen);
+	} else {
+		u64 end;
+
+		op = &req->r_ops[o->o_sparse_op_idx];
+
+		WARN_ON_ONCE(op->extent.sparse_ext);
+
+		/* hand back buffer we took earlier */
+		op->extent.sparse_ext = sr->sr_extent;
+		sr->sr_extent = NULL;
+		op->extent.sparse_ext_cnt = sr->sr_count;
+		sr->sr_ext_len = 0;
+		dout("%s: [%d] completed extent array len %d cursor->resid %zd\n",
+		     __func__, o->o_osd, op->extent.sparse_ext_cnt, cursor->resid);
+		/* Advance to end of data for this operation */
+		end = ceph_sparse_ext_map_end(op);
+		if (end < sr->sr_req_len)
+			advance_cursor(cursor, sr->sr_req_len - end, false);
+	}
+
+	ceph_init_sparse_read(sr);
+
+	/* find next op in this request (if any) */
+	while (++o->o_sparse_op_idx < req->r_num_ops) {
+		op = &req->r_ops[o->o_sparse_op_idx];
+		if (op->op == CEPH_OSD_OP_SPARSE_READ)
+			goto found;
+	}
+
+	/* reset for next sparse read request */
+	spin_unlock(&o->o_requests_lock);
+	o->o_sparse_op_idx = -1;
+	return 0;
+found:
+	sr->sr_req_off = op->extent.offset;
+	sr->sr_req_len = op->extent.length;
+	sr->sr_pos = sr->sr_req_off;
+	dout("%s: [%d] new sparse read op at idx %d 0x%llx~0x%llx\n", __func__,
+	     o->o_osd, o->o_sparse_op_idx, sr->sr_req_off, sr->sr_req_len);
+
+	/* hand off request's sparse extent map buffer */
+	sr->sr_ext_len = op->extent.sparse_ext_cnt;
+	op->extent.sparse_ext_cnt = 0;
+	sr->sr_extent = op->extent.sparse_ext;
+	op->extent.sparse_ext = NULL;
+
+	spin_unlock(&o->o_requests_lock);
+	return 1;
+}
+
+#ifdef __BIG_ENDIAN
+static inline void convert_extent_map(struct ceph_sparse_read *sr)
+{
+	int i;
+
+	for (i = 0; i < sr->sr_count; i++) {
+		struct ceph_sparse_extent *ext = &sr->sr_extent[i];
+
+		ext->off = le64_to_cpu((__force __le64)ext->off);
+		ext->len = le64_to_cpu((__force __le64)ext->len);
+	}
+}
+#else
+static inline void convert_extent_map(struct ceph_sparse_read *sr)
+{
+}
+#endif
+
+#define MAX_EXTENTS 4096
+
+static int osd_sparse_read(struct ceph_connection *con,
+			   struct ceph_msg_data_cursor *cursor,
+			   char **pbuf)
+{
+	struct ceph_osd *o = con->private;
+	struct ceph_sparse_read *sr = &o->o_sparse_read;
+	u32 count = sr->sr_count;
+	u64 eoff, elen;
+	int ret;
+
+	switch (sr->sr_state) {
+	case CEPH_SPARSE_READ_HDR:
+next_op:
+		ret = prep_next_sparse_read(con, cursor);
+		if (ret <= 0)
+			return ret;
+
+		/* number of extents */
+		ret = sizeof(sr->sr_count);
+		*pbuf = (char *)&sr->sr_count;
+		sr->sr_state = CEPH_SPARSE_READ_EXTENTS;
+		break;
+	case CEPH_SPARSE_READ_EXTENTS:
+		/* Convert sr_count to host-endian */
+		count = le32_to_cpu((__force __le32)sr->sr_count);
+		sr->sr_count = count;
+		dout("[%d] got %u extents\n", o->o_osd, count);
+
+		if (count > 0) {
+			if (!sr->sr_extent || count > sr->sr_ext_len) {
+				/*
+				 * Apply a hard cap to the number of extents.
+				 * If we have more, assume something is wrong.
+				 */
+				if (count > MAX_EXTENTS) {
+					dout("%s: OSD returned 0x%x extents in a single reply!\n",
+						  __func__, count);
+					return -EREMOTEIO;
+				}
+
+				/* no extent array provided, or too short */
+				kfree(sr->sr_extent);
+				sr->sr_extent = kmalloc_array(count,
+							      sizeof(*sr->sr_extent),
+							      GFP_NOIO);
+				if (!sr->sr_extent)
+					return -ENOMEM;
+				sr->sr_ext_len = count;
+			}
+			ret = count * sizeof(*sr->sr_extent);
+			*pbuf = (char *)sr->sr_extent;
+			sr->sr_state = CEPH_SPARSE_READ_DATA_LEN;
+			break;
+		}
+		/* No extents? Read data len */
+		fallthrough;
+	case CEPH_SPARSE_READ_DATA_LEN:
+		convert_extent_map(sr);
+		ret = sizeof(sr->sr_datalen);
+		*pbuf = (char *)&sr->sr_datalen;
+		sr->sr_state = CEPH_SPARSE_READ_DATA;
+		break;
+	case CEPH_SPARSE_READ_DATA:
+		if (sr->sr_index >= count) {
+			sr->sr_state = CEPH_SPARSE_READ_HDR;
+			goto next_op;
+		}
+
+		eoff = sr->sr_extent[sr->sr_index].off;
+		elen = sr->sr_extent[sr->sr_index].len;
+
+		dout("[%d] ext %d off 0x%llx len 0x%llx\n",
+		     o->o_osd, sr->sr_index, eoff, elen);
+
+		if (elen > INT_MAX) {
+			dout("Sparse read extent length too long (0x%llx)\n", elen);
+			return -EREMOTEIO;
+		}
+
+		/* zero out anything from sr_pos to start of extent */
+		if (sr->sr_pos < eoff)
+			advance_cursor(cursor, eoff - sr->sr_pos, true);
+
+		/* Set position to end of extent */
+		sr->sr_pos = eoff + elen;
+
+		/* send back the new length and nullify the ptr */
+		cursor->sr_resid = elen;
+		ret = elen;
+		*pbuf = NULL;
+
+		/* Bump the array index */
+		++sr->sr_index;
+		break;
+	}
+	return ret;
+}
+
 static const struct ceph_connection_operations osd_con_ops = {
 	.get = osd_get_con,
 	.put = osd_put_con,
+	.sparse_read = osd_sparse_read,
 	.alloc_msg = osd_alloc_msg,
 	.dispatch = osd_dispatch,
 	.fault = osd_fault,
-- 
2.39.1


Thread overview: 77+ messages
2023-04-17  3:25 [PATCH v19 00/70] ceph+fscrypt: full support xiubli
2023-04-17  3:25 ` [PATCH v19 01/70] libceph: add spinlock around osd->o_requests xiubli
2023-04-17  3:25 ` [PATCH v19 02/70] libceph: define struct ceph_sparse_extent and add some helpers xiubli
2023-04-17  3:25 ` [PATCH v19 03/70] libceph: add sparse read support to msgr2 crc state machine xiubli
2023-04-17  3:25 ` xiubli [this message]
2023-04-17  3:25 ` [PATCH v19 05/70] libceph: support sparse reads on msgr2 secure codepath xiubli
2023-04-17  3:25 ` [PATCH v19 06/70] libceph: add sparse read support to msgr1 xiubli
2023-04-17  3:25 ` [PATCH v19 07/70] ceph: add new mount option to enable sparse reads xiubli
2023-04-17  3:25 ` [PATCH v19 08/70] ceph: preallocate inode for ops that may create one xiubli
2023-04-17  3:25 ` [PATCH v19 09/70] ceph: make ceph_msdc_build_path use ref-walk xiubli
2023-04-17  3:25 ` [PATCH v19 10/70] libceph: add new iov_iter-based ceph_msg_data_type and ceph_osd_data_type xiubli
2023-04-17  3:25 ` [PATCH v19 11/70] ceph: use osd_req_op_extent_osd_iter for netfs reads xiubli
2023-04-17  3:25 ` [PATCH v19 12/70] ceph: fscrypt_auth handling for ceph xiubli
2023-04-17  3:25 ` [PATCH v19 13/70] ceph: ensure that we accept a new context from MDS for new inodes xiubli
2023-04-17  3:25 ` [PATCH v19 14/70] ceph: add support for fscrypt_auth/fscrypt_file to cap messages xiubli
2023-04-17  3:25 ` [PATCH v19 15/70] ceph: implement -o test_dummy_encryption mount option xiubli
2023-04-17  3:26 ` [PATCH v19 16/70] ceph: decode alternate_name in lease info xiubli
2023-04-17  3:26 ` [PATCH v19 17/70] ceph: add fscrypt ioctls xiubli
2023-04-17  3:26 ` [PATCH v19 18/70] ceph: make the ioctl cmd more readable in debug log xiubli
2023-04-17  3:26 ` [PATCH v19 19/70] ceph: add base64 endcoding routines for encrypted names xiubli
2023-04-17  3:26 ` [PATCH v19 20/70] ceph: add encrypted fname handling to ceph_mdsc_build_path xiubli
2023-04-17  3:26 ` [PATCH v19 21/70] ceph: send altname in MClientRequest xiubli
2023-04-17  3:26 ` [PATCH v19 22/70] ceph: encode encrypted name in dentry release xiubli
2023-04-17  3:26 ` [PATCH v19 23/70] ceph: properly set DCACHE_NOKEY_NAME flag in lookup xiubli
2023-04-17  3:26 ` [PATCH v19 24/70] ceph: set DCACHE_NOKEY_NAME in atomic open xiubli
2023-04-17  3:26 ` [PATCH v19 25/70] ceph: make d_revalidate call fscrypt revalidator for encrypted dentries xiubli
2023-04-17  3:26 ` [PATCH v19 26/70] ceph: add helpers for converting names for userland presentation xiubli
2023-04-17  3:26 ` [PATCH v19 27/70] ceph: fix base64 encoded name's length check in ceph_fname_to_usr() xiubli
2023-04-17  3:26 ` [PATCH v19 28/70] ceph: add fscrypt support to ceph_fill_trace xiubli
2023-04-17  3:26 ` [PATCH v19 29/70] ceph: pass the request to parse_reply_info_readdir() xiubli
2023-04-17  3:26 ` [PATCH v19 30/70] ceph: add ceph_encode_encrypted_dname() helper xiubli
2023-04-17  3:26 ` [PATCH v19 31/70] ceph: add support to readdir for encrypted filenames xiubli
2023-04-17  3:26 ` [PATCH v19 32/70] ceph: create symlinks with encrypted and base64-encoded targets xiubli
2023-04-17  3:26 ` [PATCH v19 33/70] ceph: make ceph_get_name decrypt filenames xiubli
2023-04-17  3:26 ` [PATCH v19 34/70] ceph: add a new ceph.fscrypt.auth vxattr xiubli
2023-04-17  3:26 ` [PATCH v19 35/70] ceph: add some fscrypt guardrails xiubli
2023-04-17  3:26 ` [PATCH v19 36/70] ceph: allow encrypting a directory while not having Ax caps xiubli
2023-04-17  3:26 ` [PATCH v19 37/70] ceph: mark directory as non-complete after loading key xiubli
2023-04-17  3:26 ` [PATCH v19 38/70] ceph: don't allow changing layout on encrypted files/directories xiubli
2023-04-17  3:26 ` [PATCH v19 39/70] libceph: add CEPH_OSD_OP_ASSERT_VER support xiubli
2023-04-17  3:26 ` [PATCH v19 40/70] ceph: size handling for encrypted inodes in cap updates xiubli
2023-04-17  3:26 ` [PATCH v19 41/70] ceph: fscrypt_file field handling in MClientRequest messages xiubli
2023-04-17  3:26 ` [PATCH v19 42/70] ceph: get file size from fscrypt_file when present in inode traces xiubli
2023-04-17  3:26 ` [PATCH v19 43/70] ceph: handle fscrypt fields in cap messages from MDS xiubli
2023-04-17  3:26 ` [PATCH v19 44/70] ceph: update WARN_ON message to pr_warn xiubli
2023-04-17  3:26 ` [PATCH v19 45/70] ceph: add __ceph_get_caps helper support xiubli
2023-04-17  3:26 ` [PATCH v19 46/70] ceph: add __ceph_sync_read " xiubli
2023-04-17  3:26 ` [PATCH v19 47/70] ceph: add object version support for sync read xiubli
2023-04-17  3:26 ` [PATCH v19 48/70] ceph: add infrastructure for file encryption and decryption xiubli
2023-04-17  3:26 ` [PATCH v19 49/70] ceph: add truncate size handling support for fscrypt xiubli
2023-04-17  3:26 ` [PATCH v19 50/70] libceph: allow ceph_osdc_new_request to accept a multi-op read xiubli
2023-04-17  3:26 ` [PATCH v19 51/70] ceph: disable fallocate for encrypted inodes xiubli
2023-04-17  3:26 ` [PATCH v19 52/70] ceph: disable copy offload on " xiubli
2023-04-17  3:26 ` [PATCH v19 53/70] ceph: don't use special DIO path for " xiubli
2023-04-17  3:26 ` [PATCH v19 54/70] ceph: align data in pages in ceph_sync_write xiubli
2023-04-17  3:26 ` [PATCH v19 55/70] ceph: add read/modify/write to ceph_sync_write xiubli
2023-04-17  3:26 ` [PATCH v19 56/70] ceph: plumb in decryption during sync reads xiubli
2023-04-17  3:26 ` [PATCH v19 57/70] ceph: add fscrypt decryption support to ceph_netfs_issue_op xiubli
2023-04-17  3:26 ` [PATCH v19 58/70] ceph: set i_blkbits to crypto block size for encrypted inodes xiubli
2023-04-17  3:26 ` [PATCH v19 59/70] ceph: add encryption support to writepage xiubli
2023-04-17  3:26 ` [PATCH v19 60/70] ceph: fscrypt support for writepages xiubli
2023-04-17  3:26 ` [PATCH v19 61/70] ceph: invalidate pages when doing direct/sync writes xiubli
2023-04-17  3:26 ` [PATCH v19 62/70] ceph: add support for encrypted snapshot names xiubli
2023-04-17  3:26 ` [PATCH v19 63/70] ceph: add support for handling " xiubli
2023-04-17  3:26 ` [PATCH v19 64/70] ceph: update documentation regarding snapshot naming limitations xiubli
2023-04-17  3:26 ` [PATCH v19 65/70] ceph: prevent snapshots to be created in encrypted locked directories xiubli
2023-04-17  3:26 ` [PATCH v19 66/70] ceph: report STATX_ATTR_ENCRYPTED on encrypted inodes xiubli
2023-04-17  3:26 ` [PATCH v19 67/70] ceph: drop the messages from MDS when unmounting xiubli
2023-04-17  3:26 ` [PATCH v19 68/70] ceph: fix updating the i_truncate_pagecache_size for fscrypt xiubli
2023-06-06  7:04   ` Milind Changire
2023-04-17  3:26 ` [PATCH v19 69/70] ceph: switch ceph_open() to use new fscrypt helper xiubli
2023-06-06  6:25   ` Milind Changire
2023-06-06  8:37     ` Xiubo Li
2023-06-06  9:05       ` Luís Henriques
2023-06-06  9:09         ` Xiubo Li
2023-04-17  3:26 ` [PATCH v19 70/70] ceph: switch ceph_open_atomic() to use the " xiubli
2023-06-06  6:18   ` Milind Changire
