From: James Simmons <jsimmons@infradead.org>
To: lustre-devel@lists.lustre.org
Subject: [lustre-devel] [PATCH 35/37] lustre: llite: fix short io for AIO
Date: Wed, 15 Jul 2020 16:45:16 -0400	[thread overview]
Message-ID: <1594845918-29027-36-git-send-email-jsimmons@infradead.org> (raw)
In-Reply-To: <1594845918-29027-1-git-send-email-jsimmons@infradead.org>

From: Wang Shilong <wshilong@ddn.com>

The problem: currently AIO cannot handle an I/O size larger than the
stripe size.

The cl_io loop is needed to handle I/O that spans stripes, but since
-EIOCBQUEUED is returned for AIO, the loop stops early and the result
is a short I/O.

Fix this by making the I/O engine aware of this special error code so
that it proceeds to finish all I/O requests.
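
A minimal reproducer sketch of the failure mode (not part of the patch;
the mount point, the buffer alignment, and the 4 MiB transfer size
larger than an assumed 1 MiB stripe are illustrative assumptions): an
O_DIRECT read submitted through libaio should complete with the full
length, while the pre-fix behaviour could report only the bytes of the
first stripe.

  #define _GNU_SOURCE
  #include <fcntl.h>
  #include <libaio.h>
  #include <stdio.h>
  #include <stdlib.h>
  #include <unistd.h>

  int main(void)
  {
  	io_context_t ctx = 0;
  	struct iocb cb, *cbs[1] = { &cb };
  	struct io_event ev;
  	size_t len = 4 << 20;	/* assumed: larger than the stripe size */
  	void *buf;
  	int fd;

  	fd = open("/mnt/lustre/testfile", O_RDONLY | O_DIRECT);
  	if (fd < 0 || posix_memalign(&buf, 4096, len))
  		exit(1);

  	io_setup(1, &ctx);			/* one in-flight request */
  	io_prep_pread(&cb, fd, buf, len, 0);	/* single large AIO read */
  	io_submit(ctx, 1, cbs);
  	io_getevents(ctx, 1, 1, &ev, NULL);	/* wait for completion */

  	/* expect len bytes; a smaller ev.res is the short I/O */
  	printf("AIO read completed with %lld of %zu bytes\n",
  	       (long long)ev.res, len);
  	close(fd);
  	return 0;
  }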

Fixes: fde7ac1942f5 ("lustre: clio: AIO support for direct IO")
WC-bug-id: https://jira.whamcloud.com/browse/LU-13697
Lustre-commit: 84c3e85ced2dd ("LU-13697 llite: fix short io for AIO")
Signed-off-by: Wang Shilong <wshilong@ddn.com>
Reviewed-on: https://review.whamcloud.com/39104
Reviewed-by: Andreas Dilger <adilger@whamcloud.com>
Reviewed-by: Bobi Jam <bobijam@hotmail.com>
Reviewed-by: Oleg Drokin <green@whamcloud.com>
Signed-off-by: James Simmons <jsimmons@infradead.org>
---
 fs/lustre/include/cl_object.h  |  2 ++
 fs/lustre/llite/file.c         | 32 +++++++++++++++++-
 fs/lustre/llite/rw26.c         | 43 +++++++++++++++++--------
 fs/lustre/llite/vvp_internal.h |  3 +-
 fs/lustre/llite/vvp_io.c       | 73 ++++++++++++++++++++++++++++--------------
 fs/lustre/obdclass/cl_io.c     |  9 +++++-
 6 files changed, 122 insertions(+), 40 deletions(-)

diff --git a/fs/lustre/include/cl_object.h b/fs/lustre/include/cl_object.h
index e656c68..e849f23 100644
--- a/fs/lustre/include/cl_object.h
+++ b/fs/lustre/include/cl_object.h
@@ -1814,6 +1814,8 @@ struct cl_io {
 	enum cl_io_state	ci_state;
 	/** main object this io is against. Immutable after creation. */
 	struct cl_object	*ci_obj;
+	/** one AIO request might be split across cl_io_loop() iterations */
+	struct cl_dio_aio	*ci_aio;
 	/**
 	 * Upper layer io, of which this io is a part of. Immutable after
 	 * creation.
diff --git a/fs/lustre/llite/file.c b/fs/lustre/llite/file.c
index 1849229..757950f 100644
--- a/fs/lustre/llite/file.c
+++ b/fs/lustre/llite/file.c
@@ -1514,6 +1514,7 @@ static void ll_heat_add(struct inode *inode, enum cl_io_type iot,
 	int rc = 0;
 	unsigned int retried = 0;
 	unsigned int ignore_lockless = 0;
+	bool is_aio = false;
 
 	CDEBUG(D_VFSTRACE, "file: %pD, type: %d ppos: %llu, count: %zu\n",
 	       file, iot, *ppos, count);
@@ -1536,6 +1537,15 @@ static void ll_heat_add(struct inode *inode, enum cl_io_type iot,
 		vio->vui_fd  = file->private_data;
 		vio->vui_iter = args->u.normal.via_iter;
 		vio->vui_iocb = args->u.normal.via_iocb;
+		if (file->f_flags & O_DIRECT) {
+			if (!is_sync_kiocb(vio->vui_iocb))
+				is_aio = true;
+			io->ci_aio = cl_aio_alloc(vio->vui_iocb);
+			if (!io->ci_aio) {
+				rc = -ENOMEM;
+				goto out;
+			}
+		}
 		/*
 		 * Direct IO reads must also take range lock,
 		 * or multiple reads will try to work on the same pages
@@ -1567,7 +1577,14 @@ static void ll_heat_add(struct inode *inode, enum cl_io_type iot,
 		rc = io->ci_result;
 	}
 
-	if (io->ci_nob > 0) {
+	/*
+	 * For AIO, ci_nob is increased to move the I/O forward,
+	 * but that does not mean the I/O has finished, only that
+	 * it has been submitted; -EIOCBQUEUED is always returned
+	 * to the caller, so the byte count can only be returned
+	 * in the non-AIO case.
+	 */
+	if (io->ci_nob > 0 && !is_aio) {
 		result += io->ci_nob;
 		count -= io->ci_nob;
 		*ppos = io->u.ci_wr.wr.crw_pos;
@@ -1577,6 +1594,19 @@ static void ll_heat_add(struct inode *inode, enum cl_io_type iot,
 			args->u.normal.via_iter = vio->vui_iter;
 	}
 out:
+	if (io->ci_aio) {
+		/**
+		 * Drop one extra reference so that end_io() can be
+		 * called for this IO context; it is only safe to call
+		 * once all AIO requests have been processed.
+		 */
+		cl_sync_io_note(env, &io->ci_aio->cda_sync,
+				rc == -EIOCBQUEUED ? 0 : rc);
+		if (!is_aio) {
+			cl_aio_free(io->ci_aio);
+			io->ci_aio = NULL;
+		}
+	}
 	cl_io_fini(env, io);
 
 	CDEBUG(D_VFSTRACE,
diff --git a/fs/lustre/llite/rw26.c b/fs/lustre/llite/rw26.c
index d0e3ff6..b3802cf 100644
--- a/fs/lustre/llite/rw26.c
+++ b/fs/lustre/llite/rw26.c
@@ -290,6 +290,7 @@ static ssize_t ll_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
 	ssize_t tot_bytes = 0, result = 0;
 	loff_t file_offset = iocb->ki_pos;
 	int rw = iov_iter_rw(iter);
+	struct vvp_io *vio;
 
 	/* if file is encrypted, return 0 so that we fall back to buffered IO */
 	if (IS_ENCRYPTED(inode))
@@ -319,12 +320,13 @@ static ssize_t ll_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
 
 	env = lcc->lcc_env;
 	LASSERT(!IS_ERR(env));
+	vio = vvp_env_io(env);
 	io = lcc->lcc_io;
 	LASSERT(io);
 
-	aio = cl_aio_alloc(iocb);
-	if (!aio)
-		return -ENOMEM;
+	aio = io->ci_aio;
+	LASSERT(aio);
+	LASSERT(aio->cda_iocb == iocb);
 
 	/* 0. Need locking between buffered and direct access. and race with
 	 *    size changing by concurrent truncates and writes.
@@ -368,24 +370,39 @@ static ssize_t ll_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
 	}
 
 out:
-	aio->cda_bytes = tot_bytes;
-	cl_sync_io_note(env, &aio->cda_sync, result);
+	aio->cda_bytes += tot_bytes;
 
 	if (is_sync_kiocb(iocb)) {
+		struct cl_sync_io *anchor = &aio->cda_sync;
 		ssize_t rc2;
 
-		rc2 = cl_sync_io_wait(env, &aio->cda_sync, 0);
+		/**
+		 * @anchor was initialized to 1 to prevent end_io()
+		 * from being called before all pages were added to
+		 * the IO, so drop that extra reference here to let
+		 * the count reach zero while we wait.
+		 */
+		cl_sync_io_note(env, anchor, result);
+
+		rc2 = cl_sync_io_wait(env, anchor, 0);
 		if (result == 0 && rc2)
 			result = rc2;
 
+		/**
+		 * Take the extra reference again, since @anchor is
+		 * assumed to start at 1 if it is reused.
+		 */
+		atomic_add(1, &anchor->csi_sync_nr);
 		if (result == 0) {
-			struct vvp_io *vio = vvp_env_io(env);
 			/* no commit async for direct IO */
-			vio->u.write.vui_written += tot_bytes;
+			vio->u.readwrite.vui_written += tot_bytes;
 			result = tot_bytes;
 		}
-		cl_aio_free(aio);
 	} else {
+		if (rw == WRITE)
+			vio->u.readwrite.vui_written += tot_bytes;
+		else
+			vio->u.readwrite.vui_read += tot_bytes;
 		result = -EIOCBQUEUED;
 	}
 
@@ -523,7 +540,7 @@ static int ll_write_begin(struct file *file, struct address_space *mapping,
 	vmpage = grab_cache_page_nowait(mapping, index);
 	if (unlikely(!vmpage || PageDirty(vmpage) || PageWriteback(vmpage))) {
 		struct vvp_io *vio = vvp_env_io(env);
-		struct cl_page_list *plist = &vio->u.write.vui_queue;
+		struct cl_page_list *plist = &vio->u.readwrite.vui_queue;
 
 		/* if the page is already in dirty cache, we have to commit
 		 * the pages right now; otherwise, it may cause deadlock
@@ -685,17 +702,17 @@ static int ll_write_end(struct file *file, struct address_space *mapping,
 
 	LASSERT(cl_page_is_owned(page, io));
 	if (copied > 0) {
-		struct cl_page_list *plist = &vio->u.write.vui_queue;
+		struct cl_page_list *plist = &vio->u.readwrite.vui_queue;
 
 		lcc->lcc_page = NULL; /* page will be queued */
 
 		/* Add it into write queue */
 		cl_page_list_add(plist, page);
 		if (plist->pl_nr == 1) /* first page */
-			vio->u.write.vui_from = from;
+			vio->u.readwrite.vui_from = from;
 		else
 			LASSERT(from == 0);
-		vio->u.write.vui_to = from + copied;
+		vio->u.readwrite.vui_to = from + copied;
 
 		/*
 		 * To address the deadlock in balance_dirty_pages() where
diff --git a/fs/lustre/llite/vvp_internal.h b/fs/lustre/llite/vvp_internal.h
index cff85ea..6956d6b 100644
--- a/fs/lustre/llite/vvp_internal.h
+++ b/fs/lustre/llite/vvp_internal.h
@@ -88,9 +88,10 @@ struct vvp_io {
 		struct {
 			struct cl_page_list	vui_queue;
 			unsigned long		vui_written;
+			unsigned long		vui_read;
 			int			vui_from;
 			int			vui_to;
-		} write;
+		} readwrite; /* normal io */
 	} u;
 
 	/**
diff --git a/fs/lustre/llite/vvp_io.c b/fs/lustre/llite/vvp_io.c
index c3fb03a..59da56d 100644
--- a/fs/lustre/llite/vvp_io.c
+++ b/fs/lustre/llite/vvp_io.c
@@ -249,10 +249,20 @@ static int vvp_io_write_iter_init(const struct lu_env *env,
 {
 	struct vvp_io *vio = cl2vvp_io(env, ios);
 
-	cl_page_list_init(&vio->u.write.vui_queue);
-	vio->u.write.vui_written = 0;
-	vio->u.write.vui_from = 0;
-	vio->u.write.vui_to = PAGE_SIZE;
+	cl_page_list_init(&vio->u.readwrite.vui_queue);
+	vio->u.readwrite.vui_written = 0;
+	vio->u.readwrite.vui_from = 0;
+	vio->u.readwrite.vui_to = PAGE_SIZE;
+
+	return 0;
+}
+
+static int vvp_io_read_iter_init(const struct lu_env *env,
+				 const struct cl_io_slice *ios)
+{
+	struct vvp_io *vio = cl2vvp_io(env, ios);
+
+	vio->u.readwrite.vui_read = 0;
 
 	return 0;
 }
@@ -262,7 +272,7 @@ static void vvp_io_write_iter_fini(const struct lu_env *env,
 {
 	struct vvp_io *vio = cl2vvp_io(env, ios);
 
-	LASSERT(vio->u.write.vui_queue.pl_nr == 0);
+	LASSERT(vio->u.readwrite.vui_queue.pl_nr == 0);
 }
 
 static int vvp_io_fault_iter_init(const struct lu_env *env,
@@ -824,7 +834,13 @@ static int vvp_io_read_start(const struct lu_env *env,
 			io->ci_continue = 0;
 		io->ci_nob += result;
 		result = 0;
+	} else if (result == -EIOCBQUEUED) {
+		io->ci_nob += vio->u.readwrite.vui_read;
+		if (vio->vui_iocb)
+			vio->vui_iocb->ki_pos = pos +
+						vio->u.readwrite.vui_read;
 	}
+
 	return result;
 }
 
@@ -1017,23 +1033,24 @@ int vvp_io_write_commit(const struct lu_env *env, struct cl_io *io)
 	struct cl_object *obj = io->ci_obj;
 	struct inode *inode = vvp_object_inode(obj);
 	struct vvp_io *vio = vvp_env_io(env);
-	struct cl_page_list *queue = &vio->u.write.vui_queue;
+	struct cl_page_list *queue = &vio->u.readwrite.vui_queue;
 	struct cl_page *page;
 	int rc = 0;
 	int bytes = 0;
-	unsigned int npages = vio->u.write.vui_queue.pl_nr;
+	unsigned int npages = vio->u.readwrite.vui_queue.pl_nr;
 
 	if (npages == 0)
 		return 0;
 
 	CDEBUG(D_VFSTRACE, "commit async pages: %d, from %d, to %d\n",
-	       npages, vio->u.write.vui_from, vio->u.write.vui_to);
+	       npages, vio->u.readwrite.vui_from, vio->u.readwrite.vui_to);
 
 	LASSERT(page_list_sanity_check(obj, queue));
 
 	/* submit IO with async write */
 	rc = cl_io_commit_async(env, io, queue,
-				vio->u.write.vui_from, vio->u.write.vui_to,
+				vio->u.readwrite.vui_from,
+				vio->u.readwrite.vui_to,
 				write_commit_callback);
 	npages -= queue->pl_nr; /* already committed pages */
 	if (npages > 0) {
@@ -1041,18 +1058,18 @@ int vvp_io_write_commit(const struct lu_env *env, struct cl_io *io)
 		bytes = npages << PAGE_SHIFT;
 
 		/* first page */
-		bytes -= vio->u.write.vui_from;
+		bytes -= vio->u.readwrite.vui_from;
 		if (queue->pl_nr == 0) /* last page */
-			bytes -= PAGE_SIZE - vio->u.write.vui_to;
+			bytes -= PAGE_SIZE - vio->u.readwrite.vui_to;
 		LASSERTF(bytes > 0, "bytes = %d, pages = %d\n", bytes, npages);
 
-		vio->u.write.vui_written += bytes;
+		vio->u.readwrite.vui_written += bytes;
 
 		CDEBUG(D_VFSTRACE, "Committed %d pages %d bytes, tot: %ld\n",
-		       npages, bytes, vio->u.write.vui_written);
+		       npages, bytes, vio->u.readwrite.vui_written);
 
 		/* the first page must have been written. */
-		vio->u.write.vui_from = 0;
+		vio->u.readwrite.vui_from = 0;
 	}
 	LASSERT(page_list_sanity_check(obj, queue));
 	LASSERT(ergo(rc == 0, queue->pl_nr == 0));
@@ -1060,10 +1077,10 @@ int vvp_io_write_commit(const struct lu_env *env, struct cl_io *io)
 	/* out of quota, try sync write */
 	if (rc == -EDQUOT && !cl_io_is_mkwrite(io)) {
 		rc = vvp_io_commit_sync(env, io, queue,
-					vio->u.write.vui_from,
-					vio->u.write.vui_to);
+					vio->u.readwrite.vui_from,
+					vio->u.readwrite.vui_to);
 		if (rc > 0) {
-			vio->u.write.vui_written += rc;
+			vio->u.readwrite.vui_written += rc;
 			rc = 0;
 		}
 	}
@@ -1181,15 +1198,15 @@ static int vvp_io_write_start(const struct lu_env *env,
 		result = vvp_io_write_commit(env, io);
 		/* Simulate short commit */
 		if (CFS_FAULT_CHECK(OBD_FAIL_LLITE_SHORT_COMMIT)) {
-			vio->u.write.vui_written >>= 1;
-			if (vio->u.write.vui_written > 0)
+			vio->u.readwrite.vui_written >>= 1;
+			if (vio->u.readwrite.vui_written > 0)
 				io->ci_need_restart = 1;
 		}
-		if (vio->u.write.vui_written > 0) {
-			result = vio->u.write.vui_written;
+		if (vio->u.readwrite.vui_written > 0) {
+			result = vio->u.readwrite.vui_written;
 			io->ci_nob += result;
-
-			CDEBUG(D_VFSTRACE, "write: nob %zd, result: %zd\n",
+			CDEBUG(D_VFSTRACE, "%s: write: nob %zd, result: %zd\n",
+			       file_dentry(file)->d_name.name,
 			       io->ci_nob, result);
 		} else {
 			io->ci_continue = 0;
@@ -1215,11 +1232,18 @@ static int vvp_io_write_start(const struct lu_env *env,
 	if (result > 0 || result == -EIOCBQUEUED) {
 		set_bit(LLIF_DATA_MODIFIED, &(ll_i2info(inode))->lli_flags);
 
-		if (result < cnt)
+		if (result != -EIOCBQUEUED && result < cnt)
 			io->ci_continue = 0;
 		if (result > 0)
 			result = 0;
+		/* move forward */
+		if (result == -EIOCBQUEUED) {
+			io->ci_nob += vio->u.readwrite.vui_written;
+			vio->vui_iocb->ki_pos = pos +
+						vio->u.readwrite.vui_written;
+		}
 	}
+
 	return result;
 }
 
@@ -1509,6 +1533,7 @@ static int vvp_io_read_ahead(const struct lu_env *env,
 	.op = {
 		[CIT_READ] = {
 			.cio_fini	= vvp_io_fini,
+			.cio_iter_init	= vvp_io_read_iter_init,
 			.cio_lock	= vvp_io_read_lock,
 			.cio_start	= vvp_io_read_start,
 			.cio_end	= vvp_io_rw_end,
diff --git a/fs/lustre/obdclass/cl_io.c b/fs/lustre/obdclass/cl_io.c
index dcf940f..1564d9f 100644
--- a/fs/lustre/obdclass/cl_io.c
+++ b/fs/lustre/obdclass/cl_io.c
@@ -695,6 +695,7 @@ int cl_io_submit_sync(const struct lu_env *env, struct cl_io *io,
 int cl_io_loop(const struct lu_env *env, struct cl_io *io)
 {
 	int result = 0;
+	int rc = 0;
 
 	LINVRNT(cl_io_is_loopable(io));
 
@@ -727,7 +728,13 @@ int cl_io_loop(const struct lu_env *env, struct cl_io *io)
 			}
 		}
 		cl_io_iter_fini(env, io);
-	} while (result == 0 && io->ci_continue);
+		if (result)
+			rc = result;
+	} while ((result == 0 || result == -EIOCBQUEUED) &&
+		 io->ci_continue);
+
+	if (rc && !result)
+		result = rc;
 
 	if (result == -EWOULDBLOCK && io->ci_ndelay) {
 		io->ci_need_restart = 1;
-- 
1.8.3.1


Thread overview: 38+ messages
2020-07-15 20:44 [lustre-devel] [PATCH 00/37] lustre: latest patches landed to OpenSFS 07/14/2020 James Simmons
2020-07-15 20:44 ` [lustre-devel] [PATCH 01/37] lustre: osc: fix osc_extent_find() James Simmons
2020-07-15 20:44 ` [lustre-devel] [PATCH 02/37] lustre: ldlm: check slv and limit before updating James Simmons
2020-07-15 20:44 ` [lustre-devel] [PATCH 03/37] lustre: sec: better struct sepol_downcall_data James Simmons
2020-07-15 20:44 ` [lustre-devel] [PATCH 04/37] lustre: obdclass: remove init to 0 from lustre_init_lsi() James Simmons
2020-07-15 20:44 ` [lustre-devel] [PATCH 05/37] lustre: ptlrpc: handle conn_hash rhashtable resize James Simmons
2020-07-15 20:44 ` [lustre-devel] [PATCH 06/37] lustre: lu_object: convert lu_object cache to rhashtable James Simmons
2020-07-15 20:44 ` [lustre-devel] [PATCH 07/37] lustre: osc: disable ext merging for rdma only pages and non-rdma James Simmons
2020-07-15 20:44 ` [lustre-devel] [PATCH 08/37] lnet: socklnd: fix local interface binding James Simmons
2020-07-15 20:44 ` [lustre-devel] [PATCH 09/37] lnet: o2iblnd: allocate init_qp_attr on stack James Simmons
2020-07-15 20:44 ` [lustre-devel] [PATCH 10/37] lnet: Fix some out-of-date comments James Simmons
2020-07-15 20:44 ` [lustre-devel] [PATCH 11/37] lnet: socklnd: don't fall-back to tcp_sendpage James Simmons
2020-07-15 20:44 ` [lustre-devel] [PATCH 12/37] lustre: ptlrpc: re-enterable signal_completed_replay() James Simmons
2020-07-15 20:44 ` [lustre-devel] [PATCH 13/37] lustre: obdcalss: ensure LCT_QUIESCENT take sync James Simmons
2020-07-15 20:44 ` [lustre-devel] [PATCH 14/37] lustre: remove some "#ifdef CONFIG*" from .c files James Simmons
2020-07-15 20:44 ` [lustre-devel] [PATCH 15/37] lustre: obdclass: use offset instead of cp_linkage James Simmons
2020-07-15 20:44 ` [lustre-devel] [PATCH 16/37] lustre: obdclass: re-declare cl_page variables to reduce its size James Simmons
2020-07-15 20:44 ` [lustre-devel] [PATCH 17/37] lustre: osc: re-declare ops_from/to to shrink osc_page James Simmons
2020-07-15 20:44 ` [lustre-devel] [PATCH 18/37] lustre: llite: Fix lock ordering in pagevec_dirty James Simmons
2020-07-15 20:45 ` [lustre-devel] [PATCH 19/37] lustre: misc: quiet compiler warning on armv7l James Simmons
2020-07-15 20:45 ` [lustre-devel] [PATCH 20/37] lustre: llite: fix to free cl_dio_aio properly James Simmons
2020-07-15 20:45 ` [lustre-devel] [PATCH 21/37] lnet: o2iblnd: Use ib_mtu_int_to_enum() James Simmons
2020-07-15 20:45 ` [lustre-devel] [PATCH 22/37] lnet: o2iblnd: wait properly for fps->increasing James Simmons
2020-07-15 20:45 ` [lustre-devel] [PATCH 23/37] lnet: o2iblnd: use need_resched() James Simmons
2020-07-15 20:45 ` [lustre-devel] [PATCH 24/37] lnet: o2iblnd: Use list_for_each_entry_safe James Simmons
2020-07-15 20:45 ` [lustre-devel] [PATCH 25/37] lnet: socklnd: use need_resched() James Simmons
2020-07-15 20:45 ` [lustre-devel] [PATCH 26/37] lnet: socklnd: use list_for_each_entry_safe() James Simmons
2020-07-15 20:45 ` [lustre-devel] [PATCH 27/37] lnet: socklnd: convert various refcounts to refcount_t James Simmons
2020-07-15 20:45 ` [lustre-devel] [PATCH 28/37] lnet: libcfs: don't call unshare_fs_struct() James Simmons
2020-07-15 20:45 ` [lustre-devel] [PATCH 29/37] lnet: Allow router to forward to healthier NID James Simmons
2020-07-15 20:45 ` [lustre-devel] [PATCH 30/37] lustre: llite: annotate non-owner locking James Simmons
2020-07-15 20:45 ` [lustre-devel] [PATCH 31/37] lustre: osc: consume grants for direct I/O James Simmons
2020-07-15 20:45 ` [lustre-devel] [PATCH 32/37] lnet: remove LNetMEUnlink and clean up related code James Simmons
2020-07-15 20:45 ` [lustre-devel] [PATCH 33/37] lnet: Set remote NI status in lnet_notify James Simmons
2020-07-15 20:45 ` [lustre-devel] [PATCH 34/37] lustre: ptlrpc: fix endless loop issue James Simmons
2020-07-15 20:45 ` James Simmons [this message]
2020-07-15 20:45 ` [lustre-devel] [PATCH 36/37] lnet: socklnd: change ksnd_nthreads to atomic_t James Simmons
2020-07-15 20:45 ` [lustre-devel] [PATCH 37/37] lnet: check rtr_nid is a gateway James Simmons
