From: Matthew Wilcox <willy@infradead.org>
To: Jan Kara <jack@suse.cz>
Cc: Seema Pandit <seema.pandit@intel.com>,
	linux-nvdimm <linux-nvdimm@lists.01.org>,
	Boaz Harrosh <openosd@gmail.com>,
	Linux Kernel Mailing List <linux-kernel@vger.kernel.org>,
	stable <stable@vger.kernel.org>,
	Robert Barror <robert.barror@intel.com>,
	linux-fsdevel <linux-fsdevel@vger.kernel.org>
Subject: Re: [PATCH] dax: Fix missed PMD wakeups
Date: Wed, 10 Jul 2019 20:35:55 -0700	[thread overview]
Message-ID: <20190711033555.GP32320@bombadil.infradead.org> (raw)
In-Reply-To: <20190710190204.GB14701@quack2.suse.cz>

[-- Attachment #1: Type: text/plain, Size: 317 bytes --]

On Wed, Jul 10, 2019 at 09:02:04PM +0200, Jan Kara wrote:
> So how about the attached patch? That keeps the interface sane and passes a
> smoketest for me (full fstest run running). Obviously it also needs a
> proper changelog...

Changelog and slightly massaged version along the lines of my two comments
attached.


[-- Attachment #2: 0001-dax-Fix-missed-wakeup-with-PMD-faults.patch --]
[-- Type: text/plain, Size: 6789 bytes --]

From 57b63fdd38e7bea7eb8d6332f0163fb028570def Mon Sep 17 00:00:00 2001
From: "Matthew Wilcox (Oracle)" <willy@infradead.org>
Date: Wed, 3 Jul 2019 23:21:25 -0400
Subject: [PATCH] dax: Fix missed wakeup with PMD faults

RocksDB can hang indefinitely when using a DAX file.  This is due to
a bug in the XArray conversion when handling a PMD fault and finding a
PTE entry.  We use the wrong index in the hash and end up waiting on
the wrong waitqueue.
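
For context, DAX picks which waitqueue to sleep on by hashing the
XArray pointer together with the entry's index, and PMD entries align
that index down to the start of the PMD.  A simplified sketch of the
scheme, modelled on dax_entry_waitqueue() in fs/dax.c (the real
function also fills in a wake key, omitted here):

	/*
	 * Simplified sketch of DAX waitqueue selection.  PG_PMD_COLOUR
	 * masks off the sub-PMD part of the index, so every offset
	 * covered by a PMD entry waits on the PMD's starting index.
	 */
	#define DAX_WAIT_TABLE_BITS 12
	#define DAX_WAIT_TABLE (1 << DAX_WAIT_TABLE_BITS)

	static wait_queue_head_t wait_table[DAX_WAIT_TABLE];

	static wait_queue_head_t *dax_entry_waitqueue(struct xa_state *xas,
			void *entry)
	{
		unsigned long index = xas->xa_index;

		if (dax_is_pmd_entry(entry))
			index &= ~PG_PMD_COLOUR;

		return wait_table + hash_long((unsigned long)xas->xa ^ index,
				DAX_WAIT_TABLE_BITS);
	}

A waiter that derives the index from one entry size while the waker
derives it from the other hashes to a different queue head, so the
wakeup never arrives.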

There's actually no need to wait; if we find a PTE entry while looking
for a PMD entry, we can return immediately as we know we should fall
back to a PTE fault (which may not conflict with the lock held).
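
Concretely (a worked example, not part of the patch, assuming x86-64
with 4K pages so PMD_ORDER is 9): a PMD fault asks for order 9, and if
the lookup finds a live PTE entry, dax_entry_order() returns 0, so the
new check signals a conflict instead of sleeping:

	unsigned int order = PMD_ORDER;		/* PMD fault: wants order 9 */
	void *entry = xas_find_conflict(&xas);	/* suppose a PTE entry: order 0 */

	if (dax_entry_order(entry) < order)	/* 0 < 9: smaller entry found */
		entry = XA_DAX_CONFLICT_ENTRY;	/* caller falls back, no wait */

grab_mapping_entry() then jumps straight to its fallback label and the
fault is retried as a PTE fault.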

Cc: stable@vger.kernel.org
Fixes: b15cd800682f ("dax: Convert page fault handlers to XArray")
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
---
 fs/dax.c               | 47 ++++++++++++++++++++++++------------------
 include/linux/xarray.h |  4 ++--
 2 files changed, 29 insertions(+), 22 deletions(-)

diff --git a/fs/dax.c b/fs/dax.c
index 2e48c7ebb973..1ce1059af266 100644
--- a/fs/dax.c
+++ b/fs/dax.c
@@ -195,11 +195,13 @@ static void dax_wake_entry(struct xa_state *xas, void *entry, bool wake_all)
  * Look up entry in page cache, wait for it to become unlocked if it
  * is a DAX entry and return it.  The caller must subsequently call
  * put_unlocked_entry() if it did not lock the entry or dax_unlock_entry()
- * if it did.
+ * if it did.  The entry returned may have a larger order than @order.
+ * If @order is larger than the order of the entry found in i_pages, this
+ * function returns a CONFLICT entry.
  *
  * Must be called with the i_pages lock held.
  */
-static void *get_unlocked_entry(struct xa_state *xas)
+static void *get_unlocked_entry(struct xa_state *xas, unsigned int order)
 {
 	void *entry;
 	struct wait_exceptional_entry_queue ewait;
@@ -210,6 +212,8 @@ static void *get_unlocked_entry(struct xa_state *xas)
 
 	for (;;) {
 		entry = xas_find_conflict(xas);
+		if (dax_entry_order(entry) < order)
+			return XA_DAX_CONFLICT_ENTRY;
 		if (!entry || WARN_ON_ONCE(!xa_is_value(entry)) ||
 				!dax_is_locked(entry))
 			return entry;
@@ -254,7 +258,7 @@ static void wait_entry_unlocked(struct xa_state *xas, void *entry)
 static void put_unlocked_entry(struct xa_state *xas, void *entry)
 {
 	/* If we were the only waiter woken, wake the next one */
-	if (entry)
+	if (entry && entry != XA_DAX_CONFLICT_ENTRY)
 		dax_wake_entry(xas, entry, false);
 }
 
@@ -461,7 +465,7 @@ void dax_unlock_page(struct page *page, dax_entry_t cookie)
  * overlap with xarray value entries.
  */
 static void *grab_mapping_entry(struct xa_state *xas,
-		struct address_space *mapping, unsigned long size_flag)
+		struct address_space *mapping, unsigned int order)
 {
 	unsigned long index = xas->xa_index;
 	bool pmd_downgrade = false; /* splitting PMD entry into PTE entries? */
@@ -469,20 +473,17 @@ static void *grab_mapping_entry(struct xa_state *xas,
 
 retry:
 	xas_lock_irq(xas);
-	entry = get_unlocked_entry(xas);
+	entry = get_unlocked_entry(xas, order);
 
 	if (entry) {
+		if (entry == XA_DAX_CONFLICT_ENTRY)
+			goto fallback;
 		if (!xa_is_value(entry)) {
 			xas_set_err(xas, EIO);
 			goto out_unlock;
 		}
 
-		if (size_flag & DAX_PMD) {
-			if (dax_is_pte_entry(entry)) {
-				put_unlocked_entry(xas, entry);
-				goto fallback;
-			}
-		} else { /* trying to grab a PTE entry */
+		if (order == 0) {
 			if (dax_is_pmd_entry(entry) &&
 			    (dax_is_zero_entry(entry) ||
 			     dax_is_empty_entry(entry))) {
@@ -523,7 +524,11 @@ static void *grab_mapping_entry(struct xa_state *xas,
 	if (entry) {
 		dax_lock_entry(xas, entry);
 	} else {
-		entry = dax_make_entry(pfn_to_pfn_t(0), size_flag | DAX_EMPTY);
+		unsigned long flags = DAX_EMPTY;
+
+		if (order > 0)
+			flags |= DAX_PMD;
+		entry = dax_make_entry(pfn_to_pfn_t(0), flags);
 		dax_lock_entry(xas, entry);
 		if (xas_error(xas))
 			goto out_unlock;
@@ -594,7 +599,7 @@ struct page *dax_layout_busy_page(struct address_space *mapping)
 		if (WARN_ON_ONCE(!xa_is_value(entry)))
 			continue;
 		if (unlikely(dax_is_locked(entry)))
-			entry = get_unlocked_entry(&xas);
+			entry = get_unlocked_entry(&xas, 0);
 		if (entry)
 			page = dax_busy_page(entry);
 		put_unlocked_entry(&xas, entry);
@@ -621,7 +626,7 @@ static int __dax_invalidate_entry(struct address_space *mapping,
 	void *entry;
 
 	xas_lock_irq(&xas);
-	entry = get_unlocked_entry(&xas);
+	entry = get_unlocked_entry(&xas, 0);
 	if (!entry || WARN_ON_ONCE(!xa_is_value(entry)))
 		goto out;
 	if (!trunc &&
@@ -849,8 +854,11 @@ static int dax_writeback_one(struct xa_state *xas, struct dax_device *dax_dev,
 	if (unlikely(dax_is_locked(entry))) {
 		void *old_entry = entry;
 
-		entry = get_unlocked_entry(xas);
+		entry = get_unlocked_entry(xas, dax_entry_order(entry));
 
+		/* Did a PMD entry get split? */
+		if (entry == XA_DAX_CONFLICT_ENTRY)
+			goto put_unlocked;
 		/* Entry got punched out / reallocated? */
 		if (!entry || WARN_ON_ONCE(!xa_is_value(entry)))
 			goto put_unlocked;
@@ -1510,7 +1518,7 @@ static vm_fault_t dax_iomap_pmd_fault(struct vm_fault *vmf, pfn_t *pfnp,
 	 * entry is already in the array, for instance), it will return
 	 * VM_FAULT_FALLBACK.
 	 */
-	entry = grab_mapping_entry(&xas, mapping, DAX_PMD);
+	entry = grab_mapping_entry(&xas, mapping, PMD_ORDER);
 	if (xa_is_internal(entry)) {
 		result = xa_to_internal(entry);
 		goto fallback;
@@ -1659,11 +1667,10 @@ dax_insert_pfn_mkwrite(struct vm_fault *vmf, pfn_t pfn, unsigned int order)
 	vm_fault_t ret;
 
 	xas_lock_irq(&xas);
-	entry = get_unlocked_entry(&xas);
+	entry = get_unlocked_entry(&xas, order);
 	/* Did we race with someone splitting entry or so? */
-	if (!entry ||
-	    (order == 0 && !dax_is_pte_entry(entry)) ||
-	    (order == PMD_ORDER && !dax_is_pmd_entry(entry))) {
+	if (!entry || entry == XA_DAX_CONFLICT_ENTRY ||
+	    (order == 0 && !dax_is_pte_entry(entry))) {
 		put_unlocked_entry(&xas, entry);
 		xas_unlock_irq(&xas);
 		trace_dax_insert_pfn_mkwrite_no_entry(mapping->host, vmf,
diff --git a/include/linux/xarray.h b/include/linux/xarray.h
index 052e06ff4c36..fb25452bcfa4 100644
--- a/include/linux/xarray.h
+++ b/include/linux/xarray.h
@@ -169,7 +169,9 @@ static inline bool xa_is_internal(const void *entry)
 	return ((unsigned long)entry & 3) == 2;
 }
 
+#define XA_RETRY_ENTRY		xa_mk_internal(256)
 #define XA_ZERO_ENTRY		xa_mk_internal(257)
+#define XA_DAX_CONFLICT_ENTRY	xa_mk_internal(258)
 
 /**
  * xa_is_zero() - Is the entry a zero entry?
@@ -1213,8 +1215,6 @@ static inline bool xa_is_sibling(const void *entry)
 		(entry < xa_mk_sibling(XA_CHUNK_SIZE - 1));
 }
 
-#define XA_RETRY_ENTRY		xa_mk_internal(256)
-
 /**
  * xa_is_retry() - Is the entry a retry entry?
  * @entry: Entry retrieved from the XArray
-- 
2.20.1
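
For reference on the xarray.h hunk: internal entries are pointer-sized
values whose low two bits are 0b10, so they can never collide with a
real pointer or a user-visible value entry.  A sketch matching
xa_mk_internal() and xa_to_internal() in include/linux/xarray.h, with
the resulting constants spelled out:

	static inline void *xa_mk_internal(unsigned long v)
	{
		return (void *)((v << 2) | 2);	/* low bits 10 = internal */
	}

	static inline unsigned long xa_to_internal(const void *entry)
	{
		return (unsigned long)entry >> 2;
	}

	/* Three distinct, never-dereferenced marker values: */
	/*   XA_RETRY_ENTRY        == xa_mk_internal(256) == (void *)0x402 */
	/*   XA_ZERO_ENTRY         == xa_mk_internal(257) == (void *)0x406 */
	/*   XA_DAX_CONFLICT_ENTRY == xa_mk_internal(258) == (void *)0x40a */

Moving XA_RETRY_ENTRY up beside XA_ZERO_ENTRY keeps the reserved
markers defined in one place now that a third one exists.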

