From: Michael Baum <michaelba@nvidia.com>
To: dev@dpdk.org
Cc: Matan Azrad <matan@nvidia.com>,
	Raslan Darawsheh <rasland@nvidia.com>,
	Viacheslav Ovsiienko <viacheslavo@nvidia.com>
Subject: [dpdk-dev] [PATCH v3 10/19] net/mlx5: move Rx CQ creation to common
Date: Wed,  6 Jan 2021 08:19:32 +0000	[thread overview]
Message-ID: <1609921181-5019-11-git-send-email-michaelba@nvidia.com> (raw)
In-Reply-To: <1609921181-5019-1-git-send-email-michaelba@nvidia.com>

Use the common function for Rx CQ creation.

Signed-off-by: Michael Baum <michaelba@nvidia.com>
Acked-by: Matan Azrad <matan@nvidia.com>
---
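Note (illustrative only, not part of the commit): a minimal sketch of how the
Rx CQ is created after this change, using the common DevX CQ helpers added
earlier in this series. The structure and function names below are taken from
the hunks in this patch; the wrapper function and the header path are
assumptions made for the example.

/*
 * Sketch only: how the common helpers replace the hand-rolled CQ setup.
 * mlx5_common_devx.h is the assumed home of struct mlx5_devx_cq and the
 * mlx5_devx_cq_create()/mlx5_devx_cq_destroy() helpers.
 */
#include <mlx5_common_devx.h>

#include "mlx5.h"
#include "mlx5_rxtx.h"

static int
rxq_cq_create_sketch(struct mlx5_dev_ctx_shared *sh,
		     struct mlx5_rxq_ctrl *rxq_ctrl, unsigned int cqe_n)
{
	struct mlx5_devx_cq_attr cq_attr = { 0 };
	struct mlx5_devx_cq *cq_obj = &rxq_ctrl->obj->cq_obj;
	struct mlx5_rxq_data *rxq_data = &rxq_ctrl->rxq;
	int ret;

	/*
	 * One call now allocates the CQE buffer, registers the umem,
	 * takes a door-bell record and creates the CQ DevX object,
	 * replacing the per-driver allocation code removed below.
	 */
	cq_attr.uar_page_id = mlx5_os_get_devx_uar_page_id(sh->devx_rx_uar);
	ret = mlx5_devx_cq_create(sh->ctx, cq_obj, log2above(cqe_n),
				  &cq_attr, sh->numa_node);
	if (ret)
		return ret;
	/* The Rx queue data points straight at the helper's output. */
	rxq_data->cqes = (volatile struct mlx5_cqe (*)[])
						(uintptr_t)cq_obj->cqes;
	rxq_data->cq_db = cq_obj->db_rec;
	rxq_data->cqn = cq_obj->cq->id;
	return 0;
}

Teardown becomes symmetric as well: a single mlx5_devx_cq_destroy() on
rxq_ctrl->obj->cq_obj replaces the open-coded CQE buffer, umem and door-bell
release that this patch removes.
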
 drivers/net/mlx5/mlx5.c      |   8 ---
 drivers/net/mlx5/mlx5.h      |   3 +-
 drivers/net/mlx5/mlx5_devx.c | 142 +++++++++++++------------------------------
 drivers/net/mlx5/mlx5_rxtx.h |   4 --
 4 files changed, 42 insertions(+), 115 deletions(-)

diff --git a/drivers/net/mlx5/mlx5.c b/drivers/net/mlx5/mlx5.c
index 91492c5..c0a36d6 100644
--- a/drivers/net/mlx5/mlx5.c
+++ b/drivers/net/mlx5/mlx5.c
@@ -936,14 +936,6 @@ struct mlx5_dev_ctx_shared *
 		goto error;
 	}
 	if (sh->devx) {
-		/* Query the EQN for this core. */
-		err = mlx5_glue->devx_query_eqn(sh->ctx, 0, &sh->eqn);
-		if (err) {
-			rte_errno = errno;
-			DRV_LOG(ERR, "Failed to query event queue number %d.",
-				rte_errno);
-			goto error;
-		}
 		err = mlx5_os_get_pdn(sh->pd, &sh->pdn);
 		if (err) {
 			DRV_LOG(ERR, "Fail to extract pdn from PD");
diff --git a/drivers/net/mlx5/mlx5.h b/drivers/net/mlx5/mlx5.h
index e61154b..079cbca 100644
--- a/drivers/net/mlx5/mlx5.h
+++ b/drivers/net/mlx5/mlx5.h
@@ -683,7 +683,6 @@ struct mlx5_dev_ctx_shared {
 	uint16_t bond_dev; /* Bond primary device id. */
 	uint32_t devx:1; /* Opened with DV. */
 	uint32_t flow_hit_aso_en:1; /* Flow Hit ASO is supported. */
-	uint32_t eqn; /* Event Queue number. */
 	uint32_t max_port; /* Maximal IB device port index. */
 	void *ctx; /* Verbs/DV/DevX context. */
 	void *pd; /* Protection Domain. */
@@ -791,7 +790,7 @@ struct mlx5_rxq_obj {
 		};
 		struct {
 			struct mlx5_devx_obj *rq; /* DevX Rx Queue object. */
-			struct mlx5_devx_obj *devx_cq; /* DevX CQ object. */
+			struct mlx5_devx_cq cq_obj; /* DevX CQ object. */
 			void *devx_channel;
 		};
 	};
diff --git a/drivers/net/mlx5/mlx5_devx.c b/drivers/net/mlx5/mlx5_devx.c
index af0383c..913f169 100644
--- a/drivers/net/mlx5/mlx5_devx.c
+++ b/drivers/net/mlx5/mlx5_devx.c
@@ -172,30 +172,17 @@
 }
 
 /**
- * Release the resources allocated for the Rx CQ DevX object.
+ * Destroy the Rx queue DevX object.
  *
- * @param rxq_ctrl
- *   DevX Rx queue object.
+ * @param rxq_obj
+ *   Rxq object to destroy.
  */
 static void
-mlx5_rxq_release_devx_cq_resources(struct mlx5_rxq_ctrl *rxq_ctrl)
+mlx5_rxq_release_devx_resources(struct mlx5_rxq_ctrl *rxq_ctrl)
 {
-	struct mlx5_devx_dbr_page *dbr_page = rxq_ctrl->cq_dbrec_page;
-
-	if (rxq_ctrl->cq_umem) {
-		mlx5_os_umem_dereg(rxq_ctrl->cq_umem);
-		rxq_ctrl->cq_umem = NULL;
-	}
-	if (rxq_ctrl->rxq.cqes) {
-		rte_free((void *)(uintptr_t)rxq_ctrl->rxq.cqes);
-		rxq_ctrl->rxq.cqes = NULL;
-	}
-	if (dbr_page) {
-		claim_zero(mlx5_release_dbr(&rxq_ctrl->priv->dbrpgs,
-					    mlx5_os_get_umem_id(dbr_page->umem),
-					    rxq_ctrl->cq_dbr_offset));
-		rxq_ctrl->cq_dbrec_page = NULL;
-	}
+	mlx5_rxq_release_devx_rq_resources(rxq_ctrl);
+	mlx5_devx_cq_destroy(&rxq_ctrl->obj->cq_obj);
+	memset(&rxq_ctrl->obj->cq_obj, 0, sizeof(rxq_ctrl->obj->cq_obj));
 }
 
 /**
@@ -213,14 +200,12 @@
 		mlx5_devx_modify_rq(rxq_obj, MLX5_RXQ_MOD_RDY2RST);
 		claim_zero(mlx5_devx_cmd_destroy(rxq_obj->rq));
 	} else {
-		MLX5_ASSERT(rxq_obj->devx_cq);
+		MLX5_ASSERT(rxq_obj->cq_obj);
 		claim_zero(mlx5_devx_cmd_destroy(rxq_obj->rq));
-		claim_zero(mlx5_devx_cmd_destroy(rxq_obj->devx_cq));
 		if (rxq_obj->devx_channel)
 			mlx5_os_devx_destroy_event_channel
 							(rxq_obj->devx_channel);
-		mlx5_rxq_release_devx_rq_resources(rxq_obj->rxq_ctrl);
-		mlx5_rxq_release_devx_cq_resources(rxq_obj->rxq_ctrl);
+		mlx5_rxq_release_devx_resources(rxq_obj->rxq_ctrl);
 	}
 }
 
@@ -249,7 +234,7 @@
 		rte_errno = errno;
 		return -rte_errno;
 	}
-	if (out.event_resp.cookie != (uint64_t)(uintptr_t)rxq_obj->devx_cq) {
+	if (out.event_resp.cookie != (uint64_t)(uintptr_t)rxq_obj->cq_obj.cq) {
 		rte_errno = EINVAL;
 		return -rte_errno;
 	}
@@ -327,7 +312,7 @@
 		container_of(rxq_data, struct mlx5_rxq_ctrl, rxq);
 	struct mlx5_devx_create_rq_attr rq_attr = { 0 };
 	uint32_t wqe_n = 1 << (rxq_data->elts_n - rxq_data->sges_n);
-	uint32_t cqn = rxq_ctrl->obj->devx_cq->id;
+	uint32_t cqn = rxq_ctrl->obj->cq_obj.cq->id;
 	struct mlx5_devx_dbr_page *dbr_page;
 	int64_t dbr_offset;
 	uint32_t wq_size = 0;
@@ -410,31 +395,23 @@
  *   Queue index in DPDK Rx queue array.
  *
  * @return
- *   The DevX CQ object initialized, NULL otherwise and rte_errno is set.
+ *   0 on success, a negative errno value otherwise and rte_errno is set.
  */
-static struct mlx5_devx_obj *
+static int
 mlx5_rxq_create_devx_cq_resources(struct rte_eth_dev *dev, uint16_t idx)
 {
-	struct mlx5_devx_obj *cq_obj = 0;
+	struct mlx5_devx_cq *cq_obj = 0;
 	struct mlx5_devx_cq_attr cq_attr = { 0 };
 	struct mlx5_priv *priv = dev->data->dev_private;
+	struct mlx5_dev_ctx_shared *sh = priv->sh;
 	struct mlx5_rxq_data *rxq_data = (*priv->rxqs)[idx];
 	struct mlx5_rxq_ctrl *rxq_ctrl =
 		container_of(rxq_data, struct mlx5_rxq_ctrl, rxq);
-	size_t page_size = rte_mem_page_size();
 	unsigned int cqe_n = mlx5_rxq_cqe_num(rxq_data);
-	struct mlx5_devx_dbr_page *dbr_page;
-	int64_t dbr_offset;
-	void *buf = NULL;
-	uint16_t event_nums[1] = {0};
 	uint32_t log_cqe_n;
-	uint32_t cq_size;
+	uint16_t event_nums[1] = { 0 };
 	int ret = 0;
 
-	if (page_size == (size_t)-1) {
-		DRV_LOG(ERR, "Failed to get page_size.");
-		goto error;
-	}
 	if (priv->config.cqe_comp && !rxq_data->hw_timestamp &&
 	    !rxq_data->lro) {
 		cq_attr.cqe_comp_en = 1u;
@@ -487,71 +464,37 @@
 			"Port %u Rx CQE compression is disabled for LRO.",
 			dev->data->port_id);
 	}
+	cq_attr.uar_page_id = mlx5_os_get_devx_uar_page_id(sh->devx_rx_uar);
 	log_cqe_n = log2above(cqe_n);
-	cq_size = sizeof(struct mlx5_cqe) * (1 << log_cqe_n);
-	buf = rte_calloc_socket(__func__, 1, cq_size, page_size,
-				rxq_ctrl->socket);
-	if (!buf) {
-		DRV_LOG(ERR, "Failed to allocate memory for CQ.");
-		goto error;
-	}
-	rxq_data->cqes = (volatile struct mlx5_cqe (*)[])(uintptr_t)buf;
-	rxq_ctrl->cq_umem = mlx5_os_umem_reg(priv->sh->ctx, buf,
-						     cq_size,
-						     IBV_ACCESS_LOCAL_WRITE);
-	if (!rxq_ctrl->cq_umem) {
-		DRV_LOG(ERR, "Failed to register umem for CQ.");
-		goto error;
-	}
-	/* Allocate CQ door-bell. */
-	dbr_offset = mlx5_get_dbr(priv->sh->ctx, &priv->dbrpgs, &dbr_page);
-	if (dbr_offset < 0) {
-		DRV_LOG(ERR, "Failed to allocate CQ door-bell.");
-		goto error;
-	}
-	rxq_ctrl->cq_dbr_offset = dbr_offset;
-	rxq_ctrl->cq_dbrec_page = dbr_page;
-	rxq_data->cq_db = (uint32_t *)((uintptr_t)dbr_page->dbrs +
-			  (uintptr_t)rxq_ctrl->cq_dbr_offset);
-	rxq_data->cq_uar =
-			mlx5_os_get_devx_uar_base_addr(priv->sh->devx_rx_uar);
 	/* Create CQ using DevX API. */
-	cq_attr.eqn = priv->sh->eqn;
-	cq_attr.uar_page_id =
-			mlx5_os_get_devx_uar_page_id(priv->sh->devx_rx_uar);
-	cq_attr.q_umem_id = mlx5_os_get_umem_id(rxq_ctrl->cq_umem);
-	cq_attr.q_umem_valid = 1;
-	cq_attr.log_cq_size = log_cqe_n;
-	cq_attr.log_page_size = rte_log2_u32(page_size);
-	cq_attr.db_umem_offset = rxq_ctrl->cq_dbr_offset;
-	cq_attr.db_umem_id = mlx5_os_get_umem_id(dbr_page->umem);
-	cq_attr.db_umem_valid = 1;
-	cq_obj = mlx5_devx_cmd_create_cq(priv->sh->ctx, &cq_attr);
-	if (!cq_obj)
-		goto error;
+	ret = mlx5_devx_cq_create(sh->ctx, &rxq_ctrl->obj->cq_obj, log_cqe_n,
+				  &cq_attr, sh->numa_node);
+	if (ret)
+		return ret;
+	cq_obj = &rxq_ctrl->obj->cq_obj;
+	rxq_data->cqes = (volatile struct mlx5_cqe (*)[])
+							(uintptr_t)cq_obj->cqes;
+	rxq_data->cq_db = cq_obj->db_rec;
+	rxq_data->cq_uar = mlx5_os_get_devx_uar_base_addr(sh->devx_rx_uar);
 	rxq_data->cqe_n = log_cqe_n;
-	rxq_data->cqn = cq_obj->id;
+	rxq_data->cqn = cq_obj->cq->id;
 	if (rxq_ctrl->obj->devx_channel) {
 		ret = mlx5_os_devx_subscribe_devx_event
-						(rxq_ctrl->obj->devx_channel,
-						 cq_obj->obj,
-						 sizeof(event_nums),
-						 event_nums,
-						 (uint64_t)(uintptr_t)cq_obj);
+					      (rxq_ctrl->obj->devx_channel,
+					       cq_obj->cq->obj,
+					       sizeof(event_nums),
+					       event_nums,
+					       (uint64_t)(uintptr_t)cq_obj->cq);
 		if (ret) {
 			DRV_LOG(ERR, "Fail to subscribe CQ to event channel.");
-			rte_errno = errno;
-			goto error;
+			ret = errno;
+			mlx5_devx_cq_destroy(cq_obj);
+			memset(cq_obj, 0, sizeof(*cq_obj));
+			rte_errno = ret;
+			return -ret;
 		}
 	}
-	/* Initialise CQ to 1's to mark HW ownership for all CQEs. */
-	memset((void *)(uintptr_t)rxq_data->cqes, 0xFF, cq_size);
-	return cq_obj;
-error:
-	if (cq_obj)
-		mlx5_devx_cmd_destroy(cq_obj);
-	mlx5_rxq_release_devx_cq_resources(rxq_ctrl);
-	return NULL;
+	return 0;
 }
 
 /**
@@ -655,8 +598,8 @@
 		tmpl->fd = mlx5_os_get_devx_channel_fd(tmpl->devx_channel);
 	}
 	/* Create CQ using DevX API. */
-	tmpl->devx_cq = mlx5_rxq_create_devx_cq_resources(dev, idx);
-	if (!tmpl->devx_cq) {
+	ret = mlx5_rxq_create_devx_cq_resources(dev, idx);
+	if (ret) {
 		DRV_LOG(ERR, "Failed to create CQ.");
 		goto error;
 	}
@@ -682,12 +625,9 @@
 	ret = rte_errno; /* Save rte_errno before cleanup. */
 	if (tmpl->rq)
 		claim_zero(mlx5_devx_cmd_destroy(tmpl->rq));
-	if (tmpl->devx_cq)
-		claim_zero(mlx5_devx_cmd_destroy(tmpl->devx_cq));
 	if (tmpl->devx_channel)
 		mlx5_os_devx_destroy_event_channel(tmpl->devx_channel);
-	mlx5_rxq_release_devx_rq_resources(rxq_ctrl);
-	mlx5_rxq_release_devx_cq_resources(rxq_ctrl);
+	mlx5_rxq_release_devx_resources(rxq_ctrl);
 	rte_errno = ret; /* Restore rte_errno. */
 	return -rte_errno;
 }
diff --git a/drivers/net/mlx5/mlx5_rxtx.h b/drivers/net/mlx5/mlx5_rxtx.h
index 1e9345a..aba9541 100644
--- a/drivers/net/mlx5/mlx5_rxtx.h
+++ b/drivers/net/mlx5/mlx5_rxtx.h
@@ -196,11 +196,7 @@ struct mlx5_rxq_ctrl {
 	struct mlx5_devx_dbr_page *rq_dbrec_page;
 	uint64_t rq_dbr_offset;
 	/* Storing RQ door-bell information, needed when freeing door-bell. */
-	struct mlx5_devx_dbr_page *cq_dbrec_page;
-	uint64_t cq_dbr_offset;
-	/* Storing CQ door-bell information, needed when freeing door-bell. */
 	void *wq_umem; /* WQ buffer registration info. */
-	void *cq_umem; /* CQ buffer registration info. */
 	struct rte_eth_hairpin_conf hairpin_conf; /* Hairpin configuration. */
 	uint32_t hairpin_status; /* Hairpin binding status. */
 };
-- 
1.8.3.1


Thread overview: 57+ messages
2020-12-17 11:44 [dpdk-dev] [PATCH 00/17] common/mlx5: share DevX resources creations Michael Baum
2020-12-17 11:44 ` [dpdk-dev] [PATCH 01/17] net/mlx5: fix ASO SQ creation error flow Michael Baum
2020-12-29  8:52   ` [dpdk-dev] [PATCH v2 00/17] common/mlx5: share DevX resources creations Michael Baum
2020-12-29  8:52     ` [dpdk-dev] [PATCH v2 01/17] net/mlx5: fix ASO SQ creation error flow Michael Baum
2021-01-06  8:19       ` [dpdk-dev] [PATCH v3 00/19] common/mlx5: share DevX resources creations Michael Baum
2021-01-06  8:19         ` [dpdk-dev] [PATCH v3 01/19] common/mlx5: fix completion queue entry size configuration Michael Baum
2021-01-06  8:19         ` [dpdk-dev] [PATCH v3 02/19] net/mlx5: remove CQE padding device argument Michael Baum
2021-01-06  8:19         ` [dpdk-dev] [PATCH v3 03/19] net/mlx5: fix ASO SQ creation error flow Michael Baum
2021-01-06  8:19         ` [dpdk-dev] [PATCH v3 04/19] common/mlx5: share DevX CQ creation Michael Baum
2021-01-06  8:19         ` [dpdk-dev] [PATCH v3 05/19] regex/mlx5: move DevX CQ creation to common Michael Baum
2021-01-06  8:19         ` [dpdk-dev] [PATCH v3 06/19] vdpa/mlx5: " Michael Baum
2021-01-06  8:19         ` [dpdk-dev] [PATCH v3 07/19] net/mlx5: move rearm and clock queue " Michael Baum
2021-01-06  8:19         ` [dpdk-dev] [PATCH v3 08/19] net/mlx5: move ASO " Michael Baum
2021-01-06  8:19         ` [dpdk-dev] [PATCH v3 09/19] net/mlx5: move Tx " Michael Baum
2021-01-06  8:19         ` Michael Baum [this message]
2021-01-06  8:19         ` [dpdk-dev] [PATCH v3 11/19] common/mlx5: enhance page size configuration Michael Baum
2021-01-06  8:19         ` [dpdk-dev] [PATCH v3 12/19] common/mlx5: share DevX SQ creation Michael Baum
2021-01-06  8:19         ` [dpdk-dev] [PATCH v3 13/19] regex/mlx5: move DevX SQ creation to common Michael Baum
2021-01-06  8:19         ` [dpdk-dev] [PATCH v3 14/19] net/mlx5: move rearm and clock queue " Michael Baum
2021-01-06  8:19         ` [dpdk-dev] [PATCH v3 15/19] net/mlx5: move Tx " Michael Baum
2021-01-06  8:19         ` [dpdk-dev] [PATCH v3 16/19] net/mlx5: move ASO " Michael Baum
2021-01-06  8:19         ` [dpdk-dev] [PATCH v3 17/19] common/mlx5: share DevX RQ creation Michael Baum
2021-01-06  8:19         ` [dpdk-dev] [PATCH v3 18/19] net/mlx5: move Rx RQ creation to common Michael Baum
2021-01-06  8:19         ` [dpdk-dev] [PATCH v3 19/19] common/mlx5: remove doorbell allocation API Michael Baum
2021-01-12 21:39         ` [dpdk-dev] [PATCH v3 00/19] common/mlx5: share DevX resources creations Thomas Monjalon
2020-12-29  8:52     ` [dpdk-dev] [PATCH v2 02/17] common/mlx5: share DevX CQ creation Michael Baum
2020-12-29  8:52     ` [dpdk-dev] [PATCH v2 03/17] regex/mlx5: move DevX CQ creation to common Michael Baum
2020-12-29  8:52     ` [dpdk-dev] [PATCH v2 04/17] vdpa/mlx5: " Michael Baum
2020-12-29  8:52     ` [dpdk-dev] [PATCH v2 05/17] net/mlx5: move rearm and clock queue " Michael Baum
2020-12-29  8:52     ` [dpdk-dev] [PATCH v2 06/17] net/mlx5: move ASO " Michael Baum
2020-12-29  8:52     ` [dpdk-dev] [PATCH v2 07/17] net/mlx5: move Tx " Michael Baum
2020-12-29  8:52     ` [dpdk-dev] [PATCH v2 08/17] net/mlx5: move Rx " Michael Baum
2020-12-29  8:52     ` [dpdk-dev] [PATCH v2 09/17] common/mlx5: enhance page size configuration Michael Baum
2020-12-29  8:52     ` [dpdk-dev] [PATCH v2 10/17] common/mlx5: share DevX SQ creation Michael Baum
2020-12-29  8:52     ` [dpdk-dev] [PATCH v2 11/17] regex/mlx5: move DevX SQ creation to common Michael Baum
2020-12-29  8:52     ` [dpdk-dev] [PATCH v2 12/17] net/mlx5: move rearm and clock queue " Michael Baum
2020-12-29  8:52     ` [dpdk-dev] [PATCH v2 13/17] net/mlx5: move Tx " Michael Baum
2020-12-29  8:52     ` [dpdk-dev] [PATCH v2 14/17] net/mlx5: move ASO " Michael Baum
2020-12-29  8:52     ` [dpdk-dev] [PATCH v2 15/17] common/mlx5: share DevX RQ creation Michael Baum
2020-12-29  8:52     ` [dpdk-dev] [PATCH v2 16/17] net/mlx5: move Rx RQ creation to common Michael Baum
2020-12-29  8:52     ` [dpdk-dev] [PATCH v2 17/17] common/mlx5: remove doorbell allocation API Michael Baum
2020-12-17 11:44 ` [dpdk-dev] [PATCH 02/17] common/mlx5: share DevX CQ creation Michael Baum
2020-12-17 11:44 ` [dpdk-dev] [PATCH 03/17] regex/mlx5: move DevX CQ creation to common Michael Baum
2020-12-17 11:44 ` [dpdk-dev] [PATCH 04/17] vdpa/mlx5: " Michael Baum
2020-12-17 11:44 ` [dpdk-dev] [PATCH 05/17] net/mlx5: move rearm and clock queue " Michael Baum
2020-12-17 11:44 ` [dpdk-dev] [PATCH 06/17] net/mlx5: move ASO " Michael Baum
2020-12-17 11:44 ` [dpdk-dev] [PATCH 07/17] net/mlx5: move Tx " Michael Baum
2020-12-17 11:44 ` [dpdk-dev] [PATCH 08/17] net/mlx5: move Rx " Michael Baum
2020-12-17 11:44 ` [dpdk-dev] [PATCH 09/17] common/mlx5: enhance page size configuration Michael Baum
2020-12-17 11:44 ` [dpdk-dev] [PATCH 10/17] common/mlx5: share DevX SQ creation Michael Baum
2020-12-17 11:44 ` [dpdk-dev] [PATCH 11/17] regex/mlx5: move DevX SQ creation to common Michael Baum
2020-12-17 11:44 ` [dpdk-dev] [PATCH 12/17] net/mlx5: move rearm and clock queue " Michael Baum
2020-12-17 11:44 ` [dpdk-dev] [PATCH 13/17] net/mlx5: move Tx " Michael Baum
2020-12-17 11:44 ` [dpdk-dev] [PATCH 14/17] net/mlx5: move ASO " Michael Baum
2020-12-17 11:44 ` [dpdk-dev] [PATCH 15/17] common/mlx5: share DevX RQ creation Michael Baum
2020-12-17 11:44 ` [dpdk-dev] [PATCH 16/17] net/mlx5: move Rx RQ creation to common Michael Baum
2020-12-17 11:44 ` [dpdk-dev] [PATCH 17/17] common/mlx5: remove doorbell allocation API Michael Baum
