All of lore.kernel.org
 help / color / mirror / Atom feed
From: Josh Durgin <josh.durgin@inktank.com>
To: Alex Elder <elder@inktank.com>
Cc: ceph-devel@vger.kernel.org
Subject: Re: [PATCH 12/16] rbd: use rbd_dev consistently
Date: Wed, 11 Jul 2012 13:56:19 -0700	[thread overview]
Message-ID: <4FFDE873.3000703@inktank.com> (raw)
In-Reply-To: <4FFD877F.404@inktank.com>

Reviewed-by: Josh Durgin <josh.durgin@inktank.com>

On 07/11/2012 07:02 AM, Alex Elder wrote:
> Most variables that represent a struct rbd_device are named
> "rbd_dev", but in some cases "dev" is used instead.  Change all the
> "dev" references so they use "rbd_dev" consistently, to make it
> clear from the name that we're working with an RBD device (as
> opposed to, for example, a struct device).  Similarly, change the
> name of the "dev" field in struct rbd_notify_info to be "rbd_dev".
>
> Signed-off-by: Alex Elder <elder@inktank.com>
> ---
>   drivers/block/rbd.c |  125
> ++++++++++++++++++++++++++-------------------------
>   1 files changed, 64 insertions(+), 61 deletions(-)
>
> diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
> index 90fb388..c38246b 100644
> --- a/drivers/block/rbd.c
> +++ b/drivers/block/rbd.c
> @@ -581,35 +581,36 @@ static int snap_by_name(struct rbd_image_header
> *header, const char *snap_name,
>   	return -ENOENT;
>   }
>
> -static int rbd_header_set_snap(struct rbd_device *dev, u64 *size)
> +static int rbd_header_set_snap(struct rbd_device *rbd_dev, u64 *size)
>   {
> -	struct rbd_image_header *header = &dev->header;
> +	struct rbd_image_header *header = &rbd_dev->header;
>   	struct ceph_snap_context *snapc = header->snapc;
>   	int ret = -ENOENT;
>
> -	down_write(&dev->header_rwsem);
> +	down_write(&rbd_dev->header_rwsem);
>
> -	if (!memcmp(dev->snap_name, RBD_SNAP_HEAD_NAME,
> +	if (!memcmp(rbd_dev->snap_name, RBD_SNAP_HEAD_NAME,
>   		    sizeof (RBD_SNAP_HEAD_NAME))) {
>   		if (header->total_snaps)
>   			snapc->seq = header->snap_seq;
>   		else
>   			snapc->seq = 0;
> -		dev->snap_id = CEPH_NOSNAP;
> -		dev->read_only = 0;
> +		rbd_dev->snap_id = CEPH_NOSNAP;
> +		rbd_dev->read_only = 0;
>   		if (size)
>   			*size = header->image_size;
>   	} else {
> -		ret = snap_by_name(header, dev->snap_name, &snapc->seq, size);
> +		ret = snap_by_name(header, rbd_dev->snap_name,
> +					&snapc->seq, size);
>   		if (ret < 0)
>   			goto done;
> -		dev->snap_id = snapc->seq;
> -		dev->read_only = 1;
> +		rbd_dev->snap_id = snapc->seq;
> +		rbd_dev->read_only = 1;
>   	}
>
>   	ret = 0;
>   done:
> -	up_write(&dev->header_rwsem);
> +	up_write(&rbd_dev->header_rwsem);
>   	return ret;
>   }
>
> @@ -853,7 +854,7 @@ static void rbd_coll_end_req(struct rbd_request *req,
>    * Send ceph osd request
>    */
>   static int rbd_do_request(struct request *rq,
> -			  struct rbd_device *dev,
> +			  struct rbd_device *rbd_dev,
>   			  struct ceph_snap_context *snapc,
>   			  u64 snapid,
>   			  const char *obj, u64 ofs, u64 len,
> @@ -894,13 +895,13 @@ static int rbd_do_request(struct request *rq,
>
>   	dout("rbd_do_request obj=%s ofs=%lld len=%lld\n", obj, len, ofs);
>
> -	down_read(&dev->header_rwsem);
> +	down_read(&rbd_dev->header_rwsem);
>
> -	osdc = &dev->rbd_client->client->osdc;
> +	osdc = &rbd_dev->rbd_client->client->osdc;
>   	req = ceph_osdc_alloc_request(osdc, flags, snapc, ops,
>   					false, GFP_NOIO, pages, bio);
>   	if (!req) {
> -		up_read(&dev->header_rwsem);
> +		up_read(&rbd_dev->header_rwsem);
>   		ret = -ENOMEM;
>   		goto done_pages;
>   	}
> @@ -925,7 +926,7 @@ static int rbd_do_request(struct request *rq,
>   	layout->fl_stripe_unit = cpu_to_le32(1 << RBD_MAX_OBJ_ORDER);
>   	layout->fl_stripe_count = cpu_to_le32(1);
>   	layout->fl_object_size = cpu_to_le32(1 << RBD_MAX_OBJ_ORDER);
> -	layout->fl_pg_pool = cpu_to_le32(dev->pool_id);
> +	layout->fl_pg_pool = cpu_to_le32(rbd_dev->pool_id);
>   	ceph_calc_raw_layout(osdc, layout, snapid, ofs, &len, &bno,
>   				req, ops);
>
> @@ -934,7 +935,7 @@ static int rbd_do_request(struct request *rq,
>   				snapc,
>   				&mtime,
>   				req->r_oid, req->r_oid_len);
> -	up_read(&dev->header_rwsem);
> +	up_read(&rbd_dev->header_rwsem);
>
>   	if (linger_req) {
>   		ceph_osdc_set_request_linger(osdc, req);
> @@ -1011,7 +1012,7 @@ static void rbd_simple_req_cb(struct
> ceph_osd_request *req, struct ceph_msg *msg
>   /*
>    * Do a synchronous ceph osd operation
>    */
> -static int rbd_req_sync_op(struct rbd_device *dev,
> +static int rbd_req_sync_op(struct rbd_device *rbd_dev,
>   			   struct ceph_snap_context *snapc,
>   			   u64 snapid,
>   			   int opcode,
> @@ -1048,7 +1049,7 @@ static int rbd_req_sync_op(struct rbd_device *dev,
>   		}
>   	}
>
> -	ret = rbd_do_request(NULL, dev, snapc, snapid,
> +	ret = rbd_do_request(NULL, rbd_dev, snapc, snapid,
>   			  obj, ofs, len, NULL,
>   			  pages, num_pages,
>   			  flags,
> @@ -1075,7 +1076,7 @@ done:
>    * Do an asynchronous ceph osd operation
>    */
>   static int rbd_do_op(struct request *rq,
> -		     struct rbd_device *rbd_dev ,
> +		     struct rbd_device *rbd_dev,
>   		     struct ceph_snap_context *snapc,
>   		     u64 snapid,
>   		     int opcode, int flags, int num_reply,
> @@ -1167,7 +1168,7 @@ static int rbd_req_read(struct request *rq,
>   /*
>    * Request sync osd read
>    */
> -static int rbd_req_sync_read(struct rbd_device *dev,
> +static int rbd_req_sync_read(struct rbd_device *rbd_dev,
>   			  struct ceph_snap_context *snapc,
>   			  u64 snapid,
>   			  const char *obj,
> @@ -1175,7 +1176,7 @@ static int rbd_req_sync_read(struct rbd_device *dev,
>   			  char *buf,
>   			  u64 *ver)
>   {
> -	return rbd_req_sync_op(dev, NULL,
> +	return rbd_req_sync_op(rbd_dev, NULL,
>   			       snapid,
>   			       CEPH_OSD_OP_READ,
>   			       CEPH_OSD_FLAG_READ,
> @@ -1186,7 +1187,7 @@ static int rbd_req_sync_read(struct rbd_device *dev,
>   /*
>    * Request sync osd watch
>    */
> -static int rbd_req_sync_notify_ack(struct rbd_device *dev,
> +static int rbd_req_sync_notify_ack(struct rbd_device *rbd_dev,
>   				   u64 ver,
>   				   u64 notify_id,
>   				   const char *obj)
> @@ -1198,11 +1199,11 @@ static int rbd_req_sync_notify_ack(struct
> rbd_device *dev,
>   	if (ret < 0)
>   		return ret;
>
> -	ops[0].watch.ver = cpu_to_le64(dev->header.obj_version);
> +	ops[0].watch.ver = cpu_to_le64(rbd_dev->header.obj_version);
>   	ops[0].watch.cookie = notify_id;
>   	ops[0].watch.flag = 0;
>
> -	ret = rbd_do_request(NULL, dev, NULL, CEPH_NOSNAP,
> +	ret = rbd_do_request(NULL, rbd_dev, NULL, CEPH_NOSNAP,
>   			  obj, 0, 0, NULL,
>   			  NULL, 0,
>   			  CEPH_OSD_FLAG_READ,
> @@ -1217,54 +1218,54 @@ static int rbd_req_sync_notify_ack(struct
> rbd_device *dev,
>
>   static void rbd_watch_cb(u64 ver, u64 notify_id, u8 opcode, void *data)
>   {
> -	struct rbd_device *dev = (struct rbd_device *)data;
> +	struct rbd_device *rbd_dev = (struct rbd_device *)data;
>   	int rc;
>
> -	if (!dev)
> +	if (!rbd_dev)
>   		return;
>
> -	dout("rbd_watch_cb %s notify_id=%lld opcode=%d\n", dev->obj_md_name,
> +	dout("rbd_watch_cb %s notify_id=%lld opcode=%d\n", rbd_dev->obj_md_name,
>   		notify_id, (int)opcode);
>   	mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);
> -	rc = __rbd_refresh_header(dev);
> +	rc = __rbd_refresh_header(rbd_dev);
>   	mutex_unlock(&ctl_mutex);
>   	if (rc)
>   		pr_warning(RBD_DRV_NAME "%d got notification but failed to "
> -			   " update snaps: %d\n", dev->major, rc);
> +			   " update snaps: %d\n", rbd_dev->major, rc);
>
> -	rbd_req_sync_notify_ack(dev, ver, notify_id, dev->obj_md_name);
> +	rbd_req_sync_notify_ack(rbd_dev, ver, notify_id, rbd_dev->obj_md_name);
>   }
>
>   /*
>    * Request sync osd watch
>    */
> -static int rbd_req_sync_watch(struct rbd_device *dev,
> +static int rbd_req_sync_watch(struct rbd_device *rbd_dev,
>   			      const char *obj,
>   			      u64 ver)
>   {
>   	struct ceph_osd_req_op *ops;
> -	struct ceph_osd_client *osdc = &dev->rbd_client->client->osdc;
> +	struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
>
>   	int ret = rbd_create_rw_ops(&ops, 1, CEPH_OSD_OP_WATCH, 0);
>   	if (ret < 0)
>   		return ret;
>
>   	ret = ceph_osdc_create_event(osdc, rbd_watch_cb, 0,
> -				     (void *)dev, &dev->watch_event);
> +				     (void *)rbd_dev, &rbd_dev->watch_event);
>   	if (ret < 0)
>   		goto fail;
>
>   	ops[0].watch.ver = cpu_to_le64(ver);
> -	ops[0].watch.cookie = cpu_to_le64(dev->watch_event->cookie);
> +	ops[0].watch.cookie = cpu_to_le64(rbd_dev->watch_event->cookie);
>   	ops[0].watch.flag = 1;
>
> -	ret = rbd_req_sync_op(dev, NULL,
> +	ret = rbd_req_sync_op(rbd_dev, NULL,
>   			      CEPH_NOSNAP,
>   			      0,
>   			      CEPH_OSD_FLAG_WRITE | CEPH_OSD_FLAG_ONDISK,
>   			      ops,
>   			      1, obj, 0, 0, NULL,
> -			&dev->watch_request, NULL);
> +			&rbd_dev->watch_request, NULL);
>
>   	if (ret < 0)
>   		goto fail_event;
> @@ -1273,8 +1274,8 @@ static int rbd_req_sync_watch(struct rbd_device *dev,
>   	return 0;
>
>   fail_event:
> -	ceph_osdc_cancel_event(dev->watch_event);
> -	dev->watch_event = NULL;
> +	ceph_osdc_cancel_event(rbd_dev->watch_event);
> +	rbd_dev->watch_event = NULL;
>   fail:
>   	rbd_destroy_ops(ops);
>   	return ret;
> @@ -1283,7 +1284,7 @@ fail:
>   /*
>    * Request sync osd unwatch
>    */
> -static int rbd_req_sync_unwatch(struct rbd_device *dev,
> +static int rbd_req_sync_unwatch(struct rbd_device *rbd_dev,
>   				const char *obj)
>   {
>   	struct ceph_osd_req_op *ops;
> @@ -1293,10 +1294,10 @@ static int rbd_req_sync_unwatch(struct
> rbd_device *dev,
>   		return ret;
>
>   	ops[0].watch.ver = 0;
> -	ops[0].watch.cookie = cpu_to_le64(dev->watch_event->cookie);
> +	ops[0].watch.cookie = cpu_to_le64(rbd_dev->watch_event->cookie);
>   	ops[0].watch.flag = 0;
>
> -	ret = rbd_req_sync_op(dev, NULL,
> +	ret = rbd_req_sync_op(rbd_dev, NULL,
>   			      CEPH_NOSNAP,
>   			      0,
>   			      CEPH_OSD_FLAG_WRITE | CEPH_OSD_FLAG_ONDISK,
> @@ -1304,33 +1305,34 @@ static int rbd_req_sync_unwatch(struct
> rbd_device *dev,
>   			      1, obj, 0, 0, NULL, NULL, NULL);
>
>   	rbd_destroy_ops(ops);
> -	ceph_osdc_cancel_event(dev->watch_event);
> -	dev->watch_event = NULL;
> +	ceph_osdc_cancel_event(rbd_dev->watch_event);
> +	rbd_dev->watch_event = NULL;
>   	return ret;
>   }
>
>   struct rbd_notify_info {
> -	struct rbd_device *dev;
> +	struct rbd_device *rbd_dev;
>   };
>
>   static void rbd_notify_cb(u64 ver, u64 notify_id, u8 opcode, void *data)
>   {
> -	struct rbd_device *dev = (struct rbd_device *)data;
> -	if (!dev)
> +	struct rbd_device *rbd_dev = (struct rbd_device *)data;
> +	if (!rbd_dev)
>   		return;
>
> -	dout("rbd_notify_cb %s notify_id=%lld opcode=%d\n", dev->obj_md_name,
> +	dout("rbd_notify_cb %s notify_id=%lld opcode=%d\n",
> +				rbd_dev->obj_md_name,
>   		notify_id, (int)opcode);
>   }
>
>   /*
>    * Request sync osd notify
>    */
> -static int rbd_req_sync_notify(struct rbd_device *dev,
> +static int rbd_req_sync_notify(struct rbd_device *rbd_dev,
>   		          const char *obj)
>   {
>   	struct ceph_osd_req_op *ops;
> -	struct ceph_osd_client *osdc = &dev->rbd_client->client->osdc;
> +	struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
>   	struct ceph_osd_event *event;
>   	struct rbd_notify_info info;
>   	int payload_len = sizeof(u32) + sizeof(u32);
> @@ -1340,7 +1342,7 @@ static int rbd_req_sync_notify(struct rbd_device *dev,
>   	if (ret < 0)
>   		return ret;
>
> -	info.dev = dev;
> +	info.rbd_dev = rbd_dev;
>
>   	ret = ceph_osdc_create_event(osdc, rbd_notify_cb, 1,
>   				     (void *)&info, &event);
> @@ -1353,7 +1355,7 @@ static int rbd_req_sync_notify(struct rbd_device *dev,
>   	ops[0].watch.prot_ver = RADOS_NOTIFY_VER;
>   	ops[0].watch.timeout = 12;
>
> -	ret = rbd_req_sync_op(dev, NULL,
> +	ret = rbd_req_sync_op(rbd_dev, NULL,
>   			       CEPH_NOSNAP,
>   			       0,
>   			       CEPH_OSD_FLAG_WRITE | CEPH_OSD_FLAG_ONDISK,
> @@ -1377,7 +1379,7 @@ fail:
>   /*
>    * Request sync osd read
>    */
> -static int rbd_req_sync_exec(struct rbd_device *dev,
> +static int rbd_req_sync_exec(struct rbd_device *rbd_dev,
>   			     const char *obj,
>   			     const char *cls,
>   			     const char *method,
> @@ -1401,7 +1403,7 @@ static int rbd_req_sync_exec(struct rbd_device *dev,
>   	ops[0].cls.indata = data;
>   	ops[0].cls.indata_len = len;
>
> -	ret = rbd_req_sync_op(dev, NULL,
> +	ret = rbd_req_sync_op(rbd_dev, NULL,
>   			       CEPH_NOSNAP,
>   			       0,
>   			       CEPH_OSD_FLAG_WRITE | CEPH_OSD_FLAG_ONDISK,
> @@ -1632,7 +1634,7 @@ out_dh:
>   /*
>    * create a snapshot
>    */
> -static int rbd_header_add_snap(struct rbd_device *dev,
> +static int rbd_header_add_snap(struct rbd_device *rbd_dev,
>   			       const char *snap_name,
>   			       gfp_t gfp_flags)
>   {
> @@ -1644,11 +1646,11 @@ static int rbd_header_add_snap(struct rbd_device
> *dev,
>   	struct ceph_mon_client *monc;
>
>   	/* we should create a snapshot only if we're pointing at the head */
> -	if (dev->snap_id != CEPH_NOSNAP)
> +	if (rbd_dev->snap_id != CEPH_NOSNAP)
>   		return -EINVAL;
>
> -	monc = &dev->rbd_client->client->monc;
> -	ret = ceph_monc_create_snapid(monc, dev->pool_id, &new_snapid);
> +	monc = &rbd_dev->rbd_client->client->monc;
> +	ret = ceph_monc_create_snapid(monc, rbd_dev->pool_id, &new_snapid);
>   	dout("created snapid=%lld\n", new_snapid);
>   	if (ret < 0)
>   		return ret;
> @@ -1663,7 +1665,8 @@ static int rbd_header_add_snap(struct rbd_device *dev,
>   	ceph_encode_string_safe(&p, e, snap_name, name_len, bad);
>   	ceph_encode_64_safe(&p, e, new_snapid, bad);
>
> -	ret = rbd_req_sync_exec(dev, dev->obj_md_name, "rbd", "snap_add",
> +	ret = rbd_req_sync_exec(rbd_dev, rbd_dev->obj_md_name,
> +				"rbd", "snap_add",
>   				data, p - data, &ver);
>
>   	kfree(data);
> @@ -1671,9 +1674,9 @@ static int rbd_header_add_snap(struct rbd_device *dev,
>   	if (ret < 0)
>   		return ret;
>
> -	down_write(&dev->header_rwsem);
> -	dev->header.snapc->seq = new_snapid;
> -	up_write(&dev->header_rwsem);
> +	down_write(&rbd_dev->header_rwsem);
> +	rbd_dev->header.snapc->seq = new_snapid;
> +	up_write(&rbd_dev->header_rwsem);
>
>   	return 0;
>   bad:


  reply	other threads:[~2012-07-11 20:56 UTC|newest]

Thread overview: 69+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2012-07-11 13:49 [PATCH 00/16] rbd: updates and enhancements Alex Elder
2012-07-11 14:00 ` [PATCH 01/16] libceph: fix off-by-one bug in ceph_encode_filepath() Alex Elder
2012-07-11 16:59   ` Yehuda Sadeh
2012-07-11 18:35   ` Josh Durgin
2012-07-11 14:00 ` [PATCH 02/16] rbd: drop a useless local variable Alex Elder
2012-07-11 16:58   ` Yehuda Sadeh Weinraub
2012-07-11 18:36   ` Josh Durgin
2012-07-11 14:00 ` [PATCH 03/16] libceph: define ceph_decode_string() Alex Elder
2012-07-11 17:13   ` Yehuda Sadeh
2012-07-11 18:43   ` Josh Durgin
2012-07-11 22:09   ` [PATCH v2 " Alex Elder
2012-07-12 17:13     ` Alex Elder
2012-07-11 14:01 ` [PATCH 04/16] libceph: define ceph_extract_encoded_string() Alex Elder
2012-07-11 17:20   ` Yehuda Sadeh
2012-07-11 17:45     ` Sage Weil
2012-07-11 19:14     ` Alex Elder
2012-07-11 19:26       ` Yehuda Sadeh
2012-07-11 22:10   ` [PATCH v2 " Alex Elder
2012-07-12 17:13     ` [PATCH v3 " Alex Elder
2012-07-12 18:20       ` Sage Weil
2012-07-12 19:48         ` Alex Elder
2012-07-12 22:47         ` Alex Elder
2012-07-12 22:47   ` [PATCH v4 " Alex Elder
2012-07-11 14:01 ` [PATCH 05/16] rbd: define dup_token() Alex Elder
2012-07-11 17:48   ` Yehuda Sadeh
2012-07-11 21:50     ` Alex Elder
2012-07-11 18:50   ` Josh Durgin
2012-07-11 14:01 ` [PATCH 06/16] rbd: rename rbd_dev->block_name Alex Elder
2012-07-11 17:55   ` Yehuda Sadeh
2012-07-11 19:02   ` Josh Durgin
2012-07-11 22:13     ` Alex Elder
2012-07-11 14:01 ` [PATCH 07/16] rbd: dynamically allocate object prefix Alex Elder
2012-07-11 19:12   ` Josh Durgin
2012-07-11 19:17     ` Alex Elder
2012-07-12 17:24   ` [PATCH v2 " Alex Elder
2012-07-12 17:42     ` Josh Durgin
2012-07-11 14:02 ` [PATCH 08/16] rbd: don't store pool name in struct rbd_dev Alex Elder
2012-07-11 19:36   ` Josh Durgin
2012-07-11 20:19     ` Sage Weil
2012-07-11 22:25     ` Alex Elder
2012-07-11 23:32       ` Josh Durgin
2012-07-12  2:59     ` Alex Elder
2012-07-12  4:19       ` Josh Durgin
2012-07-12 17:05   ` Alex Elder
2012-07-12 17:05   ` [PATCH] rbd: create pool_id device attribute Alex Elder
2012-07-12 17:16     ` Josh Durgin
2012-07-12 17:35       ` Alex Elder
2012-07-12 17:05   ` [PATCH] rbd: dynamically allocate pool name Alex Elder
2012-07-12 17:21     ` Josh Durgin
2012-07-11 14:02 ` [PATCH 09/16] rbd: dynamically allocate image header name Alex Elder
2012-07-11 20:41   ` Josh Durgin
2012-07-11 14:02 ` [PATCH 10/16] rbd: dynamically allocate image name Alex Elder
2012-07-11 20:49   ` Josh Durgin
2012-07-11 20:52     ` Josh Durgin
2012-07-12 11:12       ` Alex Elder
2012-07-11 14:02 ` [PATCH 11/16] rbd: dynamically allocate snapshot name Alex Elder
2012-07-11 20:53   ` Josh Durgin
2012-07-11 14:02 ` [PATCH 12/16] rbd: use rbd_dev consistently Alex Elder
2012-07-11 20:56   ` Josh Durgin [this message]
2012-07-11 14:02 ` [PATCH 13/16] rbd: rename some fields in struct rbd_dev Alex Elder
2012-07-11 21:01   ` Josh Durgin
2012-07-12 11:14     ` Alex Elder
2012-07-11 14:02 ` [PATCH 14/16] rbd: more symbol renames Alex Elder
2012-07-11 21:03   ` Josh Durgin
2012-07-12 11:15     ` Alex Elder
2012-07-11 14:03 ` [PATCH 15/16] rbd: option " Alex Elder
2012-07-11 21:07   ` Josh Durgin
2012-07-11 14:03 ` [PATCH 16/16] rbd: kill num_reply parameters Alex Elder
2012-07-11 21:07   ` Josh Durgin

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=4FFDE873.3000703@inktank.com \
    --to=josh.durgin@inktank.com \
    --cc=ceph-devel@vger.kernel.org \
    --cc=elder@inktank.com \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.