From: Rahul Singh <Rahul.Singh@arm.com>
To: Stefano Stabellini <sstabellini@kernel.org>
Cc: "xen-devel@lists.xenproject.org" <xen-devel@lists.xenproject.org>,
	Bertrand Marquis <Bertrand.Marquis@arm.com>,
	Julien Grall <julien@xen.org>,
	Volodymyr Babchuk <Volodymyr_Babchuk@epam.com>
Subject: Re: [PATCH v2 5/8] xen/arm: Remove support for PCI ATS on SMMUv3
Date: Wed, 2 Dec 2020 13:07:54 +0000	[thread overview]
Message-ID: <9DCDC4CF-6503-477A-8C38-E50D1448B598@arm.com> (raw)
In-Reply-To: <alpine.DEB.2.21.2012011637560.1100@sstabellini-ThinkPad-T480s>

Hello Stefano,

> On 2 Dec 2020, at 12:39 am, Stefano Stabellini <sstabellini@kernel.org> wrote:
> 
> On Thu, 26 Nov 2020, Rahul Singh wrote:
>> PCI ATS functionality is not implemented or tested on Arm. Remove the
>> PCI ATS support for now; once PCI ATS support is tested and available,
>> it can be added back.
>> 
>> Signed-off-by: Rahul Singh <rahul.singh@arm.com>
> 
> This looks like a revert of 9ce27afc0830f. Can we add that as a note to
> the commit message?
 
Ok, I will add it in the next version.

> 
> One very minor comment at the bottom

Ack.

Regards,
Rahul
> 
> 
>> ---
>> xen/drivers/passthrough/arm/smmu-v3.c | 273 --------------------------
>> 1 file changed, 273 deletions(-)
>> 
>> diff --git a/xen/drivers/passthrough/arm/smmu-v3.c b/xen/drivers/passthrough/arm/smmu-v3.c
>> index 401f7ae006..6a33628087 100644
>> --- a/xen/drivers/passthrough/arm/smmu-v3.c
>> +++ b/xen/drivers/passthrough/arm/smmu-v3.c
>> @@ -460,16 +460,6 @@ struct arm_smmu_cmdq_ent {
>> 			u64			addr;
>> 		} tlbi;
>> 
>> -		#define CMDQ_OP_ATC_INV		0x40
>> -		#define ATC_INV_SIZE_ALL	52
>> -		struct {
>> -			u32			sid;
>> -			u32			ssid;
>> -			u64			addr;
>> -			u8			size;
>> -			bool			global;
>> -		} atc;
>> -
>> 		#define CMDQ_OP_PRI_RESP	0x41
>> 		struct {
>> 			u32			sid;
>> @@ -632,7 +622,6 @@ struct arm_smmu_master {
>> 	struct list_head		domain_head;
>> 	u32				*sids;
>> 	unsigned int			num_sids;
>> -	bool				ats_enabled;
>> 	unsigned int			ssid_bits;
>> };
>> 
>> @@ -650,7 +639,6 @@ struct arm_smmu_domain {
>> 
>> 	struct io_pgtable_ops		*pgtbl_ops;
>> 	bool				non_strict;
>> -	atomic_t			nr_ats_masters;
>> 
>> 	enum arm_smmu_domain_stage	stage;
>> 	union {
>> @@ -886,14 +874,6 @@ static int arm_smmu_cmdq_build_cmd(u64 *cmd, struct arm_smmu_cmdq_ent *ent)
>> 	case CMDQ_OP_TLBI_S12_VMALL:
>> 		cmd[0] |= FIELD_PREP(CMDQ_TLBI_0_VMID, ent->tlbi.vmid);
>> 		break;
>> -	case CMDQ_OP_ATC_INV:
>> -		cmd[0] |= FIELD_PREP(CMDQ_0_SSV, ent->substream_valid);
>> -		cmd[0] |= FIELD_PREP(CMDQ_ATC_0_GLOBAL, ent->atc.global);
>> -		cmd[0] |= FIELD_PREP(CMDQ_ATC_0_SSID, ent->atc.ssid);
>> -		cmd[0] |= FIELD_PREP(CMDQ_ATC_0_SID, ent->atc.sid);
>> -		cmd[1] |= FIELD_PREP(CMDQ_ATC_1_SIZE, ent->atc.size);
>> -		cmd[1] |= ent->atc.addr & CMDQ_ATC_1_ADDR_MASK;
>> -		break;
>> 	case CMDQ_OP_PRI_RESP:
>> 		cmd[0] |= FIELD_PREP(CMDQ_0_SSV, ent->substream_valid);
>> 		cmd[0] |= FIELD_PREP(CMDQ_PRI_0_SSID, ent->pri.ssid);
>> @@ -925,7 +905,6 @@ static void arm_smmu_cmdq_skip_err(struct arm_smmu_device *smmu)
>> 		[CMDQ_ERR_CERROR_NONE_IDX]	= "No error",
>> 		[CMDQ_ERR_CERROR_ILL_IDX]	= "Illegal command",
>> 		[CMDQ_ERR_CERROR_ABT_IDX]	= "Abort on command fetch",
>> -		[CMDQ_ERR_CERROR_ATC_INV_IDX]	= "ATC invalidate timeout",
>> 	};
>> 
>> 	int i;
>> @@ -945,14 +924,6 @@ static void arm_smmu_cmdq_skip_err(struct arm_smmu_device *smmu)
>> 		dev_err(smmu->dev, "retrying command fetch\n");
>> 	case CMDQ_ERR_CERROR_NONE_IDX:
>> 		return;
>> -	case CMDQ_ERR_CERROR_ATC_INV_IDX:
>> -		/*
>> -		 * ATC Invalidation Completion timeout. CONS is still pointing
>> -		 * at the CMD_SYNC. Attempt to complete other pending commands
>> -		 * by repeating the CMD_SYNC, though we might well end up back
>> -		 * here since the ATC invalidation may still be pending.
>> -		 */
>> -		return;
>> 	case CMDQ_ERR_CERROR_ILL_IDX:
>> 	default:
>> 		break;
>> @@ -1422,9 +1393,6 @@ static void arm_smmu_write_strtab_ent(struct arm_smmu_master *master, u32 sid,
>> 		val |= FIELD_PREP(STRTAB_STE_0_CFG, STRTAB_STE_0_CFG_S2_TRANS);
>> 	}
>> 
>> -	if (master->ats_enabled)
>> -		dst[1] |= cpu_to_le64(FIELD_PREP(STRTAB_STE_1_EATS,
>> -						 STRTAB_STE_1_EATS_TRANS));
>> 
>> 	arm_smmu_sync_ste_for_sid(smmu, sid);
>> 	/* See comment in arm_smmu_write_ctx_desc() */
>> @@ -1633,112 +1601,6 @@ static irqreturn_t arm_smmu_combined_irq_handler(int irq, void *dev)
>> 	return IRQ_WAKE_THREAD;
>> }
>> 
>> -static void
>> -arm_smmu_atc_inv_to_cmd(int ssid, unsigned long iova, size_t size,
>> -			struct arm_smmu_cmdq_ent *cmd)
>> -{
>> -	size_t log2_span;
>> -	size_t span_mask;
>> -	/* ATC invalidates are always on 4096-bytes pages */
>> -	size_t inval_grain_shift = 12;
>> -	unsigned long page_start, page_end;
>> -
>> -	*cmd = (struct arm_smmu_cmdq_ent) {
>> -		.opcode			= CMDQ_OP_ATC_INV,
>> -		.substream_valid	= !!ssid,
>> -		.atc.ssid		= ssid,
>> -	};
>> -
>> -	if (!size) {
>> -		cmd->atc.size = ATC_INV_SIZE_ALL;
>> -		return;
>> -	}
>> -
>> -	page_start	= iova >> inval_grain_shift;
>> -	page_end	= (iova + size - 1) >> inval_grain_shift;
>> -
>> -	/*
>> -	 * In an ATS Invalidate Request, the address must be aligned on the
>> -	 * range size, which must be a power of two number of page sizes. We
>> -	 * thus have to choose between grossly over-invalidating the region, or
>> -	 * splitting the invalidation into multiple commands. For simplicity
>> -	 * we'll go with the first solution, but should refine it in the future
>> -	 * if multiple commands are shown to be more efficient.
>> -	 *
>> -	 * Find the smallest power of two that covers the range. The most
>> -	 * significant differing bit between the start and end addresses,
>> -	 * fls(start ^ end), indicates the required span. For example:
>> -	 *
>> -	 * We want to invalidate pages [8; 11]. This is already the ideal range:
>> -	 *		x = 0b1000 ^ 0b1011 = 0b11
>> -	 *		span = 1 << fls(x) = 4
>> -	 *
>> -	 * To invalidate pages [7; 10], we need to invalidate [0; 15]:
>> -	 *		x = 0b0111 ^ 0b1010 = 0b1101
>> -	 *		span = 1 << fls(x) = 16
>> -	 */
>> -	log2_span	= fls_long(page_start ^ page_end);
>> -	span_mask	= (1ULL << log2_span) - 1;
>> -
>> -	page_start	&= ~span_mask;
>> -
>> -	cmd->atc.addr	= page_start << inval_grain_shift;
>> -	cmd->atc.size	= log2_span;
>> -}
>> -
>> -static int arm_smmu_atc_inv_master(struct arm_smmu_master *master,
>> -				   struct arm_smmu_cmdq_ent *cmd)
>> -{
>> -	int i;
>> -
>> -	if (!master->ats_enabled)
>> -		return 0;
>> -
>> -	for (i = 0; i < master->num_sids; i++) {
>> -		cmd->atc.sid = master->sids[i];
>> -		arm_smmu_cmdq_issue_cmd(master->smmu, cmd);
>> -	}
>> -
>> -	return arm_smmu_cmdq_issue_sync(master->smmu);
>> -}
>> -
>> -static int arm_smmu_atc_inv_domain(struct arm_smmu_domain *smmu_domain,
>> -				   int ssid, unsigned long iova, size_t size)
>> -{
>> -	int ret = 0;
>> -	unsigned long flags;
>> -	struct arm_smmu_cmdq_ent cmd;
>> -	struct arm_smmu_master *master;
>> -
>> -	if (!(smmu_domain->smmu->features & ARM_SMMU_FEAT_ATS))
>> -		return 0;
>> -
>> -	/*
>> -	 * Ensure that we've completed prior invalidation of the main TLBs
>> -	 * before we read 'nr_ats_masters' in case of a concurrent call to
>> -	 * arm_smmu_enable_ats():
>> -	 *
>> -	 *	// unmap()			// arm_smmu_enable_ats()
>> -	 *	TLBI+SYNC			atomic_inc(&nr_ats_masters);
>> -	 *	smp_mb();			[...]
>> -	 *	atomic_read(&nr_ats_masters);	pci_enable_ats() // writel()
>> -	 *
>> -	 * Ensures that we always see the incremented 'nr_ats_masters' count if
>> -	 * ATS was enabled at the PCI device before completion of the TLBI.
>> -	 */
>> -	smp_mb();
>> -	if (!atomic_read(&smmu_domain->nr_ats_masters))
>> -		return 0;
>> -
>> -	arm_smmu_atc_inv_to_cmd(ssid, iova, size, &cmd);
>> -
>> -	spin_lock_irqsave(&smmu_domain->devices_lock, flags);
>> -	list_for_each_entry(master, &smmu_domain->devices, domain_head)
>> -		ret |= arm_smmu_atc_inv_master(master, &cmd);
>> -	spin_unlock_irqrestore(&smmu_domain->devices_lock, flags);
>> -
>> -	return ret ? -ETIMEDOUT : 0;
>> -}
>> 
>> /* IO_PGTABLE API */
>> static void arm_smmu_tlb_inv_context(void *cookie)
>> @@ -1765,7 +1627,6 @@ static void arm_smmu_tlb_inv_context(void *cookie)
>> 	 */
>> 	arm_smmu_cmdq_issue_cmd(smmu, &cmd);
>> 	arm_smmu_cmdq_issue_sync(smmu);
>> -	arm_smmu_atc_inv_domain(smmu_domain, 0, 0, 0);
>> }
>> 
>> static void arm_smmu_tlb_inv_range_nosync(unsigned long iova, size_t size,
>> @@ -2106,108 +1967,6 @@ static void arm_smmu_install_ste_for_dev(struct arm_smmu_master *master)
>> 	}
>> }
>> 
>> -static bool arm_smmu_ats_supported(struct arm_smmu_master *master)
>> -{
>> -	struct device *dev = master->dev;
>> -	struct arm_smmu_device *smmu = master->smmu;
>> -	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
>> -
>> -	if (!(smmu->features & ARM_SMMU_FEAT_ATS))
>> -		return false;
>> -
>> -	if (!(fwspec->flags & IOMMU_FWSPEC_PCI_RC_ATS))
>> -		return false;
>> -
>> -	return dev_is_pci(dev) && pci_ats_supported(to_pci_dev(dev));
>> -}
>> -
>> -static void arm_smmu_enable_ats(struct arm_smmu_master *master)
>> -{
>> -	size_t stu;
>> -	struct pci_dev *pdev;
>> -	struct arm_smmu_device *smmu = master->smmu;
>> -	struct arm_smmu_domain *smmu_domain = master->domain;
>> -
>> -	/* Don't enable ATS at the endpoint if it's not enabled in the STE */
>> -	if (!master->ats_enabled)
>> -		return;
>> -
>> -	/* Smallest Translation Unit: log2 of the smallest supported granule */
>> -	stu = __ffs(smmu->pgsize_bitmap);
>> -	pdev = to_pci_dev(master->dev);
>> -
>> -	atomic_inc(&smmu_domain->nr_ats_masters);
>> -	arm_smmu_atc_inv_domain(smmu_domain, 0, 0, 0);
>> -	if (pci_enable_ats(pdev, stu))
>> -		dev_err(master->dev, "Failed to enable ATS (STU %zu)\n", stu);
>> -}
>> -
>> -static void arm_smmu_disable_ats(struct arm_smmu_master *master)
>> -{
>> -	struct arm_smmu_cmdq_ent cmd;
>> -	struct arm_smmu_domain *smmu_domain = master->domain;
>> -
>> -	if (!master->ats_enabled)
>> -		return;
>> -
>> -	pci_disable_ats(to_pci_dev(master->dev));
>> -	/*
>> -	 * Ensure ATS is disabled at the endpoint before we issue the
>> -	 * ATC invalidation via the SMMU.
>> -	 */
>> -	wmb();
>> -	arm_smmu_atc_inv_to_cmd(0, 0, 0, &cmd);
>> -	arm_smmu_atc_inv_master(master, &cmd);
>> -    atomic_dec(&smmu_domain->nr_ats_masters);
>> -}
>> -
>> -static int arm_smmu_enable_pasid(struct arm_smmu_master *master)
>> -{
>> -	int ret;
>> -	int features;
>> -	int num_pasids;
>> -	struct pci_dev *pdev;
>> -
>> -	if (!dev_is_pci(master->dev))
>> -		return -ENODEV;
>> -
>> -	pdev = to_pci_dev(master->dev);
>> -
>> -	features = pci_pasid_features(pdev);
>> -	if (features < 0)
>> -		return features;
>> -
>> -	num_pasids = pci_max_pasids(pdev);
>> -	if (num_pasids <= 0)
>> -		return num_pasids;
>> -
>> -	ret = pci_enable_pasid(pdev, features);
>> -	if (ret) {
>> -		dev_err(&pdev->dev, "Failed to enable PASID\n");
>> -		return ret;
>> -	}
>> -
>> -	master->ssid_bits = min_t(u8, ilog2(num_pasids),
>> -				  master->smmu->ssid_bits);
>> -	return 0;
>> -}
>> -
>> -static void arm_smmu_disable_pasid(struct arm_smmu_master *master)
>> -{
>> -	struct pci_dev *pdev;
>> -
>> -	if (!dev_is_pci(master->dev))
>> -		return;
>> -
>> -	pdev = to_pci_dev(master->dev);
>> -
>> -	if (!pdev->pasid_enabled)
>> -		return;
>> -
>> -	master->ssid_bits = 0;
>> -	pci_disable_pasid(pdev);
>> -}
>> -
>> static void arm_smmu_detach_dev(struct arm_smmu_master *master)
>> {
>> 	unsigned long flags;
>> @@ -2216,14 +1975,11 @@ static void arm_smmu_detach_dev(struct arm_smmu_master *master)
>> 	if (!smmu_domain)
>> 		return;
>> 
>> -	arm_smmu_disable_ats(master);
>> -
>> 	spin_lock_irqsave(&smmu_domain->devices_lock, flags);
>> 	list_del(&master->domain_head);
>> 	spin_unlock_irqrestore(&smmu_domain->devices_lock, flags);
>> 
>> 	master->domain = NULL;
>> -	master->ats_enabled = false;
>> 	arm_smmu_install_ste_for_dev(master);
>> }
>> 
>> @@ -2271,17 +2027,12 @@ static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
>> 
>> 	master->domain = smmu_domain;
>> 
>> -	if (smmu_domain->stage != ARM_SMMU_DOMAIN_BYPASS)
>> -		master->ats_enabled = arm_smmu_ats_supported(master);
>> -
>> 	arm_smmu_install_ste_for_dev(master);
>> 
>> 	spin_lock_irqsave(&smmu_domain->devices_lock, flags);
>> 	list_add(&master->domain_head, &smmu_domain->devices);
>> 	spin_unlock_irqrestore(&smmu_domain->devices_lock, flags);
>> 
>> -	arm_smmu_enable_ats(master);
>> -
>> out_unlock:
>> 	mutex_unlock(&smmu_domain->init_mutex);
>> 	return ret;
>> @@ -2410,16 +2161,6 @@ static struct iommu_device *arm_smmu_probe_device(struct device *dev)
>> 
>> 	master->ssid_bits = min(smmu->ssid_bits, fwspec->num_pasid_bits);
>> 
>> -	/*
>> -	 * Note that PASID must be enabled before, and disabled after ATS:
>> -	 * PCI Express Base 4.0r1.0 - 10.5.1.3 ATS Control Register
>> -	 *
>> -	 *   Behavior is undefined if this bit is Set and the value of the PASID
>> -	 *   Enable, Execute Requested Enable, or Privileged Mode Requested bits
>> -	 *   are changed.
>> -	 */
>> -	arm_smmu_enable_pasid(master);
>> -
>> 	if (!(smmu->features & ARM_SMMU_FEAT_2_LVL_CDTAB))
>> 		master->ssid_bits = min_t(u8, master->ssid_bits,
>> 					  CTXDESC_LINEAR_CDMAX);
>> @@ -2442,7 +2183,6 @@ static void arm_smmu_release_device(struct device *dev)
>> 
>> 	master = dev_iommu_priv_get(dev);
>> 	arm_smmu_detach_dev(master);
>> -	arm_smmu_disable_pasid(master);
>> 	kfree(master);
>> 	iommu_fwspec_free(dev);
>> }
>> @@ -2997,15 +2737,6 @@ static int arm_smmu_device_reset(struct arm_smmu_device *smmu, bool bypass)
>> 		}
>> 	}
>> 
>> -	if (smmu->features & ARM_SMMU_FEAT_ATS) {
>> -		enables |= CR0_ATSCHK;
>> -		ret = arm_smmu_write_reg_sync(smmu, enables, ARM_SMMU_CR0,
>> -					      ARM_SMMU_CR0ACK);
>> -		if (ret) {
>> -			dev_err(smmu->dev, "failed to enable ATS check\n");
>> -			return ret;
>> -		}
>> -	}
>> 
>> 	ret = arm_smmu_setup_irqs(smmu);
>> 	if (ret) {
>> @@ -3076,13 +2807,9 @@ static int arm_smmu_device_hw_probe(struct arm_smmu_device *smmu)
>> 	if (IS_ENABLED(CONFIG_PCI_PRI) && reg & IDR0_PRI)
>> 		smmu->features |= ARM_SMMU_FEAT_PRI;
>> 
>> -	if (IS_ENABLED(CONFIG_PCI_ATS) && reg & IDR0_ATS)
>> -		smmu->features |= ARM_SMMU_FEAT_ATS;
>> -
>> 	if (reg & IDR0_SEV)
>> 		smmu->features |= ARM_SMMU_FEAT_SEV;
>> 
>> -
>> 	if (reg & IDR0_HYP)
> 
> spurious change
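
For reference, the rounding that the removed arm_smmu_atc_inv_to_cmd() comment
walks through can be reproduced with a small standalone sketch. This is
illustrative only and not part of the patch; fls_long() and atc_inv_span()
below are stand-ins written for this example, not the kernel helpers:

/*
 * Illustrative sketch only: an ATS invalidate must cover an aligned,
 * power-of-two number of 4KiB pages, so the requested range is widened to
 * the smallest such span that contains it.
 */
#include <stddef.h>
#include <stdio.h>

static unsigned int fls_long(unsigned long x)
{
	/* 1-based index of the most significant set bit; 0 when x == 0. */
	return x ? (unsigned int)(8 * sizeof(x) - __builtin_clzl(x)) : 0;
}

static void atc_inv_span(unsigned long iova, size_t size,
			 unsigned long *addr, unsigned int *log2_span)
{
	const unsigned int grain_shift = 12; /* ATC invalidates are on 4096-byte pages */
	unsigned long page_start = iova >> grain_shift;
	unsigned long page_end = (iova + size - 1) >> grain_shift;
	unsigned long span_mask;

	*log2_span = fls_long(page_start ^ page_end);
	span_mask = (1UL << *log2_span) - 1;
	page_start &= ~span_mask;            /* align the start down to the span */

	*addr = page_start << grain_shift;
}

int main(void)
{
	unsigned long addr;
	unsigned int log2_span;

	/* Pages [8; 11]: already an aligned power-of-two range -> 4 pages. */
	atc_inv_span(8UL << 12, 4UL << 12, &addr, &log2_span);
	printf("start page %lu, %u pages\n", addr >> 12, 1U << log2_span);

	/* Pages [7; 10]: widened to [0; 15] -> 16 pages. */
	atc_inv_span(7UL << 12, 4UL << 12, &addr, &log2_span);
	printf("start page %lu, %u pages\n", addr >> 12, 1U << log2_span);
	return 0;
}

Over-invalidating like this trades precision for a single command, which is
the simplicity argument the removed comment makes.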



Thread overview: 53+ messages
2020-11-26 17:01 [PATCH v2 0/8] xen/arm: Add support for SMMUv3 driver Rahul Singh
2020-11-26 17:02 ` [PATCH v2 1/8] xen/arm: Import the SMMUv3 driver from Linux Rahul Singh
2020-12-01 22:01   ` Stefano Stabellini
2020-11-26 17:02 ` [PATCH v2 2/8] xen/arm: revert atomic operation related command-queue insertion patch Rahul Singh
2020-12-01 22:23   ` Stefano Stabellini
2020-12-02 13:05     ` Rahul Singh
2020-12-02 13:44   ` Julien Grall
2020-12-03 11:49     ` Rahul Singh
2020-11-26 17:02 ` [PATCH v2 3/8] xen/arm: revert patch related to XArray Rahul Singh
2020-12-02  0:20   ` Stefano Stabellini
2020-12-02 13:46   ` Julien Grall
2020-12-03 12:57     ` Rahul Singh
2020-12-04  8:52       ` Julien Grall
2020-11-26 17:02 ` [PATCH v2 4/8] xen/arm: Remove support for MSI on SMMUv3 Rahul Singh
2020-12-02  0:33   ` Stefano Stabellini
2020-12-02  0:40     ` Stefano Stabellini
2020-12-02 13:12       ` Rahul Singh
2020-12-02 14:11         ` Julien Grall
2020-12-03 12:59           ` Rahul Singh
2020-11-26 17:02 ` [PATCH v2 5/8] xen/arm: Remove support for PCI ATS " Rahul Singh
2020-12-02  0:39   ` Stefano Stabellini
2020-12-02 13:07     ` Rahul Singh [this message]
2020-12-02 13:57   ` Julien Grall
2020-11-26 17:02 ` [PATCH v2 6/8] xen/arm: Remove support for Stage-1 translation " Rahul Singh
2020-12-02  0:53   ` Stefano Stabellini
2020-12-02 13:13     ` Rahul Singh
2020-11-26 17:02 ` [PATCH v2 7/8] xen/arm: Remove Linux specific code that is not usable in XEN Rahul Singh
2020-12-02  1:48   ` Stefano Stabellini
2020-12-02 14:34     ` Rahul Singh
2020-12-02 14:39       ` Julien Grall
2020-12-02 14:45   ` Julien Grall
2020-12-03 14:33     ` Rahul Singh
2020-12-04  9:05       ` Julien Grall
2020-12-07 10:36         ` Rahul Singh
2020-12-07 10:55           ` Julien Grall
2020-11-26 17:02 ` [PATCH v2 8/8] xen/arm: Add support for SMMUv3 driver Rahul Singh
2020-12-02  2:51   ` Stefano Stabellini
2020-12-02 16:27     ` Rahul Singh
2020-12-02 19:26       ` Rahul Singh
2020-12-02 16:47     ` Julien Grall
2020-12-03  4:13       ` Stefano Stabellini
2020-12-03 14:40         ` Rahul Singh
2020-12-03 18:47           ` Stefano Stabellini
2020-12-07  8:33             ` Rahul Singh
2020-12-02 16:22   ` Julien Grall
2020-12-07 12:12     ` Rahul Singh
2020-12-07 17:39       ` Julien Grall
2020-12-07 18:42         ` Rahul Singh
2020-12-08 19:05           ` Julien Grall
2020-12-09  1:19             ` Stefano Stabellini
2020-12-09  7:55               ` Bertrand Marquis
2020-12-09  9:18                 ` Julien Grall
2020-12-09 18:37                   ` Rahul Singh
