All of lore.kernel.org
 help / color / mirror / Atom feed
From: David Hildenbrand <david@redhat.com>
To: Janosch Frank <frankja@linux.vnet.ibm.com>, kvm@vger.kernel.org
Cc: schwidefsky@de.ibm.com, borntraeger@de.ibm.com,
	dominik.dingel@gmail.com, linux-s390@vger.kernel.org
Subject: Re: [PATCH v2] mm: s390: Only notify on 4k pages
Date: Tue, 30 Jan 2018 11:19:12 +0100	[thread overview]
Message-ID: <caf66413-26f1-64bc-f49c-c2f49604de46@redhat.com> (raw)
In-Reply-To: <1516962882-86297-1-git-send-email-frankja@linux.vnet.ibm.com>

On 26.01.2018 11:34, Janosch Frank wrote:
> Let's try this
> 
> Signed-off-by: Janosch Frank <frankja@linux.vnet.ibm.com>
> ---
>  arch/s390/include/asm/gmap.h |   5 +-
>  arch/s390/mm/gmap.c          | 142 ++++++++-----------------------------------
>  2 files changed, 28 insertions(+), 119 deletions(-)
> 
> diff --git a/arch/s390/include/asm/gmap.h b/arch/s390/include/asm/gmap.h
> index 6287aca..4120360 100644
> --- a/arch/s390/include/asm/gmap.h
> +++ b/arch/s390/include/asm/gmap.h
> @@ -13,9 +13,8 @@
>  #define GMAP_NOTIFY_SHADOW	0x2
>  #define GMAP_NOTIFY_MPROT	0x1
>  
> -/* Status bits in huge and non-huge gmap segment entries. */
> -#define _SEGMENT_ENTRY_GMAP_IN		0x0001	/* invalidation notify bit */
> -#define _SEGMENT_ENTRY_GMAP_SPLIT	0x0002  /* split huge pmd */
> +/* Status bit in huge and non-huge gmap segment entries. */
> +#define _SEGMENT_ENTRY_GMAP_SPLIT	0x0001  /* split huge pmd */
>  /* Status bits only for huge segment entries */
>  #define _SEGMENT_ENTRY_GMAP_UC		0x4000	/* user dirty (migration) */
>  #define _SEGMENT_ENTRY_GMAP_VSIE	0x8000	/* vsie bit */
> diff --git a/arch/s390/mm/gmap.c b/arch/s390/mm/gmap.c
> index 66a68af..2f5c8ee 100644
> --- a/arch/s390/mm/gmap.c
> +++ b/arch/s390/mm/gmap.c
> @@ -998,7 +998,7 @@ static void gmap_pte_transfer_prot(struct mm_struct *mm, unsigned long addr,
>   * and requested access rights are incompatible.
>   */
>  static int gmap_pmdp_force_prot(struct gmap *gmap, unsigned long addr,
> -				pmd_t *pmdp, int prot, unsigned long bits)
> +				pmd_t *pmdp, int prot)
>  {
>  	int pmd_i = pmd_val(*pmdp) & _SEGMENT_ENTRY_INVALID;
>  	int pmd_p = pmd_val(*pmdp) & _SEGMENT_ENTRY_PROTECT;
> @@ -1018,7 +1018,6 @@ static int gmap_pmdp_force_prot(struct gmap *gmap, unsigned long addr,
>  		pmd_val(new) |= _SEGMENT_ENTRY_PROTECT;
>  		gmap_pmdp_xchg(gmap, pmdp, new, addr);
>  	}
> -	pmd_val(*pmdp) |=  bits;
>  	return 0;
>  }
>  
> @@ -1102,10 +1101,6 @@ static int gmap_protect_pte(struct gmap *gmap, unsigned long gaddr,
>  	spinlock_t *ptl = NULL;
>  	unsigned long pbits = 0;
>  
> -	/* We have no upper segment, let's go back and fix this up. */
> -	if (pmd_val(*pmdp) & _SEGMENT_ENTRY_INVALID)
> -		return -EAGAIN;
> -
>  	ptep = gmap_pte_from_pmd(gmap, pmdp, gaddr, &ptl);
>  	if (!ptep)
>  		return -ENOMEM;
> @@ -1134,30 +1129,18 @@ static int gmap_protect_pte(struct gmap *gmap, unsigned long gaddr,
>   */
>  static int gmap_protect_pmd(struct gmap *gmap, unsigned long gaddr,
>  			    unsigned long vmaddr, pmd_t *pmdp, pmd_t *hpmdp,
> -			    int prot, unsigned long bits)
> +			    int prot)
>  {
> -	unsigned long sbits = 0;
>  	int ret = 0;
>  
> -	sbits |= (bits & GMAP_NOTIFY_MPROT) ? _SEGMENT_ENTRY_GMAP_IN : 0;
> -	sbits |= (bits & GMAP_NOTIFY_SHADOW) ? _SEGMENT_ENTRY_GMAP_VSIE : 0;
> -
> -	if (((prot != PROT_WRITE) && (bits & GMAP_NOTIFY_SHADOW))) {
> -		ret = gmap_pmd_split(gmap, gaddr, pmdp);
> -		if (ret)
> -			return ret;
> -		return -EFAULT;
> -	}
> -
> -	/* Protect gmap pmd */
> -	ret = gmap_pmdp_force_prot(gmap, gaddr, pmdp, prot, sbits);
> +	/* Protect gmap pmd for dirty tracking. */
> +	ret = gmap_pmdp_force_prot(gmap, gaddr, pmdp, prot);
>  	/*
>  	 * Transfer protection back to the host pmd, so userspace has
>  	 * never more access rights than the VM.
>  	 */
>  	if (!ret)
>  		gmap_pmdp_transfer_prot(gmap->mm, vmaddr, pmdp, hpmdp);
> -
>  	return ret;
>  }
>  
> @@ -1167,7 +1150,7 @@ static int gmap_protect_pmd(struct gmap *gmap, unsigned long gaddr,
>   * @gaddr: virtual address in the guest address space
>   * @len: size of area
>   * @prot: indicates access rights: PROT_NONE, PROT_READ or PROT_WRITE
> - * @bits: pgste notification bits to set
> + * @bits: notification bits to set
>   *
>   * Returns 0 if successfully protected, -ENOMEM if out of memory and
>   * -EFAULT if gaddr is invalid (or mapping for shadows is missing).
> @@ -1180,7 +1163,7 @@ static int gmap_protect_range(struct gmap *gmap, unsigned long gaddr,
>  			      unsigned long len, int prot, unsigned long bits)
>  {
>  	spinlock_t *ptl;
> -	unsigned long vmaddr, dist;
> +	unsigned long vmaddr;
>  	pmd_t *pmdp, *hpmdp;
>  	int rc = 0;
>  
> @@ -1194,13 +1177,8 @@ static int gmap_protect_range(struct gmap *gmap, unsigned long gaddr,
>  		ptl = pmd_lock(gmap->mm, hpmdp);
>  
>  		pmdp = gmap_pmd_op_walk(gmap, gaddr);
> -		if (pmdp) {
> +		if (pmdp && !(pmd_val(*pmdp) & _SEGMENT_ENTRY_INVALID)) {
>  			if (!pmd_large(*pmdp)) {
> -				if (gmap_pmd_is_split(pmdp) &&
> -				    (bits & GMAP_NOTIFY_MPROT)) {
> -					pmd_val(*pmdp) |= _SEGMENT_ENTRY_GMAP_IN;
> -				}
> -
>  				rc = gmap_protect_pte(gmap, gaddr, vmaddr,
>  						      pmdp, hpmdp, prot, bits);
>  				if (!rc) {
> @@ -1208,13 +1186,9 @@ static int gmap_protect_range(struct gmap *gmap, unsigned long gaddr,
>  					gaddr += PAGE_SIZE;
>  				}
>  			} else {
> -				rc =  gmap_protect_pmd(gmap, gaddr, vmaddr,
> -						       pmdp, hpmdp, prot, bits);
> -				if (!rc) {
> -					dist = HPAGE_SIZE - (gaddr & ~HPAGE_MASK);
> -					len = len < dist ? 0 : len - dist;
> -					gaddr = (gaddr & HPAGE_MASK) + HPAGE_SIZE;
> -				}
> +				rc = gmap_pmd_split(gmap, gaddr, pmdp);
> +				if (!rc)
> +					rc = -EFAULT;
>  			}
>  			gmap_pmd_op_end(gmap, pmdp);
>  		}
> @@ -1357,29 +1331,9 @@ static inline void gmap_insert_rmap(struct gmap *sg, unsigned long vmaddr,
>  	}
>  }
>  
> -static int gmap_protect_rmap_pmd(struct gmap *sg, struct gmap_rmap *rmap,
> -				 unsigned long paddr, unsigned long vmaddr,
> -				 pmd_t *pmdp, pmd_t *hpmdp, int prot)
> -{
> -	int rc = 0;
> -
> -	/* We have no upper segment, let's go back and fix this up. */
> -	if (pmd_val(*pmdp) & _SEGMENT_ENTRY_INVALID)
> -		return -EAGAIN;
> -
> -	spin_lock_nested(&sg->guest_table_lock, GMAP_LOCK_SHADOW);
> -	rc = gmap_protect_pmd(sg->parent, paddr, vmaddr, pmdp, hpmdp,
> -			      prot, GMAP_NOTIFY_SHADOW);
> -	if (!rc)
> -		gmap_insert_rmap(sg, vmaddr & HPAGE_MASK, rmap);
> -
> -	spin_unlock(&sg->guest_table_lock);
> -	return rc;
> -}
> -
>  static int gmap_protect_rmap_pte(struct gmap *sg, struct gmap_rmap *rmap,
>  				 unsigned long paddr, unsigned long vmaddr,
> -				 pmd_t *pmdp, int prot)
> +				 pmd_t *pmdp, pmd_t *hpmdp, int prot)
>  {
>  	int rc = 0;
>  	pte_t *ptep = NULL;
> @@ -1392,8 +1346,8 @@ static int gmap_protect_rmap_pte(struct gmap *sg, struct gmap_rmap *rmap,
>  	ptep = gmap_pte_from_pmd(sg->parent, pmdp, paddr, &ptl);
>  	if (ptep) {
>  		spin_lock_nested(&sg->guest_table_lock, GMAP_LOCK_SHADOW);
> -		rc = ptep_force_prot(sg->parent->mm, paddr, ptep, prot,
> -				     PGSTE_VSIE_BIT);
> +		rc = gmap_protect_pte(sg->parent, paddr, vmaddr, pmdp, hpmdp,
> +				      prot, GMAP_NOTIFY_SHADOW);
>  		if (!rc)
>  			gmap_insert_rmap(sg, vmaddr, rmap);
>  		spin_unlock(&sg->guest_table_lock);
> @@ -1418,7 +1372,7 @@ static int gmap_protect_rmap(struct gmap *sg, unsigned long raddr,
>  {
>  	struct gmap *parent;
>  	struct gmap_rmap *rmap;
> -	unsigned long vmaddr, dist;
> +	unsigned long vmaddr;
>  	pmd_t *pmdp, *hpmdp;
>  	spinlock_t *ptl;
>  	int rc;
> @@ -1446,23 +1400,19 @@ static int gmap_protect_rmap(struct gmap *sg, unsigned long raddr,
>  		}
>  		rc = -EAGAIN;
>  		pmdp = gmap_pmd_op_walk(parent, paddr);
> -		if (pmdp) {
> +		if (pmdp && !(pmd_val(*pmdp) & _SEGMENT_ENTRY_INVALID)) {
>  			if (!pmd_large(*pmdp)) {
>  				rc = gmap_protect_rmap_pte(sg, rmap, paddr,
> -							   vmaddr, pmdp, prot);
> +							   vmaddr, pmdp, hpmdp,
> +							   prot);
>  				if (!rc) {
>  					paddr += PAGE_SIZE;
>  					len -= PAGE_SIZE;
>  				}
>  			} else {
> -				rc = gmap_protect_rmap_pmd(sg, rmap, paddr,
> -							   vmaddr, pmdp,
> -							   hpmdp, prot);
> -				if (!rc) {
> -					dist = HPAGE_SIZE - (paddr & ~HPAGE_MASK);
> -					len = len < dist ? 0 : len - dist;
> -					paddr = (paddr & HPAGE_MASK) + HPAGE_SIZE;
> -				}
> +				rc = gmap_pmd_split(parent, paddr, pmdp);
> +				if (!rc)
> +					rc = -EFAULT;
>  			}
>  			gmap_pmd_op_end(parent, pmdp);
>  		}
> @@ -2562,53 +2512,19 @@ static void gmap_shadow_notify_pmd(struct gmap *sg, unsigned long vmaddr,
>  				   unsigned long gaddr)
>  {
>  	struct gmap_rmap *rmap, *rnext, *head;
> -	unsigned long start, end, bits, raddr;
> -
> +	unsigned long bits, raddr;
>  
>  	BUG_ON(!gmap_is_shadow(sg));
>  
>  	spin_lock_nested(&sg->guest_table_lock, GMAP_LOCK_SHADOW);
> -	if (sg->removed) {
> -		spin_unlock(&sg->guest_table_lock);
> -		return;
> -	}
> -	/* Check for top level table */
> -	start = sg->orig_asce & _ASCE_ORIGIN;
> -	end = start + ((sg->orig_asce & _ASCE_TABLE_LENGTH) + 1) * 4096;
> -	if (!(sg->orig_asce & _ASCE_REAL_SPACE) && gaddr >= start &&
> -	    gaddr < ((end & HPAGE_MASK) + HPAGE_SIZE - 1)) {
> -		/* The complete shadow table has to go */
> -		gmap_unshadow(sg);
> -		spin_unlock(&sg->guest_table_lock);
> -		list_del(&sg->list);
> -		gmap_put(sg);
> -		return;
> -	}
> -	/* Remove the page table tree from on specific entry */
>  	head = radix_tree_delete(&sg->host_to_rmap, (vmaddr & HPAGE_MASK) >> PAGE_SHIFT);
>  	gmap_for_each_rmap_safe(rmap, rnext, head) {
>  		bits = rmap->raddr & _SHADOW_RMAP_MASK;
>  		raddr = rmap->raddr ^ bits;
> -		switch (bits) {
> -		case _SHADOW_RMAP_REGION1:
> -			gmap_unshadow_r2t(sg, raddr);
> -			break;
> -		case _SHADOW_RMAP_REGION2:
> -			gmap_unshadow_r3t(sg, raddr);
> -			break;
> -		case _SHADOW_RMAP_REGION3:
> -			gmap_unshadow_sgt(sg, raddr);
> -			break;
> -		case _SHADOW_RMAP_SEGMENT_LP:
> +		if (bits == _SHADOW_RMAP_SEGMENT_LP)
>  			gmap_unshadow_segment(sg, raddr);
> -			break;
> -		case _SHADOW_RMAP_SEGMENT:
> -			gmap_unshadow_pgt(sg, raddr);
> -			break;
> -		case _SHADOW_RMAP_PGTABLE:
> +		else
>  			gmap_unshadow_page(sg, raddr);
> -			break;
> -		}
>  		kfree(rmap);
>  	}
>  	spin_unlock(&sg->guest_table_lock);
> @@ -2777,9 +2693,8 @@ static void pmdp_notify_gmap(struct gmap *gmap, unsigned long gaddr)
>  	table = gmap_table_walk(gmap, gaddr, 1);
>  	if (!table)
>  		return;
> -	bits = *table & _SEGMENT_ENTRY_GMAP_IN;
>  	if (pmd_large(__pmd(*table)) && (*table & _SEGMENT_ENTRY_GMAP_VSIE))
> -		bits |= _SEGMENT_ENTRY_GMAP_VSIE;
> +		bits = _SEGMENT_ENTRY_GMAP_VSIE;
>  	if (!bits)
>  		return;
>  	*table &= ~bits;
> @@ -2792,8 +2707,6 @@ static void pmdp_notify_gmap(struct gmap *gmap, unsigned long gaddr)
>  			gmap_shadow_notify_pmd(sg, vmaddr, gaddr);
>  		spin_unlock(&gmap->shadow_lock);
>  	}
> -	if (bits & _SEGMENT_ENTRY_GMAP_IN)
> -		gmap_call_notifier(gmap, gaddr, gaddr + HPAGE_SIZE - 1);
>  }
>  
>  static void pmdp_notify_split(struct mm_struct *mm, unsigned long vmaddr,
> @@ -2841,9 +2754,8 @@ void pmdp_notify(struct mm_struct *mm, unsigned long vmaddr)
>  			continue;
>  		}
>  
> -		bits = *table & (_SEGMENT_ENTRY_GMAP_IN);
>  		if (pmd_large(__pmd(*table)) && (*table & _SEGMENT_ENTRY_GMAP_VSIE))
> -			bits |= _SEGMENT_ENTRY_GMAP_VSIE;
> +			bits = _SEGMENT_ENTRY_GMAP_VSIE;
>  		*table &= ~bits;
>  		gaddr = __gmap_segment_gaddr(table);
>  		spin_unlock(&gmap->guest_table_lock);
> @@ -2854,8 +2766,6 @@ void pmdp_notify(struct mm_struct *mm, unsigned long vmaddr)
>  				gmap_shadow_notify_pmd(sg, vmaddr, gaddr);
>  			spin_unlock(&gmap->shadow_lock);
>  		}
> -		if (bits & _SEGMENT_ENTRY_GMAP_IN)
> -			gmap_call_notifier(gmap, gaddr, gaddr + HPAGE_SIZE - 1);
>  	}
>  	rcu_read_unlock();
>  }
> @@ -3028,7 +2938,7 @@ bool gmap_test_and_clear_dirty_segment(struct gmap *gmap, pmd_t *pmdp,
>  
>  	/* Clear UC indication and reset protection */
>  	pmd_val(*pmdp) &= ~_SEGMENT_ENTRY_GMAP_UC;
> -	gmap_protect_pmd(gmap, gaddr, vmaddr, pmdp, hpmdp, PROT_READ, 0);
> +	gmap_protect_pmd(gmap, gaddr, vmaddr, pmdp, hpmdp, PROT_READ);
>  	return true;
>  }
>  
> 

Yes, looks much better to me!

-- 

Thanks,

David / dhildenb

      reply	other threads:[~2018-01-30 10:19 UTC|newest]

Thread overview: 67+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2017-12-13 12:53 [RFC/PATCH v2 00/22] KVM/s390: Hugetlbfs enablement Janosch Frank
2017-12-13 12:53 ` [RFC/PATCH v2 01/22] s390/mm: make gmap_protect_range more modular Janosch Frank
2018-01-22 11:33   ` David Hildenbrand
2018-01-22 12:31     ` Janosch Frank
2018-01-22 12:50       ` David Hildenbrand
2018-01-22 13:02         ` Janosch Frank
2017-12-13 12:53 ` [RFC/PATCH v2 02/22] s390/mm: Abstract gmap notify bit setting Janosch Frank
2018-01-22 11:34   ` David Hildenbrand
2017-12-13 12:53 ` [RFC/PATCH v2 03/22] s390/mm: add gmap PMD invalidation notification Janosch Frank
2017-12-21  9:24   ` Janosch Frank
2018-01-22 11:46   ` David Hildenbrand
2018-01-22 13:13     ` Janosch Frank
2018-01-22 13:29       ` David Hildenbrand
2018-01-22 14:04         ` Janosch Frank
2018-01-22 11:56   ` David Hildenbrand
2018-01-22 12:09     ` Janosch Frank
2018-01-22 12:12       ` David Hildenbrand
2017-12-13 12:53 ` [RFC/PATCH v2 04/22] s390/mm: Add gmap pmd invalidation and clearing Janosch Frank
2017-12-13 12:53 ` [RFC/PATCH v2 05/22] s390/mm: hugetlb pages within a gmap can not be freed Janosch Frank
2018-01-24 13:45   ` David Hildenbrand
2018-01-24 13:56     ` Janosch Frank
2017-12-13 12:53 ` [RFC/PATCH v2 06/22] s390/mm: Introduce gmap_pmdp_xchg Janosch Frank
2017-12-13 12:53 ` [RFC/PATCH v2 07/22] RFC: s390/mm: Transfer guest pmd protection to host Janosch Frank
2017-12-13 12:53 ` [RFC/PATCH v2 08/22] s390/mm: Add huge page dirty sync support Janosch Frank
2017-12-13 12:53 ` [RFC/PATCH v2 09/22] s390/mm: clear huge page storage keys on enable_skey Janosch Frank
2017-12-13 12:53 ` [RFC/PATCH v2 10/22] s390/mm: Add huge pmd storage key handling Janosch Frank
2017-12-13 12:53 ` [RFC/PATCH v2 11/22] s390/mm: Remove superfluous parameter Janosch Frank
2017-12-21  9:22   ` Janosch Frank
2018-01-16 12:39     ` Janosch Frank
2018-01-16 13:11   ` David Hildenbrand
2018-01-22 13:14   ` Christian Borntraeger
2018-01-22 13:24     ` Martin Schwidefsky
2017-12-13 12:53 ` [RFC/PATCH v2 12/22] s390/mm: Add gmap_protect_large read protection support Janosch Frank
2017-12-13 12:53 ` [RFC/PATCH v2 13/22] s390/mm: Make gmap_read_table EDAT1 compatible Janosch Frank
2017-12-13 12:53 ` [RFC/PATCH v2 14/22] s390/mm: Make protect_rmap " Janosch Frank
2017-12-13 12:53 ` [RFC/PATCH v2 15/22] s390/mm: GMAP read table extensions Janosch Frank
2017-12-13 12:53 ` [RFC/PATCH v2 16/22] s390/mm: Add shadow segment code Janosch Frank
2017-12-13 12:53 ` [RFC/PATCH v2 17/22] s390/mm: Add VSIE reverse fake case Janosch Frank
2017-12-13 12:53 ` [RFC/PATCH v2 18/22] s390/mm: Remove gmap_pte_op_walk Janosch Frank
2017-12-13 12:53 ` [RFC/PATCH v2 19/22] s390/mm: Split huge pages if granular protection is needed Janosch Frank
2018-01-25  7:16   ` Janosch Frank
2018-01-25 14:39     ` David Hildenbrand
2018-01-25 14:55       ` Janosch Frank
2017-12-13 12:53 ` [RFC/PATCH v2 20/22] s390/mm: Enable gmap huge pmd support Janosch Frank
2017-12-13 12:53 ` [RFC/PATCH v2 21/22] KVM: s390: Add KVM HPAGE capability Janosch Frank
2017-12-20 13:02   ` Cornelia Huck
2017-12-20 13:17     ` Janosch Frank
2017-12-20 13:21       ` Cornelia Huck
2017-12-13 12:53 ` [RFC/PATCH v2 22/22] RFC: s390/mm: Add gmap lock classes Janosch Frank
2017-12-20 12:24   ` Christian Borntraeger
2017-12-20 12:36     ` Janosch Frank
2017-12-20 12:23 ` [RFC/PATCH v2 00/22] KVM/s390: Hugetlbfs enablement Christian Borntraeger
2017-12-21 12:00   ` David Hildenbrand
2017-12-22  9:08     ` Christian Borntraeger
2018-01-02  0:02       ` Janosch Frank
2018-01-22 11:23 ` David Hildenbrand
2018-01-22 11:56   ` Christian Borntraeger
2018-01-23 21:15 ` David Hildenbrand
2018-01-24  9:01   ` Janosch Frank
2018-01-24  9:14     ` David Hildenbrand
2018-01-25 15:33       ` [PATCH 0/2] Huge page pte protection Janosch Frank
2018-01-25 15:33         ` [PATCH 1/2] mm: s390: Only notify on 4k pages Janosch Frank
2018-01-25 16:04           ` David Hildenbrand
2018-01-26 10:31             ` Janosch Frank
2018-01-25 15:33         ` [PATCH 2/2] mm: s390: Rename gmap_pte_op_fixup Janosch Frank
2018-01-26 10:34       ` [PATCH v2] mm: s390: Only notify on 4k pages Janosch Frank
2018-01-30 10:19         ` David Hildenbrand [this message]

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=caf66413-26f1-64bc-f49c-c2f49604de46@redhat.com \
    --to=david@redhat.com \
    --cc=borntraeger@de.ibm.com \
    --cc=dominik.dingel@gmail.com \
    --cc=frankja@linux.vnet.ibm.com \
    --cc=kvm@vger.kernel.org \
    --cc=linux-s390@vger.kernel.org \
    --cc=schwidefsky@de.ibm.com \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.