linux-kernel.vger.kernel.org archive mirror

* [PATCH 1/2] hugetlb: really allocate vma lock for all sharable vmas
From: Mike Kravetz @ 2022-12-12 23:50 UTC
  To: linux-mm, linux-kernel
  Cc: Muchun Song, Miaohe Lin, David Hildenbrand, Peter Xu,
	Naoya Horiguchi, Aneesh Kumar K . V, James Houghton,
	Mina Almasry, Andrew Morton, Mike Kravetz

Commit bbff39cc6cbc ("hugetlb: allocate vma lock for all sharable vmas")
removed the pmd sharable checks in the vma lock helper routines.
However, it left the functional version of helper routines behind #ifdef
CONFIG_ARCH_WANT_HUGE_PMD_SHARE.  Therefore, the vma lock is not being
used for sharable vmas on architectures that do not support pmd sharing.
On these architectures, a potential fault/truncation race is exposed
that could leave pages in a hugetlb file past i_size until the file is
removed.
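
For illustration (a sketch, not kernel code): a minimal userspace model
of this race, where a pthread rwlock stands in for vma_lock->rw_sema and
the fault/truncation paths are reduced to their locking effect.  All
names and sizes here are invented for the sketch:

	#include <pthread.h>
	#include <stdio.h>

	static pthread_rwlock_t vma_lock = PTHREAD_RWLOCK_INITIALIZER;
	static long i_size = 4096;	/* hypothetical file size */

	/* Fault path: hold the lock shared while checking i_size and
	 * "instantiating" the page, as hugetlb_vma_lock_read() allows. */
	static void fault(long offset)
	{
		pthread_rwlock_rdlock(&vma_lock);
		if (offset < i_size)
			printf("fault: page instantiated at %ld\n", offset);
		pthread_rwlock_unlock(&vma_lock);
	}

	/* Truncation path: hold the lock exclusive while shrinking
	 * i_size, as hugetlb_vma_lock_write() does.  With the no-op
	 * stubs on !CONFIG_ARCH_WANT_HUGE_PMD_SHARE, the two paths
	 * could interleave and leave a page past the new i_size. */
	static void truncate_file(long new_size)
	{
		pthread_rwlock_wrlock(&vma_lock);
		i_size = new_size;
		pthread_rwlock_unlock(&vma_lock);
	}

	int main(void)
	{
		truncate_file(0);
		fault(0);	/* sees the new i_size: nothing instantiated */
		return 0;
	}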

Move the functional vma lock helpers outside the ifdef, and remove the
non-functional stubs.  Since the vma lock is not just for pmd sharing,
rename __vma_shareable_flags_pmd to __vma_shareable_lock.

Fixes: bbff39cc6cbc ("hugetlb: allocate vma lock for all sharable vmas")
Signed-off-by: Mike Kravetz <mike.kravetz@oracle.com>
---
 mm/hugetlb.c | 333 +++++++++++++++++++++++----------------------------
 1 file changed, 148 insertions(+), 185 deletions(-)

diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index e36ca75311a5..9c251faeb6f5 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -255,6 +255,152 @@ static inline struct hugepage_subpool *subpool_vma(struct vm_area_struct *vma)
 	return subpool_inode(file_inode(vma->vm_file));
 }
 
+/*
+ * hugetlb vma_lock helper routines
+ */
+static bool __vma_shareable_lock(struct vm_area_struct *vma)
+{
+	return vma->vm_flags & (VM_MAYSHARE | VM_SHARED) &&
+		vma->vm_private_data;
+}
+
+void hugetlb_vma_lock_read(struct vm_area_struct *vma)
+{
+	if (__vma_shareable_lock(vma)) {
+		struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;
+
+		down_read(&vma_lock->rw_sema);
+	}
+}
+
+void hugetlb_vma_unlock_read(struct vm_area_struct *vma)
+{
+	if (__vma_shareable_lock(vma)) {
+		struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;
+
+		up_read(&vma_lock->rw_sema);
+	}
+}
+
+void hugetlb_vma_lock_write(struct vm_area_struct *vma)
+{
+	if (__vma_shareable_lock(vma)) {
+		struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;
+
+		down_write(&vma_lock->rw_sema);
+	}
+}
+
+void hugetlb_vma_unlock_write(struct vm_area_struct *vma)
+{
+	if (__vma_shareable_lock(vma)) {
+		struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;
+
+		up_write(&vma_lock->rw_sema);
+	}
+}
+
+int hugetlb_vma_trylock_write(struct vm_area_struct *vma)
+{
+	struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;
+
+	if (!__vma_shareable_lock(vma))
+		return 1;
+
+	return down_write_trylock(&vma_lock->rw_sema);
+}
+
+void hugetlb_vma_assert_locked(struct vm_area_struct *vma)
+{
+	if (__vma_shareable_lock(vma)) {
+		struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;
+
+		lockdep_assert_held(&vma_lock->rw_sema);
+	}
+}
+
+void hugetlb_vma_lock_release(struct kref *kref)
+{
+	struct hugetlb_vma_lock *vma_lock = container_of(kref,
+			struct hugetlb_vma_lock, refs);
+
+	kfree(vma_lock);
+}
+
+static void __hugetlb_vma_unlock_write_put(struct hugetlb_vma_lock *vma_lock)
+{
+	struct vm_area_struct *vma = vma_lock->vma;
+
+	/*
+	 * vma_lock structure may or may not be released as a result of put,
+	 * it certainly will no longer be attached to vma so clear pointer.
+	 * Semaphore synchronizes access to vma_lock->vma field.
+	 */
+	vma_lock->vma = NULL;
+	vma->vm_private_data = NULL;
+	up_write(&vma_lock->rw_sema);
+	kref_put(&vma_lock->refs, hugetlb_vma_lock_release);
+}
+
+static void __hugetlb_vma_unlock_write_free(struct vm_area_struct *vma)
+{
+	if (__vma_shareable_lock(vma)) {
+		struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;
+
+		__hugetlb_vma_unlock_write_put(vma_lock);
+	}
+}
+
+static void hugetlb_vma_lock_free(struct vm_area_struct *vma)
+{
+	/*
+	 * Only present in sharable vmas.
+	 */
+	if (!vma || !__vma_shareable_lock(vma))
+		return;
+
+	if (vma->vm_private_data) {
+		struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;
+
+		down_write(&vma_lock->rw_sema);
+		__hugetlb_vma_unlock_write_put(vma_lock);
+	}
+}
+
+static void hugetlb_vma_lock_alloc(struct vm_area_struct *vma)
+{
+	struct hugetlb_vma_lock *vma_lock;
+
+	/* Only establish in (flags) sharable vmas */
+	if (!vma || !(vma->vm_flags & VM_MAYSHARE))
+		return;
+
+	/* Should never get here with non-NULL vm_private_data */
+	if (vma->vm_private_data)
+		return;
+
+	vma_lock = kmalloc(sizeof(*vma_lock), GFP_KERNEL);
+	if (!vma_lock) {
+		/*
+		 * If we can not allocate structure, then vma can not
+		 * participate in pmd sharing.  This is only a possible
+		 * performance enhancement and memory saving issue.
+		 * However, the lock is also used to synchronize page
+		 * faults with truncation.  If the lock is not present,
+		 * unlikely races could leave pages in a file past i_size
+		 * until the file is removed.  Warn in the unlikely case of
+		 * allocation failure.
+		 */
+		pr_warn_once("HugeTLB: unable to allocate vma specific lock\n");
+		return;
+	}
+
+	kref_init(&vma_lock->refs);
+	init_rwsem(&vma_lock->rw_sema);
+	vma_lock->vma = vma;
+	vma->vm_private_data = vma_lock;
+}
+
 /* Helper that removes a struct file_region from the resv_map cache and returns
  * it for use.
  */
@@ -6557,7 +6703,8 @@ bool hugetlb_reserve_pages(struct inode *inode,
 	}
 
 	/*
-	 * vma specific semaphore used for pmd sharing synchronization
+	 * vma specific semaphore used for pmd sharing and fault/truncation
+	 * synchronization
 	 */
 	hugetlb_vma_lock_alloc(vma);
 
@@ -6813,149 +6960,6 @@ void adjust_range_if_pmd_sharing_possible(struct vm_area_struct *vma,
 		*end = ALIGN(*end, PUD_SIZE);
 }
 
-static bool __vma_shareable_flags_pmd(struct vm_area_struct *vma)
-{
-	return vma->vm_flags & (VM_MAYSHARE | VM_SHARED) &&
-		vma->vm_private_data;
-}
-
-void hugetlb_vma_lock_read(struct vm_area_struct *vma)
-{
-	if (__vma_shareable_flags_pmd(vma)) {
-		struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;
-
-		down_read(&vma_lock->rw_sema);
-	}
-}
-
-void hugetlb_vma_unlock_read(struct vm_area_struct *vma)
-{
-	if (__vma_shareable_flags_pmd(vma)) {
-		struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;
-
-		up_read(&vma_lock->rw_sema);
-	}
-}
-
-void hugetlb_vma_lock_write(struct vm_area_struct *vma)
-{
-	if (__vma_shareable_flags_pmd(vma)) {
-		struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;
-
-		down_write(&vma_lock->rw_sema);
-	}
-}
-
-void hugetlb_vma_unlock_write(struct vm_area_struct *vma)
-{
-	if (__vma_shareable_flags_pmd(vma)) {
-		struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;
-
-		up_write(&vma_lock->rw_sema);
-	}
-}
-
-int hugetlb_vma_trylock_write(struct vm_area_struct *vma)
-{
-	struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;
-
-	if (!__vma_shareable_flags_pmd(vma))
-		return 1;
-
-	return down_write_trylock(&vma_lock->rw_sema);
-}
-
-void hugetlb_vma_assert_locked(struct vm_area_struct *vma)
-{
-	if (__vma_shareable_flags_pmd(vma)) {
-		struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;
-
-		lockdep_assert_held(&vma_lock->rw_sema);
-	}
-}
-
-void hugetlb_vma_lock_release(struct kref *kref)
-{
-	struct hugetlb_vma_lock *vma_lock = container_of(kref,
-			struct hugetlb_vma_lock, refs);
-
-	kfree(vma_lock);
-}
-
-static void __hugetlb_vma_unlock_write_put(struct hugetlb_vma_lock *vma_lock)
-{
-	struct vm_area_struct *vma = vma_lock->vma;
-
-	/*
-	 * vma_lock structure may or may not be released as a result of put,
-	 * it certainly will no longer be attached to vma so clear pointer.
-	 * Semaphore synchronizes access to vma_lock->vma field.
-	 */
-	vma_lock->vma = NULL;
-	vma->vm_private_data = NULL;
-	up_write(&vma_lock->rw_sema);
-	kref_put(&vma_lock->refs, hugetlb_vma_lock_release);
-}
-
-static void __hugetlb_vma_unlock_write_free(struct vm_area_struct *vma)
-{
-	if (__vma_shareable_flags_pmd(vma)) {
-		struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;
-
-		__hugetlb_vma_unlock_write_put(vma_lock);
-	}
-}
-
-static void hugetlb_vma_lock_free(struct vm_area_struct *vma)
-{
-	/*
-	 * Only present in sharable vmas.
-	 */
-	if (!vma || !__vma_shareable_flags_pmd(vma))
-		return;
-
-	if (vma->vm_private_data) {
-		struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;
-
-		down_write(&vma_lock->rw_sema);
-		__hugetlb_vma_unlock_write_put(vma_lock);
-	}
-}
-
-static void hugetlb_vma_lock_alloc(struct vm_area_struct *vma)
-{
-	struct hugetlb_vma_lock *vma_lock;
-
-	/* Only establish in (flags) sharable vmas */
-	if (!vma || !(vma->vm_flags & VM_MAYSHARE))
-		return;
-
-	/* Should never get here with non-NULL vm_private_data */
-	if (vma->vm_private_data)
-		return;
-
-	vma_lock = kmalloc(sizeof(*vma_lock), GFP_KERNEL);
-	if (!vma_lock) {
-		/*
-		 * If we can not allocate structure, then vma can not
-		 * participate in pmd sharing.  This is only a possible
-		 * performance enhancement and memory saving issue.
-		 * However, the lock is also used to synchronize page
-		 * faults with truncation.  If the lock is not present,
-		 * unlikely races could leave pages in a file past i_size
-		 * until the file is removed.  Warn in the unlikely case of
-		 * allocation failure.
-		 */
-		pr_warn_once("HugeTLB: unable to allocate vma specific lock\n");
-		return;
-	}
-
-	kref_init(&vma_lock->refs);
-	init_rwsem(&vma_lock->rw_sema);
-	vma_lock->vma = vma;
-	vma->vm_private_data = vma_lock;
-}
-
 /*
  * Search for a shareable pmd page for hugetlb. In any case calls pmd_alloc()
  * and returns the corresponding pte. While this is not necessary for the
@@ -7044,47 +7048,6 @@ int huge_pmd_unshare(struct mm_struct *mm, struct vm_area_struct *vma,
 
 #else /* !CONFIG_ARCH_WANT_HUGE_PMD_SHARE */
 
-void hugetlb_vma_lock_read(struct vm_area_struct *vma)
-{
-}
-
-void hugetlb_vma_unlock_read(struct vm_area_struct *vma)
-{
-}
-
-void hugetlb_vma_lock_write(struct vm_area_struct *vma)
-{
-}
-
-void hugetlb_vma_unlock_write(struct vm_area_struct *vma)
-{
-}
-
-int hugetlb_vma_trylock_write(struct vm_area_struct *vma)
-{
-	return 1;
-}
-
-void hugetlb_vma_assert_locked(struct vm_area_struct *vma)
-{
-}
-
-void hugetlb_vma_lock_release(struct kref *kref)
-{
-}
-
-static void __hugetlb_vma_unlock_write_free(struct vm_area_struct *vma)
-{
-}
-
-static void hugetlb_vma_lock_free(struct vm_area_struct *vma)
-{
-}
-
-static void hugetlb_vma_lock_alloc(struct vm_area_struct *vma)
-{
-}
-
 pte_t *huge_pmd_share(struct mm_struct *mm, struct vm_area_struct *vma,
 		      unsigned long addr, pud_t *pud)
 {
-- 
2.38.1



* [PATCH 2/2] hugetlb: update vma flag check for hugetlb vma lock
From: Mike Kravetz @ 2022-12-12 23:50 UTC
  To: linux-mm, linux-kernel
  Cc: Muchun Song, Miaohe Lin, David Hildenbrand, Peter Xu,
	Naoya Horiguchi, Aneesh Kumar K . V, James Houghton,
	Mina Almasry, Andrew Morton, Mike Kravetz

The check for whether a hugetlb vma lock exists partially depends on
the vma's flags.  Currently, it checks for either VM_MAYSHARE or
VM_SHARED.  Both flags are checked because VM_MAYSHARE was previously
cleared in hugetlb vmas as they were torn down.  This is no longer the
case, and only the VM_MAYSHARE check is required.
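
For illustration (a sketch, not kernel code): a userspace reduction of
the check before and after, where struct vma_model stands in for
struct vm_area_struct and the flag values mirror include/linux/mm.h:

	#include <stdbool.h>

	#define VM_SHARED   0x00000008UL	/* as in include/linux/mm.h */
	#define VM_MAYSHARE 0x00000080UL

	struct vma_model {
		unsigned long vm_flags;
		void *vm_private_data;	/* hugetlb_vma_lock, if allocated */
	};

	/* Old check: either flag sufficed, since VM_MAYSHARE used to be
	 * cleared during teardown while VM_SHARED survived. */
	static bool shareable_old(struct vma_model *v)
	{
		return v->vm_flags & (VM_MAYSHARE | VM_SHARED) &&
			v->vm_private_data;
	}

	/* New check: VM_MAYSHARE is no longer cleared, so it alone,
	 * plus a present lock structure, decides. */
	static bool shareable_new(struct vma_model *v)
	{
		return v->vm_flags & VM_MAYSHARE && v->vm_private_data;
	}

	int main(void)
	{
		struct vma_model v = { VM_MAYSHARE, &v };

		/* both checks agree for a sharable vma with a lock */
		return !(shareable_old(&v) && shareable_new(&v));
	}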

Signed-off-by: Mike Kravetz <mike.kravetz@oracle.com>
---
 mm/hugetlb.c | 3 +--
 1 file changed, 1 insertion(+), 2 deletions(-)

diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 9c251faeb6f5..985881f9e8cc 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -260,8 +260,7 @@ static inline struct hugepage_subpool *subpool_vma(struct vm_area_struct *vma)
  */
 static bool __vma_shareable_lock(struct vm_area_struct *vma)
 {
-	return vma->vm_flags & (VM_MAYSHARE | VM_SHARED) &&
-		vma->vm_private_data;
+	return vma->vm_flags & VM_MAYSHARE && vma->vm_private_data;
 }
 
 void hugetlb_vma_lock_read(struct vm_area_struct *vma)
-- 
2.38.1



* Re: [PATCH 1/2] hugetlb: really allocate vma lock for all sharable vmas
From: Andrew Morton @ 2022-12-15 22:37 UTC
  To: Mike Kravetz
  Cc: linux-mm, linux-kernel, Muchun Song, Miaohe Lin,
	David Hildenbrand, Peter Xu, Naoya Horiguchi, Aneesh Kumar K . V,
	James Houghton, Mina Almasry

On Mon, 12 Dec 2022 15:50:41 -0800 Mike Kravetz <mike.kravetz@oracle.com> wrote:

> Commit bbff39cc6cbc ("hugetlb: allocate vma lock for all sharable vmas")
> removed the pmd sharable checks in the vma lock helper routines.
> However, it left the functional version of helper routines behind #ifdef
> CONFIG_ARCH_WANT_HUGE_PMD_SHARE.  Therefore, the vma lock is not being
> used for sharable vmas on architectures that do not support pmd sharing.
> On these architectures, a potential fault/truncation race is exposed
> that could leave pages in a hugetlb file past i_size until the file is
> removed.
> 
> Move the functional vma lock helpers outside the ifdef, and remove the
> non-functional stubs.  Since the vma lock is not just for pmd sharing,
> rename __vma_shareable_flags_pmd to __vma_shareable_lock.
> 
> Fixes: bbff39cc6cbc ("hugetlb: allocate vma lock for all sharable vmas")

So I assume we want this backported to 6.1?  I added a cc:stable.

The [2/2] patch is, I think, 6.3-rc1 material?


* Re: [PATCH 1/2] hugetlb: really allocate vma lock for all sharable vmas
From: Mike Kravetz @ 2022-12-15 22:50 UTC
  To: Andrew Morton
  Cc: linux-mm, linux-kernel, Muchun Song, Miaohe Lin,
	David Hildenbrand, Peter Xu, Naoya Horiguchi, Aneesh Kumar K . V,
	James Houghton, Mina Almasry

On 12/15/22 14:37, Andrew Morton wrote:
> On Mon, 12 Dec 2022 15:50:41 -0800 Mike Kravetz <mike.kravetz@oracle.com> wrote:
> 
> > Commit bbff39cc6cbc ("hugetlb: allocate vma lock for all sharable vmas")
> > removed the pmd sharable checks in the vma lock helper routines.
> > However, it left the functional version of helper routines behind #ifdef
> > CONFIG_ARCH_WANT_HUGE_PMD_SHARE.  Therefore, the vma lock is not being
> > used for sharable vmas on architectures that do not support pmd sharing.
> > On these architectures, a potential fault/truncation race is exposed
> > that could leave pages in a hugetlb file past i_size until the file is
> > removed.
> > 
> > Move the functional vma lock helpers outside the ifdef, and remove the
> > non-functional stubs.  Since the vma lock is not just for pmd sharing,
> > rename __vma_shareable_flags_pmd to __vma_shareable_lock.
> > 
> > Fixes: bbff39cc6cbc ("hugetlb: allocate vma lock for all sharable vmas")
> 
> So I assume we want this backported to 6.1?  I added a cc:stable.
> 
> The [2/2] patch is, I think, 6.3-rc1 material?

That is correct.  Sorry, I forgot the cc:stable.
-- 
Mike Kravetz


* Re: [PATCH 1/2] hugetlb: really allocate vma lock for all sharable vmas
From: Miaohe Lin @ 2022-12-17  1:59 UTC
  To: Mike Kravetz, linux-mm, linux-kernel
  Cc: Muchun Song, David Hildenbrand, Peter Xu, Naoya Horiguchi,
	Aneesh Kumar K . V, James Houghton, Mina Almasry, Andrew Morton

On 2022/12/13 7:50, Mike Kravetz wrote:
> Commit bbff39cc6cbc ("hugetlb: allocate vma lock for all sharable vmas")
> removed the pmd sharable checks in the vma lock helper routines.
> However, it left the functional version of helper routines behind #ifdef
> CONFIG_ARCH_WANT_HUGE_PMD_SHARE.  Therefore, the vma lock is not being

Oh, we missed the !CONFIG_ARCH_WANT_HUGE_PMD_SHARE case in the previous review. :(

> used for sharable vmas on architectures that do not support pmd sharing.
> On these architectures, a potential fault/truncation race is exposed
> that could leave pages in a hugetlb file past i_size until the file is
> removed.
> 
> Move the functional vma lock helpers outside the ifdef, and remove the
> non-functional stubs.  Since the vma lock is not just for pmd sharing,
> rename __vma_shareable_flags_pmd to __vma_shareable_lock.
> 
> Fixes: bbff39cc6cbc ("hugetlb: allocate vma lock for all sharable vmas")
> Signed-off-by: Mike Kravetz <mike.kravetz@oracle.com>

Thanks for fixing. This patch looks good to me.

Reviewed-by: Miaohe Lin <linmiaohe@huawei.com>

Thanks,
Miaohe Lin



* Re: [PATCH 2/2] hugetlb: update vma flag check for hugetlb vma lock
From: Miaohe Lin @ 2022-12-17  2:01 UTC
  To: Mike Kravetz, linux-mm, linux-kernel
  Cc: Muchun Song, David Hildenbrand, Peter Xu, Naoya Horiguchi,
	Aneesh Kumar K . V, James Houghton, Mina Almasry, Andrew Morton

On 2022/12/13 7:50, Mike Kravetz wrote:
> The check for whether a hugetlb vma lock exists partially depends on
> the vma's flags.  Currently, it checks for either VM_MAYSHARE or
> VM_SHARED.  Both flags are checked because VM_MAYSHARE was previously
> cleared in hugetlb vmas as they were torn down.  This is no longer the
> case, and only the VM_MAYSHARE check is required.
> 
> Signed-off-by: Mike Kravetz <mike.kravetz@oracle.com>

Nice cleanup. This is also what I planned to do. ;)

Reviewed-by: Miaohe Lin <linmiaohe@huawei.com>

Thanks,
Miaohe Lin

