All of lore.kernel.org
 help / color / mirror / Atom feed
* [Cluster-devel] cluster/gfs-kernel/src/gfs inode.h ops_export.c
@ 2007-01-16 20:39 wcheng
  2007-01-17 14:12 ` Steven Whitehouse
  0 siblings, 1 reply; 5+ messages in thread
From: wcheng @ 2007-01-16 20:39 UTC (permalink / raw)
  To: cluster-devel.redhat.com

CVSROOT:	/cvs/cluster
Module name:	cluster
Branch: 	RHEL4
Changes by:	wcheng at sourceware.org	2007-01-16 20:39:03

Modified files:
	gfs-kernel/src/gfs: inode.h ops_export.c 

Log message:
	Bugzilla 190475 (rename 3-1)
	
	Yank get inode logic out of gfs_get_dentry() (currently only used by NFS
	file serving). This will allow gfs_rename (patch 3-3) to re-use (share)
	this logic to update its stale inode contents.

Patches:
http://sourceware.org/cgi-bin/cvsweb.cgi/cluster/gfs-kernel/src/gfs/inode.h.diff?cvsroot=cluster&only_with_tag=RHEL4&r1=1.3&r2=1.3.2.1
http://sourceware.org/cgi-bin/cvsweb.cgi/cluster/gfs-kernel/src/gfs/ops_export.c.diff?cvsroot=cluster&only_with_tag=RHEL4&r1=1.3.2.2&r2=1.3.2.3

--- cluster/gfs-kernel/src/gfs/inode.h	2004/10/08 22:02:50	1.3
+++ cluster/gfs-kernel/src/gfs/inode.h	2007/01/16 20:39:03	1.3.2.1
@@ -53,6 +53,8 @@
 int gfs_alloc_qinode(struct gfs_sbd *sdp);
 int gfs_alloc_linode(struct gfs_sbd *sdp);
 
+struct inode *gfs_refresh_iobj(struct gfs_sbd *sdp, void *inum_obj, int *n);
+
 /*  Inlines  */
 
 static __inline__ int
--- cluster/gfs-kernel/src/gfs/ops_export.c	2007/01/11 18:52:18	1.3.2.2
+++ cluster/gfs-kernel/src/gfs/ops_export.c	2007/01/16 20:39:03	1.3.2.3
@@ -280,62 +280,57 @@
 }
 
 /**
- * gfs_get_dentry -
- * @param1: description
- * @param2: description
- * @param3: description
+ * gfs_refresh_iobj -
+ *  @sdp: pointer to struct gfs_sbd
+ *  @inum: pointer to struct gfs_inum
+ *  @refresh: set to true if inode refreshed
+ *  return: pointer to struct inode or errno
  *
- * Function description
+ *  This function was part of gfs_get_dentry where it
+ *  - allocated a gfs_inode "ip"
+ *  - disk-read in "ip"
+ *  - allocated a vfs inode for this "ip".
  *
- * Returns: what is returned
+ *  We yank it out to allow gfs_rename() to re-use
+ *  this logic.
  */
 
-struct dentry *
-gfs_get_dentry(struct super_block *sb, void *inump)
+struct inode *gfs_refresh_iobj(struct gfs_sbd *sdp, void *inum_obj, int *miss)
 {
-	struct gfs_sbd *sdp = vfs2sdp(sb);
-	struct inode_cookie *cookie = (struct inode_cookie *)inump;
-	struct gfs_inum inum;
 	struct gfs_holder i_gh, ri_gh, rgd_gh;
 	struct gfs_rgrpd *rgd;
 	struct buffer_head *bh;
 	struct gfs_dinode *di;
 	struct gfs_inode *ip;
 	struct inode *inode;
-	struct dentry *dentry;
+	struct gfs_inum *inum = (struct gfs_inum *) inum_obj;
 	int error;
 
-	atomic_inc(&sdp->sd_ops_export);
-
-	if (!cookie->formal_ino ||
-	    cookie->formal_ino == sdp->sd_jiinode->i_num.no_formal_ino ||
-	    cookie->formal_ino == sdp->sd_riinode->i_num.no_formal_ino ||
-	    cookie->formal_ino == sdp->sd_qinode->i_num.no_formal_ino ||
-	    cookie->formal_ino == sdp->sd_linode->i_num.no_formal_ino)
-		return ERR_PTR(-EINVAL);
-
-	inum.no_formal_ino = cookie->formal_ino;
-	inum.no_addr = cookie->formal_ino;
-
 	error = gfs_glock_nq_num(sdp,
-				 inum.no_formal_ino, &gfs_inode_glops,
+				 inum->no_formal_ino, &gfs_inode_glops,
 				 LM_ST_SHARED, LM_FLAG_ANY | GL_LOCAL_EXCL,
 				 &i_gh);
 	if (error)
 		return ERR_PTR(error);
 
-	error = gfs_inode_get(i_gh.gh_gl, &inum, NO_CREATE, &ip);
+	error = gfs_inode_get(i_gh.gh_gl, inum, NO_CREATE, &ip);
 	if (error)
 		goto fail;
 	if (ip)
 		goto out;
 
+	/*
+	 * Used by NFS support statistics for FHs that miss their dentres.
+	 */
+	if (miss)
+		*miss = 1;
+
 	error = gfs_rindex_hold(sdp, &ri_gh);
 	if (error)
 		goto fail;
 
 	error = -EINVAL;
-	rgd = gfs_blk2rgrpd(sdp, inum.no_addr);
+	rgd = gfs_blk2rgrpd(sdp, inum->no_addr);
 	if (!rgd)
 		goto fail_rindex;
 
@@ -344,10 +339,10 @@
 		goto fail_rindex;
 
 	error = -ESTALE;
-	if (gfs_get_block_type(rgd, inum.no_addr) != GFS_BLKST_USEDMETA)
+	if (gfs_get_block_type(rgd, inum->no_addr) != GFS_BLKST_USEDMETA)
 		goto fail_rgd;
 
-	error = gfs_dread(i_gh.gh_gl, inum.no_addr,
+	error = gfs_dread(i_gh.gh_gl, inum->no_addr,
 			  DIO_START | DIO_WAIT, &bh);
 	if (error)
 		goto fail_rgd;
@@ -368,8 +363,6 @@
 	if (error)
 		goto fail;
 
-	atomic_inc(&sdp->sd_fh2dentry_misses);
-
  out:
 	gfs_glock_dq_uninit(&i_gh);
 
@@ -379,6 +372,61 @@
 	if (!inode)
 		return ERR_PTR(-ENOMEM);
 
+	return inode;
+
+ fail_relse:
+        brelse(bh);
+
+ fail_rgd:
+	gfs_glock_dq_uninit(&rgd_gh);
+
+ fail_rindex:
+	gfs_glock_dq_uninit(&ri_gh);
+
+ fail:
+	gfs_glock_dq_uninit(&i_gh);
+	return ERR_PTR(error);
+}
+
+/**
+ * gfs_get_dentry -
+ * @param1: description
+ * @param2: description
+ * @param3: description
+ *
+ * Function description
+ *
+ * Returns: what is returned
+ */
+
+struct dentry *
+gfs_get_dentry(struct super_block *sb, void *inump)
+{
+	struct gfs_sbd *sdp = vfs2sdp(sb);
+	struct inode_cookie *cookie = (struct inode_cookie *)inump;
+	struct gfs_inum inum;
+	struct inode *inode;
+	struct dentry *dentry;
+	int dentry_miss=0;
+
+	atomic_inc(&sdp->sd_ops_export);
+
+	if (!cookie->formal_ino ||
+	    cookie->formal_ino == sdp->sd_jiinode->i_num.no_formal_ino ||
+	    cookie->formal_ino == sdp->sd_riinode->i_num.no_formal_ino ||
+	    cookie->formal_ino == sdp->sd_qinode->i_num.no_formal_ino ||
+	    cookie->formal_ino == sdp->sd_linode->i_num.no_formal_ino)
+		return ERR_PTR(-EINVAL);
+
+	inum.no_formal_ino = cookie->formal_ino;
+	inum.no_addr = cookie->formal_ino;
+
+	inode = gfs_refresh_iobj(sdp, &inum, &dentry_miss);
+	if (dentry_miss)
+		atomic_inc(&sdp->sd_fh2dentry_misses);
+	if (IS_ERR(inode))
+		return ERR_PTR((long)inode);
+
 	/* inode->i_generation is GFS dinode's mh_incarn value */
 	if (cookie->gen_valid && cookie->gen != inode->i_generation) {
 		iput(inode);
@@ -393,19 +441,6 @@
 
 	dentry->d_op = &gfs_dops;
 	return dentry;
-
- fail_relse:
-        brelse(bh);
-
- fail_rgd:
-	gfs_glock_dq_uninit(&rgd_gh);
-
- fail_rindex:
-	gfs_glock_dq_uninit(&ri_gh);
-
- fail:
-	gfs_glock_dq_uninit(&i_gh);
-	return ERR_PTR(error);
 }
 
 struct export_operations gfs_export_ops = {



^ permalink raw reply	[flat|nested] 5+ messages in thread

* [Cluster-devel] cluster/gfs-kernel/src/gfs inode.h ops_export.c
  2007-01-16 20:39 [Cluster-devel] cluster/gfs-kernel/src/gfs inode.h ops_export.c wcheng
@ 2007-01-17 14:12 ` Steven Whitehouse
  2007-01-17 23:05   ` Wendy Cheng
  0 siblings, 1 reply; 5+ messages in thread
From: Steven Whitehouse @ 2007-01-17 14:12 UTC (permalink / raw)
  To: cluster-devel.redhat.com

Hi,

Just wondering why this:

On Tue, 2007-01-16 at 20:39 +0000, wcheng at sourceware.org wrote:
[snip]
> --- cluster/gfs-kernel/src/gfs/ops_export.c	2007/01/11 18:52:18	1.3.2.2
> +++ cluster/gfs-kernel/src/gfs/ops_export.c	2007/01/16 20:39:03	1.3.2.3
> @@ -280,62 +280,57 @@
>  }
>  
>  /**
> - * gfs_get_dentry -
> - * @param1: description
> - * @param2: description
> - * @param3: description
> + * gfs_refresh_iobj -
> + *  @sdp: pointer to struct gfs_sbd
> + *  @inum: pointer to struct gfs_inum
> + *  @refresh: set to true if inode refreshed
> + *  return: pointer to struct inode or errno
>   *
> - * Function description
> + *  This function was part of gfs_get_dentry where it
> + *  - allocated a gfs_inode "ip"
> + *  - disk-read in "ip"
> + *  - allocated a vfs inode for this "ip".
>   *
> - * Returns: what is returned
> + *  We yank it out to allow gfs_rename() to re-use
> + *  this logic.
>   */
>  
> -struct dentry *
> -gfs_get_dentry(struct super_block *sb, void *inump)
> +struct inode *gfs_refresh_iobj(struct gfs_sbd *sdp, void *inum_obj, int *miss)
>  {
> -	struct gfs_sbd *sdp = vfs2sdp(sb);
> -	struct inode_cookie *cookie = (struct inode_cookie *)inump;
> -	struct gfs_inum inum;
>  	struct gfs_holder i_gh, ri_gh, rgd_gh;
>  	struct gfs_rgrpd *rgd;
>  	struct buffer_head *bh;
>  	struct gfs_dinode *di;
>  	struct gfs_inode *ip;
>  	struct inode *inode;
> -	struct dentry *dentry;
> +	struct gfs_inum *inum = (struct gfs_inum *) inum_obj;
>  	int error;
>  
> -	atomic_inc(&sdp->sd_ops_export);
> -
> -	if (!cookie->formal_ino ||
> -	    cookie->formal_ino == sdp->sd_jiinode->i_num.no_formal_ino ||
> -	    cookie->formal_ino == sdp->sd_riinode->i_num.no_formal_ino ||
> -	    cookie->formal_ino == sdp->sd_qinode->i_num.no_formal_ino ||
> -	    cookie->formal_ino == sdp->sd_linode->i_num.no_formal_ino)
> -		return ERR_PTR(-EINVAL);
> -
> -	inum.no_formal_ino = cookie->formal_ino;
> -	inum.no_addr = cookie->formal_ino;
> -
>  	error = gfs_glock_nq_num(sdp,
> -				 inum.no_formal_ino, &gfs_inode_glops,
> +				 inum->no_formal_ino, &gfs_inode_glops,
>  				 LM_ST_SHARED, LM_FLAG_ANY | GL_LOCAL_EXCL,
>  				 &i_gh);
needs the GL_LOCAL_EXCL flag. I would have thought an ordinary shared
lock would be enough?

In GFS2 I'd like to try and eliminate this flag since it would be just
as easy to use a combination of a mutex or rwsem and a glock to achieve
the same thing and it makes the glock code simpler. It's only used in
about three places anyway and I'm not so sure that its required at all
in this case,

Steve.




^ permalink raw reply	[flat|nested] 5+ messages in thread

* [Cluster-devel] cluster/gfs-kernel/src/gfs inode.h ops_export.c
  2007-01-17 14:12 ` Steven Whitehouse
@ 2007-01-17 23:05   ` Wendy Cheng
  2007-01-18  9:56     ` Steven Whitehouse
  0 siblings, 1 reply; 5+ messages in thread
From: Wendy Cheng @ 2007-01-17 23:05 UTC (permalink / raw)
  To: cluster-devel.redhat.com

Steven Whitehouse wrote:
> Hi,
>
> Just wondering why this:
>
> On Tue, 2007-01-16 at 20:39 +0000, wcheng at sourceware.org wrote:
> [snip]
>> -
>>  	error = gfs_glock_nq_num(sdp,
>> -				 inum.no_formal_ino, &gfs_inode_glops,
>> +				 inum->no_formal_ino, &gfs_inode_glops,
>>  				 LM_ST_SHARED, LM_FLAG_ANY | GL_LOCAL_EXCL,
>>  				 &i_gh);
>>     
> needs the GL_LOCAL_EXCL flag. I would have thought an ordinary shared
> lock would be enough?
>   
It is required to prevent (by serializing) other processes (on the same
node) from creating this gfs inode at the same time (the equivalent of a
semaphore or mutex). Lookup (and several other GFS1 mount/umount) code
needs this flag too. This (my guess) is to reduce the need to create 
another set of semaphores/mutex. In summary, I think it has two advantages:
1. Less locks
2. Easier to track who owns what (since glock holder is easy to find 
when compared with semaphore/mutex).

The down-side is that it makes glock code difficult to understand.  For 
GFS1, let's keep it this way. For GFS2,  your call :) ...

-- Wendy
> In GFS2 I'd like to try and eliminate this flag since it would be just
> as easy to use a combination of a mutex or rwsem and a glock to achieve
> the same thing and it makes the glock code simpler. Its only used in
> about three places anyway and I'm not so sure that its required at all
> in this case,
>
> Steve.
>
>
>   



^ permalink raw reply	[flat|nested] 5+ messages in thread

* [Cluster-devel] cluster/gfs-kernel/src/gfs inode.h ops_export.c
  2007-01-17 23:05   ` Wendy Cheng
@ 2007-01-18  9:56     ` Steven Whitehouse
  2007-01-18 15:17       ` Steven Whitehouse
  0 siblings, 1 reply; 5+ messages in thread
From: Steven Whitehouse @ 2007-01-18  9:56 UTC (permalink / raw)
  To: cluster-devel.redhat.com

Hi,

On Wed, 2007-01-17 at 18:05 -0500, Wendy Cheng wrote:
> Steven Whitehouse wrote:
> > Hi,
> >
> > Just wondering why this:
> >
> > On Tue, 2007-01-16 at 20:39 +0000, wcheng at sourceware.org wrote:
> > [snip]
> >> -
> >>  	error = gfs_glock_nq_num(sdp,
> >> -				 inum.no_formal_ino, &gfs_inode_glops,
> >> +				 inum->no_formal_ino, &gfs_inode_glops,
> >>  				 LM_ST_SHARED, LM_FLAG_ANY | GL_LOCAL_EXCL,
> >>  				 &i_gh);
> >>     
> > needs the GL_LOCAL_EXCL flag. I would have thought an ordinary shared
> > lock would be enough?
> >   
> It is required to prevent (by serializing) other process (on the same 
> node) to create this gfs inode at the same time (equivalence of an 
> semaphore or mutex). Lookup (and several other GFS1 mount/umount) code 
> needs this flag too. This (my guess) is to reduce the need to create 
> another set of semaphores/mutex. In summary, I think it has two advantages:
> 1. Less locks
> 2. Easier to track who owns what (since glock holder is easy to find 
> when compared with sempahore/mutex).
> 
Ah, that kind of makes sense then for gfs1.

> The down-side is that it makes glock code difficult to understand.  For 
> GFS1, let's keep it this way. For GFS2,  your call :) ...
> 
Yes, I don't want to go changing things like that in GFS1 at all :-) In
GFS2 we use the inode cache as it's supposed to be used and that deals
with all the required local serialisation, so it looks to be redundant
in that case.

Also in more recent kernels, lockdep deals with your point #2 above, so
I can't see anything really standing in the way of swapping the few
remaining cases for a mutex or rwsem and glock combination.

In fact one of the reasons for doing this is to work towards the
possibility of using lockdep with glocks. Lockdep has no concept of
"local" and "remote" but it does have the concept of nested locks so it's
one step closer to being able to make use of that,

Steve.





^ permalink raw reply	[flat|nested] 5+ messages in thread

* [Cluster-devel] cluster/gfs-kernel/src/gfs inode.h ops_export.c
  2007-01-18  9:56     ` Steven Whitehouse
@ 2007-01-18 15:17       ` Steven Whitehouse
  0 siblings, 0 replies; 5+ messages in thread
From: Steven Whitehouse @ 2007-01-18 15:17 UTC (permalink / raw)
  To: cluster-devel.redhat.com

Hi,

Here is a proposed patch for GFS2 to remove the local exclusive flag. In
the other places it was used, mutexes are always held earlier in the
call path, so it appears redundant.

Also, the GFS2 holders were setting local exclusive in any case where
the requested lock was LM_ST_EXCLUSIVE. So the other places in the glock
code where the flag was tested have been replaced with tests for the
lock state being LM_ST_EXCLUSIVE in order to ensure the logic is the
same as before.

Steve.


diff --git a/fs/gfs2/glock.c b/fs/gfs2/glock.c
index 4381469..14577fd 100644
--- a/fs/gfs2/glock.c
+++ b/fs/gfs2/glock.c
@@ -396,10 +396,6 @@ void gfs2_holder_init(struct gfs2_glock 
 	gh->gh_error = 0;
 	gh->gh_iflags = 0;
 	init_completion(&gh->gh_wait);
-
-	if (gh->gh_state == LM_ST_EXCLUSIVE)
-		gh->gh_flags |= GL_LOCAL_EXCL;
-
 	gfs2_glock_hold(gl);
 }
 
@@ -417,9 +413,6 @@ void gfs2_holder_reinit(unsigned int sta
 {
 	gh->gh_state = state;
 	gh->gh_flags = flags;
-	if (gh->gh_state == LM_ST_EXCLUSIVE)
-		gh->gh_flags |= GL_LOCAL_EXCL;
-
 	gh->gh_iflags &= 1 << HIF_ALLOCED;
 	gh->gh_ip = (unsigned long)__builtin_return_address(0);
 }
@@ -537,11 +530,11 @@ static int rq_promote(struct gfs2_holder
 		set_bit(GLF_LOCK, &gl->gl_flags);
 	} else {
 		struct gfs2_holder *next_gh;
-		if (gh->gh_flags & GL_LOCAL_EXCL)
+		if (gh->gh_state == LM_ST_EXCLUSIVE)
 			return 1;
 		next_gh = list_entry(gl->gl_holders.next, struct gfs2_holder,
 				     gh_list);
-		if (next_gh->gh_flags & GL_LOCAL_EXCL)
+		if (next_gh->gh_state == LM_ST_EXCLUSIVE)
 			 return 1;
 	}
 
@@ -1470,10 +1463,7 @@ static int glock_compare(const void *arg
 		return 1;
 	if (a->ln_number < b->ln_number)
 		return -1;
-	if (gh_a->gh_state == LM_ST_SHARED && gh_b->gh_state == LM_ST_EXCLUSIVE)
-		return 1;
-	if (!(gh_a->gh_flags & GL_LOCAL_EXCL) && (gh_b->gh_flags & GL_LOCAL_EXCL))
-		return 1;
+	BUG_ON(gh_a->gh_gl->gl_ops->go_type == gh_b->gh_gl->gl_ops->go_type);
 	return 0;
 }
 
diff --git a/fs/gfs2/glock.h b/fs/gfs2/glock.h
index fb39108..bdcbfbd 100644
--- a/fs/gfs2/glock.h
+++ b/fs/gfs2/glock.h
@@ -20,7 +20,6 @@ #define LM_FLAG_NOEXP		0x00000004
 #define LM_FLAG_ANY		0x00000008
 #define LM_FLAG_PRIORITY	0x00000010 */
 
-#define GL_LOCAL_EXCL		0x00000020
 #define GL_ASYNC		0x00000040
 #define GL_EXACT		0x00000080
 #define GL_SKIP			0x00000100
diff --git a/fs/gfs2/glops.c b/fs/gfs2/glops.c
index b068d10..094adda 100644
--- a/fs/gfs2/glops.c
+++ b/fs/gfs2/glops.c
@@ -295,7 +295,7 @@ static int inode_go_lock(struct gfs2_hol
 
 	if ((ip->i_di.di_flags & GFS2_DIF_TRUNC_IN_PROG) &&
 	    (gl->gl_state == LM_ST_EXCLUSIVE) &&
-	    (gh->gh_flags & GL_LOCAL_EXCL))
+	    (gh->gh_state == LM_ST_EXCLUSIVE))
 		error = gfs2_truncatei_resume(ip);
 
 	return error;
diff --git a/fs/gfs2/ops_export.c b/fs/gfs2/ops_export.c
index 6ea979c..0453cbe 100644
--- a/fs/gfs2/ops_export.c
+++ b/fs/gfs2/ops_export.c
@@ -217,8 +217,7 @@ static struct dentry *gfs2_get_dentry(st
 	}
 
 	error = gfs2_glock_nq_num(sdp, inum->no_addr, &gfs2_inode_glops,
-				  LM_ST_SHARED, LM_FLAG_ANY | GL_LOCAL_EXCL,
-				  &i_gh);
+				  LM_ST_SHARED, LM_FLAG_ANY, &i_gh);
 	if (error)
 		return ERR_PTR(error);
 
diff --git a/fs/gfs2/super.c b/fs/gfs2/super.c
index 43a24f2..6b5b138 100644
--- a/fs/gfs2/super.c
+++ b/fs/gfs2/super.c
@@ -359,8 +359,7 @@ int gfs2_jindex_hold(struct gfs2_sbd *sd
 	mutex_lock(&sdp->sd_jindex_mutex);
 
 	for (;;) {
-		error = gfs2_glock_nq_init(dip->i_gl, LM_ST_SHARED,
-					   GL_LOCAL_EXCL, ji_gh);
+		error = gfs2_glock_nq_init(dip->i_gl, LM_ST_SHARED, 0, ji_gh);
 		if (error)
 			break;
 
@@ -529,8 +528,7 @@ int gfs2_make_fs_rw(struct gfs2_sbd *sdp
 	struct gfs2_log_header_host head;
 	int error;
 
-	error = gfs2_glock_nq_init(sdp->sd_trans_gl, LM_ST_SHARED,
-				   GL_LOCAL_EXCL, &t_gh);
+	error = gfs2_glock_nq_init(sdp->sd_trans_gl, LM_ST_SHARED, 0, &t_gh);
 	if (error)
 		return error;
 
@@ -583,9 +581,8 @@ int gfs2_make_fs_ro(struct gfs2_sbd *sdp
 	gfs2_quota_sync(sdp);
 	gfs2_statfs_sync(sdp);
 
-	error = gfs2_glock_nq_init(sdp->sd_trans_gl, LM_ST_SHARED,
-				GL_LOCAL_EXCL | GL_NOCACHE,
-				&t_gh);
+	error = gfs2_glock_nq_init(sdp->sd_trans_gl, LM_ST_SHARED, GL_NOCACHE,
+				   &t_gh);
 	if (error && !test_bit(SDF_SHUTDOWN, &sdp->sd_flags))
 		return error;
 




^ permalink raw reply related	[flat|nested] 5+ messages in thread

end of thread, other threads:[~2007-01-18 15:17 UTC | newest]

Thread overview: 5+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2007-01-16 20:39 [Cluster-devel] cluster/gfs-kernel/src/gfs inode.h ops_export.c wcheng
2007-01-17 14:12 ` Steven Whitehouse
2007-01-17 23:05   ` Wendy Cheng
2007-01-18  9:56     ` Steven Whitehouse
2007-01-18 15:17       ` Steven Whitehouse

This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.