From: Goldwyn Rodrigues <rgoldwyn@suse.de>
To: linux-btrfs@vger.kernel.org
Cc: kilobyte@angband.pl, jack@suse.cz, darrick.wong@oracle.com,
	nborisov@suse.com, linux-nvdimm@lists.01.org, david@fromorbit.com,
	dsterba@suse.cz, willy@infradead.org, linux-fsdevel@vger.kernel.org,
	hch@lst.de, Goldwyn Rodrigues <rgoldwyn@suse.com>
Subject: [PATCH 10/18] dax: replace mmap entry in case of CoW
Date: Mon, 29 Apr 2019 12:26:41 -0500
Message-ID: <20190429172649.8288-11-rgoldwyn@suse.de>
In-Reply-To: <20190429172649.8288-1-rgoldwyn@suse.de>

From: Goldwyn Rodrigues <rgoldwyn@suse.com>

We replace the existing entry with the newly allocated one in case of
CoW, and mark the new entry with PAGECACHE_TAG_TOWRITE so that
writeback write-protects it. This helps with snapshots: new write page
faults after a snapshot trigger a CoW.

btrfs does not support hugepages, so we don't handle the PMD case.

Signed-off-by: Goldwyn Rodrigues <rgoldwyn@suse.com>
---
 fs/dax.c | 36 ++++++++++++++++++++++++++++--------
 1 file changed, 28 insertions(+), 8 deletions(-)

diff --git a/fs/dax.c b/fs/dax.c
index 718b1632a39d..07e8ff20161d 100644
--- a/fs/dax.c
+++ b/fs/dax.c
@@ -700,6 +700,9 @@ static int copy_user_dax(struct block_device *bdev, struct dax_device *dax_dev,
 	return 0;
 }
 
+#define DAX_IF_DIRTY		(1ULL << 0)
+#define DAX_IF_COW		(1ULL << 1)
+
 /*
  * By this point grab_mapping_entry() has ensured that we have a locked entry
  * of the appropriate size so we don't have to worry about downgrading PMDs to
@@ -709,14 +712,17 @@ static int copy_user_dax(struct block_device *bdev, struct dax_device *dax_dev,
  */
 static void *dax_insert_entry(struct xa_state *xas,
 		struct address_space *mapping, struct vm_fault *vmf,
-		void *entry, pfn_t pfn, unsigned long flags, bool dirty)
+		void *entry, pfn_t pfn, unsigned long flags,
+		unsigned long insert_flags)
 {
 	void *new_entry = dax_make_entry(pfn, flags);
+	bool dirty = insert_flags & DAX_IF_DIRTY;
+	bool cow = insert_flags & DAX_IF_COW;
 
 	if (dirty)
 		__mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
 
-	if (dax_is_zero_entry(entry) && !(flags & DAX_ZERO_PAGE)) {
+	if (cow || (dax_is_zero_entry(entry) && !(flags & DAX_ZERO_PAGE))) {
 		unsigned long index = xas->xa_index;
 		/* we are replacing a zero page with block mapping */
 		if (dax_is_pmd_entry(entry))
@@ -728,12 +734,12 @@ static void *dax_insert_entry(struct xa_state *xas,
 
 	xas_reset(xas);
 	xas_lock_irq(xas);
-	if (dax_entry_size(entry) != dax_entry_size(new_entry)) {
+	if (cow || (dax_entry_size(entry) != dax_entry_size(new_entry))) {
 		dax_disassociate_entry(entry, mapping, false);
 		dax_associate_entry(new_entry, mapping, vmf->vma, vmf->address);
 	}
 
-	if (dax_is_zero_entry(entry) || dax_is_empty_entry(entry)) {
+	if (cow || dax_is_zero_entry(entry) || dax_is_empty_entry(entry)) {
 		/*
 		 * Only swap our new entry into the page cache if the current
 		 * entry is a zero page or an empty entry.  If a normal PTE or
@@ -753,6 +759,9 @@ static void *dax_insert_entry(struct xa_state *xas,
 	if (dirty)
 		xas_set_mark(xas, PAGECACHE_TAG_DIRTY);
 
+	if (cow)
+		xas_set_mark(xas, PAGECACHE_TAG_TOWRITE);
+
 	xas_unlock_irq(xas);
 	return entry;
 }
@@ -1032,7 +1041,7 @@ static vm_fault_t dax_load_hole(struct xa_state *xas,
 	vm_fault_t ret;
 
 	*entry = dax_insert_entry(xas, mapping, vmf, *entry, pfn,
-			DAX_ZERO_PAGE, false);
+			DAX_ZERO_PAGE, 0);
 
 	ret = vmf_insert_mixed(vmf->vma, vaddr, pfn);
 	trace_dax_load_hole(inode, vmf, ret);
@@ -1296,6 +1305,7 @@ static vm_fault_t dax_iomap_pte_fault(struct vm_fault *vmf, pfn_t *pfnp,
 	vm_fault_t ret = 0;
 	void *entry;
 	pfn_t pfn;
+	unsigned long insert_flags = 0;
 
 	trace_dax_pte_fault(inode, vmf, ret);
 	/*
@@ -1357,6 +1367,8 @@ static vm_fault_t dax_iomap_pte_fault(struct vm_fault *vmf, pfn_t *pfnp,
 			error = copy_user_dax(iomap.bdev, iomap.dax_dev,
 					sector, PAGE_SIZE, vmf->cow_page, vaddr);
 			break;
+		case IOMAP_DAX_COW:
+			/* Should not be setting this - fallthrough */
 		default:
 			WARN_ON_ONCE(1);
 			error = -EIO;
@@ -1377,6 +1389,8 @@ static vm_fault_t dax_iomap_pte_fault(struct vm_fault *vmf, pfn_t *pfnp,
 
 	switch (iomap.type) {
 	case IOMAP_DAX_COW:
+		insert_flags |= DAX_IF_COW;
+		/* fallthrough */
 	case IOMAP_MAPPED:
 		if (iomap.flags & IOMAP_F_NEW) {
 			count_vm_event(PGMAJFAULT);
@@ -1396,8 +1410,10 @@ static vm_fault_t dax_iomap_pte_fault(struct vm_fault *vmf, pfn_t *pfnp,
 			} else
 				memset(addr, 0, PAGE_SIZE);
 		}
+		if (write && !sync)
+			insert_flags |= DAX_IF_DIRTY;
 		entry = dax_insert_entry(&xas, mapping, vmf, entry, pfn,
-						 0, write && !sync);
+						 0, insert_flags);
 
 		/*
 		 * If we are doing synchronous page fault and inode needs fsync,
@@ -1478,7 +1494,7 @@ static vm_fault_t dax_pmd_load_hole(struct xa_state *xas, struct vm_fault *vmf,
 
 	pfn = page_to_pfn_t(zero_page);
 	*entry = dax_insert_entry(xas, mapping, vmf, *entry, pfn,
-			DAX_PMD | DAX_ZERO_PAGE, false);
+			DAX_PMD | DAX_ZERO_PAGE, 0);
 
 	if (arch_needs_pgtable_deposit()) {
 		pgtable = pte_alloc_one(vma->vm_mm);
@@ -1528,6 +1544,7 @@ static vm_fault_t dax_iomap_pmd_fault(struct vm_fault *vmf, pfn_t *pfnp,
 	loff_t pos;
 	int error;
 	pfn_t pfn;
+	unsigned long insert_flags = 0;
 
 	/*
 	 * Check whether offset isn't beyond end of file now. Caller is
@@ -1612,8 +1629,11 @@ static vm_fault_t dax_iomap_pmd_fault(struct vm_fault *vmf, pfn_t *pfnp,
 	if (error < 0)
 		goto finish_iomap;
 
+	if (write && !sync)
+		insert_flags |= DAX_IF_DIRTY;
+
 	entry = dax_insert_entry(&xas, mapping, vmf, entry, pfn,
-			DAX_PMD, write && !sync);
+			DAX_PMD, insert_flags);
 
 	/*
 	 * If we are doing synchronous page fault and inode needs fsync,
-- 
2.16.4
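
The caller-side contract after this change, condensed into one place: a
minimal sketch mirroring the two fault paths patched above, where
"iomap", "write" and "sync" are the locals those functions already
compute and IOMAP_DAX_COW is introduced earlier in this series.

	unsigned long insert_flags = 0;

	if (iomap.type == IOMAP_DAX_COW)	/* block newly allocated for CoW */
		insert_flags |= DAX_IF_COW;
	if (write && !sync)			/* non-sync write fault: dirty it */
		insert_flags |= DAX_IF_DIRTY;

	entry = dax_insert_entry(&xas, mapping, vmf, entry, pfn, 0,
			insert_flags);

Folding the old "bool dirty" into a flags word means the signature no
longer has to grow another parameter for each new insert-time behavior.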
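
Inside dax_insert_entry(), the new DAX_IF_COW bit short-circuits all
three decisions. Illustratively (a condensed restatement of the patched
conditions above, not literal kernel code):

	bool cow = insert_flags & DAX_IF_COW;

	/* unmap old userspace mappings so the next access refaults */
	bool unmap   = cow || (dax_is_zero_entry(entry) &&
			       !(flags & DAX_ZERO_PAGE));
	/* re-associate the backing pages with the new pfn */
	bool rebind  = cow || dax_entry_size(entry) != dax_entry_size(new_entry);
	/* store new_entry in the xarray in place of the old entry */
	bool replace = cow || dax_is_zero_entry(entry) ||
		       dax_is_empty_entry(entry);

A CoW fault must take all three branches unconditionally: even when the
old and new entries have the same size, the pfn now points at a freshly
allocated block, so both the old association and the old xarray entry
are stale.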
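
As for the PAGECACHE_TAG_TOWRITE half of the change: the DAX writeback
path walks exactly that tag, and writing an entry back includes
write-protecting its userspace mappings. An abridged sketch of the
consumer, assuming the mainline fs/dax.c of this kernel generation
(error handling and the per-entry flush details are omitted):

	/* dax_writeback_mapping_range(), abridged */
	xas_lock_irq(&xas);
	xas_for_each_marked(&xas, entry, end_index, PAGECACHE_TAG_TOWRITE) {
		/* flushes the range and write-protects mapped PTEs */
		ret = dax_writeback_one(&xas, dax_dev, mapping, entry);
		if (ret < 0)
			break;
	}
	xas_unlock_irq(&xas);

Tagging the freshly inserted CoW entry TOWRITE therefore guarantees the
next writeback write-protects it, so a store after a snapshot faults
again and goes through the CoW path once more.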