* [PATCH] kfence: shorten critical sections of alloc/free
@ 2021-09-30 15:37 ` Marco Elver
  0 siblings, 0 replies; 4+ messages in thread
From: Marco Elver @ 2021-09-30 15:37 UTC (permalink / raw)
  To: elver, Andrew Morton
  Cc: Alexander Potapenko, Dmitry Vyukov, Jann Horn, linux-kernel,
	linux-mm, kasan-dev

Initializing memory and setting/checking the canary bytes is relatively
expensive, and doing so in the meta->lock critical sections unnecessarily
extends the time spent with preemption and interrupts disabled.

Reads of meta->addr and meta->size in kfence_guarded_alloc() and
kfence_guarded_free() do not require holding meta->lock as long as the
object has been removed from the freelist: only kfence_guarded_alloc()
sets meta->addr and meta->size, and it does so after removing the object
from the freelist, which in turn requires a preceding
kfence_guarded_free() having returned it to the list (or the object
still being in its initial, never-allocated state).

Therefore, move reads of meta->addr and meta->size, including the
expensive memory initialization that uses them, out of the meta->lock
critical sections.
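
To make the resulting shape easier to see, here is a simplified sketch
of the free path after this change. It is a condensed rendering of the
diff below, not extra code; the zombie case, unprotected-page handling,
and coverage accounting are elided:

	static void kfence_guarded_free(void *addr, struct kfence_metadata *meta)
	{
		unsigned long flags;
		bool init;

		raw_spin_lock_irqsave(&meta->lock, flags);
		/* Only metadata state updates remain under the lock. */
		metadata_update_state(meta, KFENCE_OBJECT_FREED, NULL, 0);
		init = slab_want_init_on_free(meta->cache);
		raw_spin_unlock_irqrestore(&meta->lock, flags);

		/*
		 * Safe without meta->lock: the object is off the freelist,
		 * so meta->addr and meta->size cannot change concurrently.
		 */
		for_each_canary(meta, check_canary_byte);	/* expensive */
		if (init)
			memzero_explicit(addr, meta->size);	/* expensive */

		/* Protect to detect use-after-frees. */
		kfence_protect((unsigned long)addr);
	}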

Signed-off-by: Marco Elver <elver@google.com>
---
 mm/kfence/core.c | 38 +++++++++++++++++++++-----------------
 1 file changed, 21 insertions(+), 17 deletions(-)

diff --git a/mm/kfence/core.c b/mm/kfence/core.c
index b61ef93d9f98..802905b1c89b 100644
--- a/mm/kfence/core.c
+++ b/mm/kfence/core.c
@@ -309,12 +309,19 @@ static inline bool set_canary_byte(u8 *addr)
 /* Check canary byte at @addr. */
 static inline bool check_canary_byte(u8 *addr)
 {
+	struct kfence_metadata *meta;
+	unsigned long flags;
+
 	if (likely(*addr == KFENCE_CANARY_PATTERN(addr)))
 		return true;
 
 	atomic_long_inc(&counters[KFENCE_COUNTER_BUGS]);
-	kfence_report_error((unsigned long)addr, false, NULL, addr_to_metadata((unsigned long)addr),
-			    KFENCE_ERROR_CORRUPTION);
+
+	meta = addr_to_metadata((unsigned long)addr);
+	raw_spin_lock_irqsave(&meta->lock, flags);
+	kfence_report_error((unsigned long)addr, false, NULL, meta, KFENCE_ERROR_CORRUPTION);
+	raw_spin_unlock_irqrestore(&meta->lock, flags);
+
 	return false;
 }
 
@@ -324,8 +331,6 @@ static __always_inline void for_each_canary(const struct kfence_metadata *meta,
 	const unsigned long pageaddr = ALIGN_DOWN(meta->addr, PAGE_SIZE);
 	unsigned long addr;
 
-	lockdep_assert_held(&meta->lock);
-
 	/*
 	 * We'll iterate over each canary byte per-side until fn() returns
 	 * false. However, we'll still iterate over the canary bytes to the
@@ -414,8 +419,9 @@ static void *kfence_guarded_alloc(struct kmem_cache *cache, size_t size, gfp_t g
 	WRITE_ONCE(meta->cache, cache);
 	meta->size = size;
 	meta->alloc_stack_hash = alloc_stack_hash;
+	raw_spin_unlock_irqrestore(&meta->lock, flags);
 
-	for_each_canary(meta, set_canary_byte);
+	alloc_covered_add(alloc_stack_hash, 1);
 
 	/* Set required struct page fields. */
 	page = virt_to_page(meta->addr);
@@ -425,11 +431,8 @@ static void *kfence_guarded_alloc(struct kmem_cache *cache, size_t size, gfp_t g
 	if (IS_ENABLED(CONFIG_SLAB))
 		page->s_mem = addr;
 
-	raw_spin_unlock_irqrestore(&meta->lock, flags);
-
-	alloc_covered_add(alloc_stack_hash, 1);
-
 	/* Memory initialization. */
+	for_each_canary(meta, set_canary_byte);
 
 	/*
 	 * We check slab_want_init_on_alloc() ourselves, rather than letting
@@ -454,6 +457,7 @@ static void kfence_guarded_free(void *addr, struct kfence_metadata *meta, bool z
 {
 	struct kcsan_scoped_access assert_page_exclusive;
 	unsigned long flags;
+	bool init;
 
 	raw_spin_lock_irqsave(&meta->lock, flags);
 
@@ -481,6 +485,13 @@ static void kfence_guarded_free(void *addr, struct kfence_metadata *meta, bool z
 		meta->unprotected_page = 0;
 	}
 
+	/* Mark the object as freed. */
+	metadata_update_state(meta, KFENCE_OBJECT_FREED, NULL, 0);
+	init = slab_want_init_on_free(meta->cache);
+	raw_spin_unlock_irqrestore(&meta->lock, flags);
+
+	alloc_covered_add(meta->alloc_stack_hash, -1);
+
 	/* Check canary bytes for memory corruption. */
 	for_each_canary(meta, check_canary_byte);
 
@@ -489,16 +500,9 @@ static void kfence_guarded_free(void *addr, struct kfence_metadata *meta, bool z
 	 * data is still there, and after a use-after-free is detected, we
 	 * unprotect the page, so the data is still accessible.
 	 */
-	if (!zombie && unlikely(slab_want_init_on_free(meta->cache)))
+	if (!zombie && unlikely(init))
 		memzero_explicit(addr, meta->size);
 
-	/* Mark the object as freed. */
-	metadata_update_state(meta, KFENCE_OBJECT_FREED, NULL, 0);
-
-	raw_spin_unlock_irqrestore(&meta->lock, flags);
-
-	alloc_covered_add(meta->alloc_stack_hash, -1);
-
 	/* Protect to detect use-after-frees. */
 	kfence_protect((unsigned long)addr);
 
-- 
2.33.0.685.g46640cef36-goog


* Re: [PATCH] kfence: shorten critical sections of alloc/free
  2021-09-30 15:37 ` Marco Elver
@ 2021-09-30 15:39   ` Alexander Potapenko
  -1 siblings, 0 replies; 4+ messages in thread
From: Alexander Potapenko @ 2021-09-30 15:39 UTC (permalink / raw)
  To: Marco Elver
  Cc: Andrew Morton, Dmitry Vyukov, Jann Horn, LKML,
	Linux Memory Management List, kasan-dev

On Thu, Sep 30, 2021 at 5:37 PM Marco Elver <elver@google.com> wrote:
>
> Initializing memory and setting/checking the canary bytes is relatively
> expensive, and doing so in the meta->lock critical sections unnecessarily
> extends the time spent with preemption and interrupts disabled.
>
> Reads of meta->addr and meta->size in kfence_guarded_alloc() and
> kfence_guarded_free() do not require holding meta->lock as long as the
> object has been removed from the freelist: only kfence_guarded_alloc()
> sets meta->addr and meta->size, and it does so after removing the object
> from the freelist, which in turn requires a preceding
> kfence_guarded_free() having returned it to the list (or the object
> still being in its initial, never-allocated state).
>
> Therefore, move reads of meta->addr and meta->size, including the
> expensive memory initialization that uses them, out of the meta->lock
> critical sections.
>
> Signed-off-by: Marco Elver <elver@google.com>
Acked-by: Alexander Potapenko <glider@google.com>

> ---
>  mm/kfence/core.c | 38 +++++++++++++++++++++-----------------
>  1 file changed, 21 insertions(+), 17 deletions(-)
>
> diff --git a/mm/kfence/core.c b/mm/kfence/core.c
> index b61ef93d9f98..802905b1c89b 100644
> --- a/mm/kfence/core.c
> +++ b/mm/kfence/core.c
> @@ -309,12 +309,19 @@ static inline bool set_canary_byte(u8 *addr)
>  /* Check canary byte at @addr. */
>  static inline bool check_canary_byte(u8 *addr)
>  {
> +       struct kfence_metadata *meta;
> +       unsigned long flags;
> +
>         if (likely(*addr == KFENCE_CANARY_PATTERN(addr)))
>                 return true;
>
>         atomic_long_inc(&counters[KFENCE_COUNTER_BUGS]);
> -       kfence_report_error((unsigned long)addr, false, NULL, addr_to_metadata((unsigned long)addr),
> -                           KFENCE_ERROR_CORRUPTION);
> +
> +       meta = addr_to_metadata((unsigned long)addr);
> +       raw_spin_lock_irqsave(&meta->lock, flags);
> +       kfence_report_error((unsigned long)addr, false, NULL, meta, KFENCE_ERROR_CORRUPTION);
> +       raw_spin_unlock_irqrestore(&meta->lock, flags);
> +
>         return false;
>  }
>
> @@ -324,8 +331,6 @@ static __always_inline void for_each_canary(const struct kfence_metadata *meta,
>         const unsigned long pageaddr = ALIGN_DOWN(meta->addr, PAGE_SIZE);
>         unsigned long addr;
>
> -       lockdep_assert_held(&meta->lock);
> -
>         /*
>          * We'll iterate over each canary byte per-side until fn() returns
>          * false. However, we'll still iterate over the canary bytes to the
> @@ -414,8 +419,9 @@ static void *kfence_guarded_alloc(struct kmem_cache *cache, size_t size, gfp_t g
>         WRITE_ONCE(meta->cache, cache);
>         meta->size = size;
>         meta->alloc_stack_hash = alloc_stack_hash;
> +       raw_spin_unlock_irqrestore(&meta->lock, flags);
>
> -       for_each_canary(meta, set_canary_byte);
> +       alloc_covered_add(alloc_stack_hash, 1);
>
>         /* Set required struct page fields. */
>         page = virt_to_page(meta->addr);
> @@ -425,11 +431,8 @@ static void *kfence_guarded_alloc(struct kmem_cache *cache, size_t size, gfp_t g
>         if (IS_ENABLED(CONFIG_SLAB))
>                 page->s_mem = addr;
>
> -       raw_spin_unlock_irqrestore(&meta->lock, flags);
> -
> -       alloc_covered_add(alloc_stack_hash, 1);
> -
>         /* Memory initialization. */
> +       for_each_canary(meta, set_canary_byte);
>
>         /*
>          * We check slab_want_init_on_alloc() ourselves, rather than letting
> @@ -454,6 +457,7 @@ static void kfence_guarded_free(void *addr, struct kfence_metadata *meta, bool z
>  {
>         struct kcsan_scoped_access assert_page_exclusive;
>         unsigned long flags;
> +       bool init;
>
>         raw_spin_lock_irqsave(&meta->lock, flags);
>
> @@ -481,6 +485,13 @@ static void kfence_guarded_free(void *addr, struct kfence_metadata *meta, bool z
>                 meta->unprotected_page = 0;
>         }
>
> +       /* Mark the object as freed. */
> +       metadata_update_state(meta, KFENCE_OBJECT_FREED, NULL, 0);
> +       init = slab_want_init_on_free(meta->cache);
> +       raw_spin_unlock_irqrestore(&meta->lock, flags);
> +
> +       alloc_covered_add(meta->alloc_stack_hash, -1);
> +
>         /* Check canary bytes for memory corruption. */
>         for_each_canary(meta, check_canary_byte);
>
> @@ -489,16 +500,9 @@ static void kfence_guarded_free(void *addr, struct kfence_metadata *meta, bool z
>          * data is still there, and after a use-after-free is detected, we
>          * unprotect the page, so the data is still accessible.
>          */
> -       if (!zombie && unlikely(slab_want_init_on_free(meta->cache)))
> +       if (!zombie && unlikely(init))
>                 memzero_explicit(addr, meta->size);
>
> -       /* Mark the object as freed. */
> -       metadata_update_state(meta, KFENCE_OBJECT_FREED, NULL, 0);
> -
> -       raw_spin_unlock_irqrestore(&meta->lock, flags);
> -
> -       alloc_covered_add(meta->alloc_stack_hash, -1);
> -
>         /* Protect to detect use-after-frees. */
>         kfence_protect((unsigned long)addr);
>
> --
> 2.33.0.685.g46640cef36-goog
>


-- 
Alexander Potapenko
Software Engineer

Google Germany GmbH
Erika-Mann-Straße, 33
80636 München

Geschäftsführer: Paul Manicle, Halimah DeLaine Prado
Registergericht und -nummer: Hamburg, HRB 86891
Sitz der Gesellschaft: Hamburg
