All of lore.kernel.org
 help / color / mirror / Atom feed
From: andrey.konovalov@linux.dev
To: Marco Elver <elver@google.com>, Alexander Potapenko <glider@google.com>
Cc: Andrey Konovalov <andreyknvl@gmail.com>,
	Dmitry Vyukov <dvyukov@google.com>,
	Vlastimil Babka <vbabka@suse.cz>,
	kasan-dev@googlegroups.com, Evgenii Stepanov <eugenis@google.com>,
	Andrew Morton <akpm@linux-foundation.org>,
	linux-mm@kvack.org, linux-kernel@vger.kernel.org,
	Andrey Konovalov <andreyknvl@google.com>
Subject: [PATCH 10/15] stackdepot: store free stack records in a freelist
Date: Tue, 29 Aug 2023 19:11:20 +0200	[thread overview]
Message-ID: <0853a38f849f75a428a76fe9bcd093c0502d26f4.1693328501.git.andreyknvl@google.com> (raw)
In-Reply-To: <cover.1693328501.git.andreyknvl@google.com>

From: Andrey Konovalov <andreyknvl@google.com>

Instead of using the global pool_offset variable to find a free slot
when storing a new stack record, maintain a freelist of free slots
within the allocated stack pools.

A global next_stack variable is used as the head of the freelist, and
the next field in the stack_record struct is reused as freelist link
(when the record is not in the freelist, this field is used as a link
in the hash table).

This is a preparatory patch for implementing the eviction of stack records
from the stack depot.

Signed-off-by: Andrey Konovalov <andreyknvl@google.com>
---
 lib/stackdepot.c | 130 +++++++++++++++++++++++++++++------------------
 1 file changed, 81 insertions(+), 49 deletions(-)

diff --git a/lib/stackdepot.c b/lib/stackdepot.c
index 5982ea79939d..9011f4adcf20 100644
--- a/lib/stackdepot.c
+++ b/lib/stackdepot.c
@@ -55,8 +55,8 @@ union handle_parts {
 };
 
 struct stack_record {
-	struct stack_record *next;	/* Link in the hash table */
-	u32 hash;			/* Hash in the hash table */
+	struct stack_record *next;	/* Link in hash table or freelist */
+	u32 hash;			/* Hash in hash table */
 	u32 size;			/* Number of stored frames */
 	union handle_parts handle;
 	unsigned long entries[DEPOT_STACK_MAX_FRAMES];	/* Frames */
@@ -88,10 +88,10 @@ static unsigned int stack_hash_mask;
 static void *stack_pools[DEPOT_MAX_POOLS];
 /* Newly allocated pool that is not yet added to stack_pools. */
 static void *new_pool;
-/* Currently used pool in stack_pools. */
-static int pool_index;
-/* Offset to the unused space in the currently used pool. */
-static size_t pool_offset;
+/* Number of pools in stack_pools. */
+static int pools_num;
+/* Next stack in the freelist of stack records within stack_pools. */
+static struct stack_record *next_stack;
 /* Lock that protects the variables above. */
 static DEFINE_RAW_SPINLOCK(pool_lock);
 /*
@@ -221,6 +221,41 @@ int stack_depot_init(void)
 }
 EXPORT_SYMBOL_GPL(stack_depot_init);
 
+/* Initializes a stack depot pool. */
+static void depot_init_pool(void *pool)
+{
+	const int records_in_pool = DEPOT_POOL_SIZE / DEPOT_STACK_RECORD_SIZE;
+	int i, offset;
+
+	/* Initialize handles and link stack records to each other. */
+	for (i = 0, offset = 0; offset < DEPOT_POOL_SIZE;
+	     i++, offset += DEPOT_STACK_RECORD_SIZE) {
+		struct stack_record *stack = pool + offset;
+
+		stack->handle.pool_index = pools_num;
+		stack->handle.offset = offset >> DEPOT_STACK_ALIGN;
+		stack->handle.extra = 0;
+
+		if (i < records_in_pool - 1)
+			stack->next = (void *)stack + DEPOT_STACK_RECORD_SIZE;
+		else
+			stack->next = NULL;
+	}
+
+	/* Link stack records into the freelist. */
+	WARN_ON(next_stack);
+	next_stack = pool;
+
+	/* Save reference to the pool to be used by depot_fetch_stack. */
+	stack_pools[pools_num] = pool;
+
+	/*
+	 * WRITE_ONCE pairs with potential concurrent read in
+	 * depot_fetch_stack.
+	 */
+	WRITE_ONCE(pools_num, pools_num + 1);
+}
+
 /* Keeps the preallocated memory to be used for a new stack depot pool. */
 static void depot_keep_new_pool(void **prealloc)
 {
@@ -237,7 +272,7 @@ static void depot_keep_new_pool(void **prealloc)
 	 * Use the preallocated memory for the new pool
 	 * as long as we do not exceed the maximum number of pools.
 	 */
-	if (pool_index + 1 < DEPOT_MAX_POOLS) {
+	if (pools_num < DEPOT_MAX_POOLS) {
 		new_pool = *prealloc;
 		*prealloc = NULL;
 	}
@@ -252,45 +287,42 @@ static void depot_keep_new_pool(void **prealloc)
 }
 
 /* Updates refences to the current and the next stack depot pools. */
-static bool depot_update_pools(size_t required_size, void **prealloc)
+static bool depot_update_pools(void **prealloc)
 {
-	/* Check if there is not enough space in the current pool. */
-	if (unlikely(pool_offset + required_size > DEPOT_POOL_SIZE)) {
-		/* Bail out if we reached the pool limit. */
-		if (unlikely(pool_index + 1 >= DEPOT_MAX_POOLS)) {
-			WARN_ONCE(1, "Stack depot reached limit capacity");
-			return false;
-		}
+	/* Check if we still have objects in the freelist. */
+	if (next_stack)
+		goto out_keep_prealloc;
 
-		/*
-		 * Move on to the new pool.
-		 * WRITE_ONCE pairs with potential concurrent read in
-		 * stack_depot_fetch.
-		 */
-		WRITE_ONCE(pool_index, pool_index + 1);
-		stack_pools[pool_index] = new_pool;
+	/* Check if we have a new pool saved and use it. */
+	if (new_pool) {
+		depot_init_pool(new_pool);
 		new_pool = NULL;
-		pool_offset = 0;
 
-		/*
-		 * If the maximum number of pools is not reached, take note
-		 * that yet another new pool needs to be allocated.
-		 * smp_store_release pairs with smp_load_acquire in
-		 * stack_depot_save.
-		 */
-		if (pool_index + 1 < DEPOT_MAX_POOLS)
+		/* Take note that we might need a new new_pool. */
+		if (pools_num < DEPOT_MAX_POOLS)
 			smp_store_release(&new_pool_required, 1);
+
+		/* Try keeping the preallocated memory for new_pool. */
+		goto out_keep_prealloc;
+	}
+
+	/* Bail out if we reached the pool limit. */
+	if (unlikely(pools_num >= DEPOT_MAX_POOLS)) {
+		WARN_ONCE(1, "Stack depot reached limit capacity");
+		return false;
 	}
 
-	/* Check if the current pool is not yet allocated. */
-	if (*prealloc && stack_pools[pool_index] == NULL) {
-		/* Use the preallocated memory for the current pool. */
-		stack_pools[pool_index] = *prealloc;
+	/* Check if we have preallocated memory and use it. */
+	if (*prealloc) {
+		depot_init_pool(*prealloc);
 		*prealloc = NULL;
 		return true;
 	}
 
-	/* Otherwise, try using the preallocated memory for a new pool. */
+	return false;
+
+out_keep_prealloc:
+	/* Keep the preallocated memory for a new pool if required. */
 	if (*prealloc)
 		depot_keep_new_pool(prealloc);
 	return true;
@@ -301,35 +333,35 @@ static struct stack_record *
 depot_alloc_stack(unsigned long *entries, int size, u32 hash, void **prealloc)
 {
 	struct stack_record *stack;
-	size_t required_size = DEPOT_STACK_RECORD_SIZE;
 
 	/* Update current and new pools if required and possible. */
-	if (!depot_update_pools(required_size, prealloc))
+	if (!depot_update_pools(prealloc))
 		return NULL;
 
-	/* Check if we have a pool to save the stack trace. */
-	if (stack_pools[pool_index] == NULL)
+	/* Check if we have a stack record to save the stack trace. */
+	stack = next_stack;
+	if (!stack)
 		return NULL;
 
+	/* Advance the freelist. */
+	next_stack = stack->next;
+
 	/* Limit number of saved frames to DEPOT_STACK_MAX_FRAMES. */
 	if (size > DEPOT_STACK_MAX_FRAMES)
 		size = DEPOT_STACK_MAX_FRAMES;
 
 	/* Save the stack trace. */
-	stack = stack_pools[pool_index] + pool_offset;
+	stack->next = NULL;
 	stack->hash = hash;
 	stack->size = size;
-	stack->handle.pool_index = pool_index;
-	stack->handle.offset = pool_offset >> DEPOT_STACK_ALIGN;
-	stack->handle.extra = 0;
+	/* stack->handle is already filled in by depot_init_pool. */
 	memcpy(stack->entries, entries, flex_array_size(stack, entries, size));
-	pool_offset += required_size;
 
 	/*
 	 * Let KMSAN know the stored stack record is initialized. This shall
 	 * prevent false positive reports if instrumented code accesses it.
 	 */
-	kmsan_unpoison_memory(stack, required_size);
+	kmsan_unpoison_memory(stack, DEPOT_STACK_RECORD_SIZE);
 
 	return stack;
 }
@@ -339,16 +371,16 @@ static struct stack_record *depot_fetch_stack(depot_stack_handle_t handle)
 	union handle_parts parts = { .handle = handle };
 	/*
 	 * READ_ONCE pairs with potential concurrent write in
-	 * depot_update_pools.
+	 * depot_init_pool.
 	 */
-	int pool_index_cached = READ_ONCE(pool_index);
+	int pools_num_cached = READ_ONCE(pools_num);
 	void *pool;
 	size_t offset = parts.offset << DEPOT_STACK_ALIGN;
 	struct stack_record *stack;
 
-	if (parts.pool_index > pool_index_cached) {
+	if (parts.pool_index > pools_num_cached) {
 		WARN(1, "pool index %d out of bounds (%d) for stack id %08x\n",
-			parts.pool_index, pool_index_cached, handle);
+			parts.pool_index, pools_num_cached, handle);
 		return NULL;
 	}
 
-- 
2.25.1


  parent reply	other threads:[~2023-08-29 17:13 UTC|newest]

Thread overview: 50+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2023-08-29 17:11 [PATCH 00/15] stackdepot: allow evicting stack traces andrey.konovalov
2023-08-29 17:11 ` [PATCH 01/15] stackdepot: check disabled flag when fetching andrey.konovalov
2023-08-30  7:40   ` Alexander Potapenko
2023-08-29 17:11 ` [PATCH 02/15] stackdepot: simplify __stack_depot_save andrey.konovalov
2023-08-30  7:41   ` Alexander Potapenko
2023-08-29 17:11 ` [PATCH 03/15] stackdepot: drop valid bit from handles andrey.konovalov
2023-08-30  7:43   ` Alexander Potapenko
2023-08-29 17:11 ` [PATCH 04/15] stackdepot: add depot_fetch_stack helper andrey.konovalov
2023-08-30  7:47   ` Alexander Potapenko
2023-08-29 17:11 ` [PATCH 05/15] stackdepot: use fixed-sized slots for stack records andrey.konovalov
2023-08-30  8:21   ` Alexander Potapenko
2023-09-13 17:07     ` Andrey Konovalov
2023-09-15 10:36       ` Alexander Potapenko
2023-08-29 17:11 ` [PATCH 06/15] stackdepot: fix and clean-up atomic annotations andrey.konovalov
2023-08-30  8:34   ` Marco Elver
2023-09-04 18:45     ` Andrey Konovalov
2023-08-29 17:11 ` [PATCH 07/15] stackdepot: rework helpers for depot_alloc_stack andrey.konovalov
2023-08-29 17:11 ` [PATCH 08/15] stackdepot: rename next_pool_required to new_pool_required andrey.konovalov
2023-08-30  8:29   ` Alexander Potapenko
2023-08-29 17:11 ` [PATCH 09/15] stackdepot: store next pool pointer in new_pool andrey.konovalov
2023-08-29 17:11 ` andrey.konovalov [this message]
2023-08-29 17:11 ` [PATCH 11/15] stackdepot: use read/write lock andrey.konovalov
2023-08-30  9:13   ` Marco Elver
2023-09-04 18:46     ` Andrey Konovalov
2023-09-05 16:19       ` Marco Elver
2023-09-13 17:08         ` Andrey Konovalov
2024-01-02 12:59           ` Marco Elver
2024-01-09  3:27             ` Andrey Konovalov
2023-08-29 17:11 ` [PATCH 12/15] stackdepot: add refcount for records andrey.konovalov
2023-08-30  9:32   ` Marco Elver
2023-09-04 18:46     ` Andrey Konovalov
2023-09-04 18:55       ` Marco Elver
2023-09-13 17:07         ` Andrey Konovalov
2023-09-01 13:06   ` Kuan-Ying Lee (李冠穎)
2023-09-04 18:46     ` Andrey Konovalov
2023-08-29 17:11 ` [PATCH 13/15] stackdepot: add backwards links to hash table buckets andrey.konovalov
2023-08-30  9:24   ` Marco Elver
2023-09-13 17:07     ` Andrey Konovalov
2023-08-29 17:11 ` [PATCH 14/15] stackdepot: allow users to evict stack traces andrey.konovalov
2023-08-30  9:20   ` Marco Elver
2023-09-04 18:47     ` Andrey Konovalov
2023-08-29 17:11 ` [PATCH 15/15] kasan: use stack_depot_evict for tag-based modes andrey.konovalov
2023-08-30  9:38   ` Marco Elver
2023-09-04 18:48     ` Andrey Konovalov
2023-09-04 18:58       ` Marco Elver
2023-09-13 17:08         ` Andrey Konovalov
2023-08-30  7:46 ` [PATCH 00/15] stackdepot: allow evicting stack traces Vlastimil Babka
2023-09-04 18:45   ` Andrey Konovalov
2023-09-05  2:48     ` Kuan-Ying Lee (李冠穎)
2023-09-13 17:10       ` Andrey Konovalov

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=0853a38f849f75a428a76fe9bcd093c0502d26f4.1693328501.git.andreyknvl@google.com \
    --to=andrey.konovalov@linux.dev \
    --cc=akpm@linux-foundation.org \
    --cc=andreyknvl@gmail.com \
    --cc=andreyknvl@google.com \
    --cc=dvyukov@google.com \
    --cc=elver@google.com \
    --cc=eugenis@google.com \
    --cc=glider@google.com \
    --cc=kasan-dev@googlegroups.com \
    --cc=linux-kernel@vger.kernel.org \
    --cc=linux-mm@kvack.org \
    --cc=vbabka@suse.cz \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.