* [PATCH v2 0/2] io_uring: Add KASAN support for alloc caches
@ 2023-02-22 18:00 Breno Leitao
  2023-02-22 18:00 ` [PATCH v2 1/2] io_uring: Move from hlist to io_wq_work_node Breno Leitao
  2023-02-22 18:00 ` [PATCH v2 2/2] io_uring: Add KASAN support for alloc_caches Breno Leitao
  0 siblings, 2 replies; 4+ messages in thread
From: Breno Leitao @ 2023-02-22 18:00 UTC (permalink / raw)
  To: axboe, asml.silence, io-uring
  Cc: linux-kernel, gustavold, leit, kasan-dev, Breno Leitao

From: Breno Leitao <leit@fb.com>

This patchset enables KASAN for the alloc cache buffers. These buffers are
used by the apoll and netmsg code paths. They are now poisoned while
sitting unused in the cache, so any stray access to them raises a KASAN
warning.

This patchset also moves the alloc_cache from a doubly linked list to a
singly linked list, so we do not need to touch a poisoned node when adding
or deleting a sibling node.
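
For context, here is a minimal sketch of why the list type matters for
KASAN (simplified, not the exact kernel definitions):

	/*
	 * hlist entries carry a back-pointer, and hlist_add_head() has to
	 * update the old first entry's pprev, i.e. it writes into an entry
	 * that KASAN has poisoned while it sat in the cache:
	 *
	 *	n->next = h->first;
	 *	if (h->first)
	 *		h->first->pprev = &n->next;	<-- touches a poisoned entry
	 *	h->first = n;
	 *	n->pprev = &h->first;
	 */
	struct hlist_node {
		struct hlist_node *next, **pprev;
	};

	/*
	 * The stack-style singly linked list only writes the new node and
	 * the list head, so the already-cached (poisoned) entries are left
	 * alone:
	 *
	 *	node->next = stack->next;
	 *	stack->next = node;
	 */
	struct io_wq_work_node {
		struct io_wq_work_node *next;
	};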

Changes from v1 to v2:
   * Get rid of an extra "struct io_wq_work_node" variable in
     io_alloc_cache_get() (suggested by Pavel Begunkov)
   * Remove assignments inside "if" conditions (suggested by Pavel Begunkov
     and Jens Axboe)
   * Do not use network structs if CONFIG_NET is disabled (as reported
     by kernel test robot)

Breno Leitao (2):
  io_uring: Move from hlist to io_wq_work_node
  io_uring: Add KASAN support for alloc_caches

 include/linux/io_uring_types.h |  2 +-
 io_uring/alloc_cache.h         | 35 +++++++++++++++++++---------------
 io_uring/io_uring.c            | 14 ++++++++++++--
 io_uring/net.c                 |  2 +-
 io_uring/net.h                 |  4 ----
 io_uring/poll.c                |  2 +-
 6 files changed, 35 insertions(+), 24 deletions(-)

-- 
2.30.2


* [PATCH v2 1/2] io_uring: Move from hlist to io_wq_work_node
  2023-02-22 18:00 [PATCH v2 0/2] io_uring: Add KASAN support for alloc caches Breno Leitao
@ 2023-02-22 18:00 ` Breno Leitao
  2023-02-22 18:00 ` [PATCH v2 2/2] io_uring: Add KASAN support for alloc_caches Breno Leitao
  1 sibling, 0 replies; 4+ messages in thread
From: Breno Leitao @ 2023-02-22 18:00 UTC (permalink / raw)
  To: axboe, asml.silence, io-uring
  Cc: linux-kernel, gustavold, leit, kasan-dev, Breno Leitao

From: Breno Leitao <leit@fb.com>

Having cache entries linked using the hlist format brings no benefit, and
also requires an unnecessary extra pointer per cache entry.

Use the io_wq_work_node singly linked list for the internal alloc caches
(async_msghdr and async_poll) instead.

This is a prerequisite for using KASAN on cache entries: with a singly
linked list, adding or removing an entry does not touch the neighbouring,
still-poisoned entries in the cache.

Suggested-by: Pavel Begunkov <asml.silence@gmail.com>
Signed-off-by: Breno Leitao <leitao@debian.org>
---
 include/linux/io_uring_types.h |  2 +-
 io_uring/alloc_cache.h         | 26 +++++++++++++-------------
 2 files changed, 14 insertions(+), 14 deletions(-)

diff --git a/include/linux/io_uring_types.h b/include/linux/io_uring_types.h
index 0efe4d784358..efa66b6c32c9 100644
--- a/include/linux/io_uring_types.h
+++ b/include/linux/io_uring_types.h
@@ -188,7 +188,7 @@ struct io_ev_fd {
 };
 
 struct io_alloc_cache {
-	struct hlist_head	list;
+	struct io_wq_work_node	list;
 	unsigned int		nr_cached;
 };
 
diff --git a/io_uring/alloc_cache.h b/io_uring/alloc_cache.h
index 729793ae9712..ae61eb383cae 100644
--- a/io_uring/alloc_cache.h
+++ b/io_uring/alloc_cache.h
@@ -7,7 +7,7 @@
 #define IO_ALLOC_CACHE_MAX	512
 
 struct io_cache_entry {
-	struct hlist_node	node;
+	struct io_wq_work_node node;
 };
 
 static inline bool io_alloc_cache_put(struct io_alloc_cache *cache,
@@ -15,7 +15,7 @@ static inline bool io_alloc_cache_put(struct io_alloc_cache *cache,
 {
 	if (cache->nr_cached < IO_ALLOC_CACHE_MAX) {
 		cache->nr_cached++;
-		hlist_add_head(&entry->node, &cache->list);
+		wq_stack_add_head(&entry->node, &cache->list);
 		return true;
 	}
 	return false;
@@ -23,11 +23,11 @@ static inline bool io_alloc_cache_put(struct io_alloc_cache *cache,
 
 static inline struct io_cache_entry *io_alloc_cache_get(struct io_alloc_cache *cache)
 {
-	if (!hlist_empty(&cache->list)) {
-		struct hlist_node *node = cache->list.first;
-
-		hlist_del(node);
-		return container_of(node, struct io_cache_entry, node);
+	if (cache->list.next) {
+		struct io_cache_entry *entry;
+		entry = container_of(cache->list.next, struct io_cache_entry, node);
+		cache->list.next = cache->list.next->next;
+		return entry;
 	}
 
 	return NULL;
@@ -35,18 +35,18 @@ static inline struct io_cache_entry *io_alloc_cache_get(struct io_alloc_cache *c
 
 static inline void io_alloc_cache_init(struct io_alloc_cache *cache)
 {
-	INIT_HLIST_HEAD(&cache->list);
+	cache->list.next = NULL;
 	cache->nr_cached = 0;
 }
 
 static inline void io_alloc_cache_free(struct io_alloc_cache *cache,
 					void (*free)(struct io_cache_entry *))
 {
-	while (!hlist_empty(&cache->list)) {
-		struct hlist_node *node = cache->list.first;
-
-		hlist_del(node);
-		free(container_of(node, struct io_cache_entry, node));
+	while (1) {
+		struct io_cache_entry *entry = io_alloc_cache_get(cache);
+		if (!entry)
+			break;
+		free(entry);
 	}
 	cache->nr_cached = 0;
 }
-- 
2.30.2


* [PATCH v2 2/2] io_uring: Add KASAN support for alloc_caches
  2023-02-22 18:00 [PATCH v2 0/2] io_uring: Add KASAN support for alloc caches Breno Leitao
  2023-02-22 18:00 ` [PATCH v2 1/2] io_uring: Move from hlist to io_wq_work_node Breno Leitao
@ 2023-02-22 18:00 ` Breno Leitao
  2023-02-22 18:30   ` Jens Axboe
  1 sibling, 1 reply; 4+ messages in thread
From: Breno Leitao @ 2023-02-22 18:00 UTC (permalink / raw)
  To: axboe, asml.silence, io-uring
  Cc: linux-kernel, gustavold, leit, kasan-dev, Breno Leitao

From: Breno Leitao <leit@fb.com>

Add KASAN support to the alloc caches (apoll and netmsg_cache), so that
any access to an unused cached object raises a KASAN warning.

An object is poisoned when it is put into the cache, and unpoisoned when
it is taken back out of the cache or freed.

Signed-off-by: Breno Leitao <leitao@debian.org>
---
 io_uring/alloc_cache.h | 11 ++++++++---
 io_uring/io_uring.c    | 14 ++++++++++++--
 io_uring/net.c         |  2 +-
 io_uring/net.h         |  4 ----
 io_uring/poll.c        |  2 +-
 5 files changed, 22 insertions(+), 11 deletions(-)

diff --git a/io_uring/alloc_cache.h b/io_uring/alloc_cache.h
index ae61eb383cae..6c6bdde6306b 100644
--- a/io_uring/alloc_cache.h
+++ b/io_uring/alloc_cache.h
@@ -16,16 +16,20 @@ static inline bool io_alloc_cache_put(struct io_alloc_cache *cache,
 	if (cache->nr_cached < IO_ALLOC_CACHE_MAX) {
 		cache->nr_cached++;
 		wq_stack_add_head(&entry->node, &cache->list);
+		/* KASAN poisons object */
+		kasan_slab_free_mempool(entry);
 		return true;
 	}
 	return false;
 }
 
-static inline struct io_cache_entry *io_alloc_cache_get(struct io_alloc_cache *cache)
+static inline struct io_cache_entry *io_alloc_cache_get(struct io_alloc_cache *cache,
+							size_t size)
 {
 	if (cache->list.next) {
 		struct io_cache_entry *entry;
 		entry = container_of(cache->list.next, struct io_cache_entry, node);
+		kasan_unpoison_range(entry, size);
 		cache->list.next = cache->list.next->next;
 		return entry;
 	}
@@ -40,10 +44,11 @@ static inline void io_alloc_cache_init(struct io_alloc_cache *cache)
 }
 
 static inline void io_alloc_cache_free(struct io_alloc_cache *cache,
-					void (*free)(struct io_cache_entry *))
+					void (*free)(struct io_cache_entry *),
+					size_t size)
 {
 	while (1) {
-		struct io_cache_entry *entry = io_alloc_cache_get(cache);
+		struct io_cache_entry *entry = io_alloc_cache_get(cache, size);
 		if (!entry)
 			break;
 		free(entry);
diff --git a/io_uring/io_uring.c b/io_uring/io_uring.c
index 80b6204769e8..01367145689b 100644
--- a/io_uring/io_uring.c
+++ b/io_uring/io_uring.c
@@ -2766,6 +2766,17 @@ static void io_req_caches_free(struct io_ring_ctx *ctx)
 	mutex_unlock(&ctx->uring_lock);
 }
 
+static __cold void io_uring_acache_free(struct io_ring_ctx *ctx)
+{
+
+	io_alloc_cache_free(&ctx->apoll_cache, io_apoll_cache_free,
+			    sizeof(struct async_poll));
+#ifdef CONFIG_NET
+	io_alloc_cache_free(&ctx->netmsg_cache, io_netmsg_cache_free,
+			    sizeof(struct io_async_msghdr));
+#endif
+}
+
 static __cold void io_ring_ctx_free(struct io_ring_ctx *ctx)
 {
 	io_sq_thread_finish(ctx);
@@ -2781,8 +2792,7 @@ static __cold void io_ring_ctx_free(struct io_ring_ctx *ctx)
 		__io_sqe_files_unregister(ctx);
 	io_cqring_overflow_kill(ctx);
 	io_eventfd_unregister(ctx);
-	io_alloc_cache_free(&ctx->apoll_cache, io_apoll_cache_free);
-	io_alloc_cache_free(&ctx->netmsg_cache, io_netmsg_cache_free);
+	io_uring_acache_free(ctx);
 	mutex_unlock(&ctx->uring_lock);
 	io_destroy_buffers(ctx);
 	if (ctx->sq_creds)
diff --git a/io_uring/net.c b/io_uring/net.c
index fbc34a7c2743..8dc67b23b030 100644
--- a/io_uring/net.c
+++ b/io_uring/net.c
@@ -139,7 +139,7 @@ static struct io_async_msghdr *io_msg_alloc_async(struct io_kiocb *req,
 	struct io_async_msghdr *hdr;
 
 	if (!(issue_flags & IO_URING_F_UNLOCKED)) {
-		entry = io_alloc_cache_get(&ctx->netmsg_cache);
+		entry = io_alloc_cache_get(&ctx->netmsg_cache, sizeof(struct io_async_msghdr));
 		if (entry) {
 			hdr = container_of(entry, struct io_async_msghdr, cache);
 			hdr->free_iov = NULL;
diff --git a/io_uring/net.h b/io_uring/net.h
index 5ffa11bf5d2e..d8359de84996 100644
--- a/io_uring/net.h
+++ b/io_uring/net.h
@@ -62,8 +62,4 @@ int io_send_zc_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe);
 void io_send_zc_cleanup(struct io_kiocb *req);
 
 void io_netmsg_cache_free(struct io_cache_entry *entry);
-#else
-static inline void io_netmsg_cache_free(struct io_cache_entry *entry)
-{
-}
 #endif
diff --git a/io_uring/poll.c b/io_uring/poll.c
index 8339a92b4510..295d59875f00 100644
--- a/io_uring/poll.c
+++ b/io_uring/poll.c
@@ -661,7 +661,7 @@ static struct async_poll *io_req_alloc_apoll(struct io_kiocb *req,
 		apoll = req->apoll;
 		kfree(apoll->double_poll);
 	} else if (!(issue_flags & IO_URING_F_UNLOCKED)) {
-		entry = io_alloc_cache_get(&ctx->apoll_cache);
+		entry = io_alloc_cache_get(&ctx->apoll_cache, sizeof(struct async_poll));
 		if (entry == NULL)
 			goto alloc_apoll;
 		apoll = container_of(entry, struct async_poll, cache);
-- 
2.30.2


* Re: [PATCH v2 2/2] io_uring: Add KASAN support for alloc_caches
  2023-02-22 18:00 ` [PATCH v2 2/2] io_uring: Add KASAN support for alloc_caches Breno Leitao
@ 2023-02-22 18:30   ` Jens Axboe
  0 siblings, 0 replies; 4+ messages in thread
From: Jens Axboe @ 2023-02-22 18:30 UTC (permalink / raw)
  To: Breno Leitao, asml.silence, io-uring
  Cc: linux-kernel, gustavold, leit, kasan-dev, Breno Leitao

On 2/22/23 11:00 AM, Breno Leitao wrote:
> -static inline struct io_cache_entry *io_alloc_cache_get(struct io_alloc_cache *cache)
> +static inline struct io_cache_entry *io_alloc_cache_get(struct io_alloc_cache *cache,
> +							size_t size)
>  {
>  	if (cache->list.next) {
>  		struct io_cache_entry *entry;
>  		entry = container_of(cache->list.next, struct io_cache_entry, node);
> +		kasan_unpoison_range(entry, size);
>  		cache->list.next = cache->list.next->next;
>  		return entry;
>  	}

Does this generate the same code if KASAN isn't enabled? Since there's a
4-byte hole in struct io_alloc_cache(), might be cleaner to simply add
the 'size' argument to io_alloc_cache_init() and store it in the cache.
Then the above just becomes:

	kasan_unpoison_range(entry, cache->elem_size);

instead and that'd definitely generate the same code as before if KASAN
isn't enabled.
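
Roughly, an (untested) sketch of that variant; the elem_size field name
below is just an example:

	struct io_alloc_cache {
		struct io_wq_work_node	list;
		unsigned int		nr_cached;
		/* fits the existing 4-byte hole */
		unsigned int		elem_size;
	};

	static inline void io_alloc_cache_init(struct io_alloc_cache *cache,
					       unsigned int size)
	{
		cache->list.next = NULL;
		cache->nr_cached = 0;
		cache->elem_size = size;
	}

	static inline struct io_cache_entry *io_alloc_cache_get(struct io_alloc_cache *cache)
	{
		if (cache->list.next) {
			struct io_cache_entry *entry;

			entry = container_of(cache->list.next, struct io_cache_entry, node);
			kasan_unpoison_range(entry, cache->elem_size);
			cache->list.next = cache->list.next->next;
			return entry;
		}

		return NULL;
	}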

-- 
Jens Axboe

