path: root/io_uring/alloc_cache.h
author:    Breno Leitao <leitao@debian.org>    2023-02-23 08:43:52 -0800
committer: Jens Axboe <axboe@kernel.dk>        2023-04-03 07:16:12 -0600
commit:    efba1a9e653e107577a48157b5424878c46f2285 (patch)
tree:      2a70cd4738906ed7ddf0187cfdc5e3b422cdb7c4 /io_uring/alloc_cache.h
parent:    da64d6db3bd304d44d7ac1eb7f319a1cc7efd611 (diff)
download:  linux-efba1a9e653e107577a48157b5424878c46f2285.tar.gz
io_uring: Move from hlist to io_wq_work_node
Having cache entries linked using the hlist format brings no benefit, and
also requires an unnecessary extra pointer address per cache entry.

Use the internal io_wq_work_node single-linked list for the internal alloc
caches (async_msghdr and async_poll).

This is a prerequisite for using KASAN on cache entries: with a
single-linked stack, pushing a new entry never touches the unused (and
poisoned) entries already parked in the list.

Suggested-by: Pavel Begunkov <asml.silence@gmail.com>
Signed-off-by: Breno Leitao <leitao@debian.org>
Link: https://lore.kernel.org/r/20230223164353.2839177-2-leitao@debian.org
Signed-off-by: Jens Axboe <axboe@kernel.dk>
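To illustrate the data-structure change, here is a minimal userspace sketch of the single-linked stack discipline the patch adopts. The types and helpers (stack_node, stack_head, stack_push, stack_pop) are local stand-ins written for this example, not kernel identifiers; they mirror what wq_stack_add_head() and the open-coded pop in io_alloc_cache_get() do, using one pointer word per entry instead of the two (next plus pprev) that hlist_node carries.

#include <stdio.h>
#include <stddef.h>

struct stack_node {                     /* stand-in for io_wq_work_node */
        struct stack_node *next;        /* one pointer per entry */
};

struct stack_head {
        struct stack_node *first;
};

/* push: mirrors wq_stack_add_head(); only the head and new node are touched */
static void stack_push(struct stack_node *node, struct stack_head *head)
{
        node->next = head->first;
        head->first = node;
}

/* pop: mirrors the open-coded removal in the patched io_alloc_cache_get() */
static struct stack_node *stack_pop(struct stack_head *head)
{
        struct stack_node *node = head->first;

        if (node)
                head->first = node->next;
        return node;
}

int main(void)
{
        struct stack_head head = { NULL };
        struct stack_node a, b;

        stack_push(&a, &head);
        stack_push(&b, &head);
        /* LIFO order: b comes off first, then a */
        printf("%s\n", stack_pop(&head) == &b ? "pop b" : "?");
        printf("%s\n", stack_pop(&head) == &a ? "pop a" : "?");
        return 0;
}

Because a push only writes the new node and the list head, entries further down the stack are never dereferenced, which is what later lets KASAN keep parked entries poisoned.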
Diffstat (limited to 'io_uring/alloc_cache.h')
-rw-r--r--    io_uring/alloc_cache.h    24
1 file changed, 13 insertions(+), 11 deletions(-)
diff --git a/io_uring/alloc_cache.h b/io_uring/alloc_cache.h
index c2cde88aeed53..aaa838c31d92d 100644
--- a/io_uring/alloc_cache.h
+++ b/io_uring/alloc_cache.h
@@ -7,7 +7,7 @@
#define IO_ALLOC_CACHE_MAX 512
struct io_cache_entry {
- struct hlist_node node;
+ struct io_wq_work_node node;
};
static inline bool io_alloc_cache_put(struct io_alloc_cache *cache,
@@ -15,7 +15,7 @@ static inline bool io_alloc_cache_put(struct io_alloc_cache *cache,
{
if (cache->nr_cached < IO_ALLOC_CACHE_MAX) {
cache->nr_cached++;
- hlist_add_head(&entry->node, &cache->list);
+ wq_stack_add_head(&entry->node, &cache->list);
return true;
}
return false;
@@ -23,12 +23,13 @@ static inline bool io_alloc_cache_put(struct io_alloc_cache *cache,
static inline struct io_cache_entry *io_alloc_cache_get(struct io_alloc_cache *cache)
{
- if (!hlist_empty(&cache->list)) {
- struct hlist_node *node = cache->list.first;
+ if (cache->list.next) {
+ struct io_cache_entry *entry;
- hlist_del(node);
+ entry = container_of(cache->list.next, struct io_cache_entry, node);
+ cache->list.next = cache->list.next->next;
cache->nr_cached--;
- return container_of(node, struct io_cache_entry, node);
+ return entry;
}
return NULL;
@@ -36,18 +37,19 @@ static inline struct io_cache_entry *io_alloc_cache_get(struct io_alloc_cache *c
static inline void io_alloc_cache_init(struct io_alloc_cache *cache)
{
- INIT_HLIST_HEAD(&cache->list);
+ cache->list.next = NULL;
cache->nr_cached = 0;
}
static inline void io_alloc_cache_free(struct io_alloc_cache *cache,
void (*free)(struct io_cache_entry *))
{
- while (!hlist_empty(&cache->list)) {
- struct hlist_node *node = cache->list.first;
+ while (1) {
+ struct io_cache_entry *entry = io_alloc_cache_get(cache);
- hlist_del(node);
- free(container_of(node, struct io_cache_entry, node));
+ if (!entry)
+ break;
+ free(entry);
}
cache->nr_cached = 0;
}
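For a fuller picture of how the patched helpers compose, the following self-contained userspace sketch re-declares simplified versions of the structures and walks an entry through init, put, and get. struct my_async_data and the cache_* names are hypothetical stand-ins for a type that embeds io_cache_entry (as async_msghdr and async_poll do in the kernel); the IO_ALLOC_CACHE_MAX bound is mirrored, while the KASAN poisoning mentioned in the changelog is omitted.

#include <stdlib.h>
#include <stddef.h>

#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

#define ALLOC_CACHE_MAX 512             /* mirrors IO_ALLOC_CACHE_MAX */

struct io_wq_work_node {
        struct io_wq_work_node *next;
};

struct io_cache_entry {
        struct io_wq_work_node node;
};

struct io_alloc_cache {
        struct io_wq_work_node list;
        unsigned int nr_cached;
};

/* hypothetical object embedding a cache entry, for illustration only */
struct my_async_data {
        struct io_cache_entry cache;
        int payload;
};

static void cache_init(struct io_alloc_cache *cache)
{
        cache->list.next = NULL;
        cache->nr_cached = 0;
}

static int cache_put(struct io_alloc_cache *cache, struct io_cache_entry *entry)
{
        if (cache->nr_cached < ALLOC_CACHE_MAX) {
                cache->nr_cached++;
                entry->node.next = cache->list.next;    /* wq_stack_add_head() */
                cache->list.next = &entry->node;
                return 1;
        }
        return 0;
}

static struct io_cache_entry *cache_get(struct io_alloc_cache *cache)
{
        if (cache->list.next) {
                struct io_cache_entry *entry;

                entry = container_of(cache->list.next, struct io_cache_entry, node);
                cache->list.next = cache->list.next->next;
                cache->nr_cached--;
                return entry;
        }
        return NULL;
}

int main(void)
{
        struct io_alloc_cache cache;
        struct my_async_data *data = malloc(sizeof(*data));
        struct io_cache_entry *entry;

        cache_init(&cache);
        cache_put(&cache, &data->cache);        /* recycle instead of freeing */
        entry = cache_get(&cache);              /* reuse on the next request */
        if (entry)
                free(container_of(entry, struct my_async_data, cache));
        return 0;
}

The strictly LIFO usage is the reason the single next pointer suffices: the cache only ever pushes and pops at the head, so the back-pointer that hlist_node maintains for arbitrary-position deletion was pure overhead here.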