author		Uladzislau Rezki (Sony) <urezki@gmail.com>	2022-10-25 16:46:12 +0200
committer	Paul E. McKenney <paulmck@kernel.org>	2023-01-03 17:48:40 -0800
commit		04a522b7da3dbc083f8ae0aa1a6184b959a8f81c
tree		492ecb846a88726dbc2221b93b6217ad8200ce5c /kernel/rcu
parent		1b929c02afd37871d5afb9d498426f83432e71c2
rcu: Refactor kvfree_call_rcu() and high-level helpers
Currently kvfree_call_rcu() takes an offset within a structure as its second
parameter, so a helper such as kvfree_rcu_arg_2() has to convert an rcu_head
pointer and the pointer to be freed into that offset before passing it along.
That costs an extra conversion on macro entry.

Instead of converting, refactor the code so that the pointer to be freed is
passed directly to kvfree_call_rcu().

This patch makes no functional change and is transparent to all kvfree_rcu()
users.

Signed-off-by: Uladzislau Rezki (Sony) <urezki@gmail.com>
Signed-off-by: Paul E. McKenney <paulmck@kernel.org>
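To illustrate the interface change, a minimal sketch of a caller before and
after the patch; struct foo and free_foo() are hypothetical names, not part
of the patch:

	struct foo {
		int data;
		struct rcu_head rh;	/* rcu_head embedded in the object */
	};

	static void free_foo(struct foo *p)
	{
		/*
		 * Before this patch, the second argument carried the offset
		 * of the embedded rcu_head, cast to rcu_callback_t, roughly:
		 *
		 *	kvfree_call_rcu(&p->rh, (rcu_callback_t)offsetof(struct foo, rh));
		 *
		 * and the callee recovered the object pointer as
		 * "ptr = (void *) head - (unsigned long) func". After the
		 * patch, the pointer to be freed is passed directly:
		 */
		kvfree_call_rcu(&p->rh, (void *) p);
	}

The two-argument kvfree_rcu(p, rh) macro expands to essentially this call,
which is why the offset conversion on macro entry goes away.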
Diffstat (limited to 'kernel/rcu')
-rw-r--r--	kernel/rcu/tiny.c	 9
-rw-r--r--	kernel/rcu/tree.c	29
2 files changed, 15 insertions(+), 23 deletions(-)
diff --git a/kernel/rcu/tiny.c b/kernel/rcu/tiny.c
index 72913ce21258b1..42f7589e51e09e 100644
--- a/kernel/rcu/tiny.c
+++ b/kernel/rcu/tiny.c
@@ -246,15 +246,12 @@ bool poll_state_synchronize_rcu(unsigned long oldstate)
 EXPORT_SYMBOL_GPL(poll_state_synchronize_rcu);
 
 #ifdef CONFIG_KASAN_GENERIC
-void kvfree_call_rcu(struct rcu_head *head, rcu_callback_t func)
+void kvfree_call_rcu(struct rcu_head *head, void *ptr)
 {
-	if (head) {
-		void *ptr = (void *) head - (unsigned long) func;
-
+	if (head)
 		kasan_record_aux_stack_noalloc(ptr);
-	}
 
-	__kvfree_call_rcu(head, func);
+	__kvfree_call_rcu(head, ptr);
 }
 EXPORT_SYMBOL_GPL(kvfree_call_rcu);
 #endif
diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
index cf34a961821ade..7d222acd85bfd5 100644
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
@@ -3103,8 +3103,8 @@ static void kfree_rcu_work(struct work_struct *work)
 	 * This list is named "Channel 3".
 	 */
 	for (; head; head = next) {
-		unsigned long offset = (unsigned long)head->func;
-		void *ptr = (void *)head - offset;
+		void *ptr = (void *) head->func;
+		unsigned long offset = (void *) head - ptr;
 
 		next = head->next;
 		debug_rcu_head_unqueue((struct rcu_head *)ptr);
@@ -3342,26 +3342,21 @@ add_ptr_to_bulk_krc_lock(struct kfree_rcu_cpu **krcp,
  * be free'd in workqueue context. This allows us to: batch requests together to
  * reduce the number of grace periods during heavy kfree_rcu()/kvfree_rcu() load.
  */
-void kvfree_call_rcu(struct rcu_head *head, rcu_callback_t func)
+void kvfree_call_rcu(struct rcu_head *head, void *ptr)
 {
 	unsigned long flags;
 	struct kfree_rcu_cpu *krcp;
 	bool success;
-	void *ptr;
 
-	if (head) {
-		ptr = (void *) head - (unsigned long) func;
-	} else {
-		/*
-		 * Please note there is a limitation for the head-less
-		 * variant, that is why there is a clear rule for such
-		 * objects: it can be used from might_sleep() context
-		 * only. For other places please embed an rcu_head to
-		 * your data.
-		 */
+	/*
+	 * Please note there is a limitation for the head-less
+	 * variant, that is why there is a clear rule for such
+	 * objects: it can be used from might_sleep() context
+	 * only. For other places please embed an rcu_head to
+	 * your data.
+	 */
+	if (!head)
 		might_sleep();
-		ptr = (unsigned long *) func;
-	}
 
 	// Queue the object but don't yet schedule the batch.
 	if (debug_rcu_head_queue(ptr)) {
@@ -3382,7 +3377,7 @@ void kvfree_call_rcu(struct rcu_head *head, rcu_callback_t func)
 		// Inline if kvfree_rcu(one_arg) call.
 		goto unlock_return;
 
-	head->func = func;
+	head->func = ptr;
 	head->next = krcp->head;
 	krcp->head = head;
 	success = true;
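Behavior is unchanged for users, as the relocated head-less comment above
notes. A usage sketch of the two kvfree_rcu() forms; struct bar and
release_bar() are hypothetical names, not from the patch:

	struct bar {
		struct rcu_head rh;
		char payload[64];
	};

	static void release_bar(struct bar *b, void *blob)
	{
		/* Two-argument form: rcu_head embedded, legal in any context. */
		kvfree_rcu(b, rh);

		/*
		 * One-argument (head-less) form: it may need to block, so it
		 * is legal only in contexts where might_sleep() is allowed.
		 */
		kvfree_rcu(blob);
	}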