author		NeilBrown <neilb@suse.de>	2023-09-11 10:39:11 -0400
committer	Chuck Lever <chuck.lever@oracle.com>	2023-10-16 12:44:04 -0400
commit		5ff817b23534dd3942f881ab01dd5050505517aa
tree		68645765fa9aa791b5ccd0f39dc687ab2d1b8b6e /net/sunrpc
parent		fa341560ca7458f4396d5a0771cb5f2358d8535d
download	linux-5ff817b23534dd3942f881ab01dd5050505517aa.tar.gz
SUNRPC: add list of idle threads
Rather than searching a list of threads to find an idle one, having a
list of idle threads allows an idle thread to be found immediately.

This adds some spin_lock calls, which is not ideal, but as the hold-time
is tiny it is still faster than searching a list. A future patch will
remove them using llist.h. This involves some subtlety and so is left
to a separate patch.

This removes the need for the RQ_BUSY flag. The rqst is "busy"
precisely when it is not on the "idle" list.

Signed-off-by: NeilBrown <neilb@suse.de>
Signed-off-by: Chuck Lever <chuck.lever@oracle.com>
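[Editor's note] The diff below calls two small helpers, svc_thread_set_busy()
and svc_thread_busy(), whose definitions are outside this net/sunrpc-only
diffstat (presumably in include/linux/sunrpc/svc.h). Given the rule stated
above, that a thread is "busy" precisely when it is not on the idle list,
a minimal sketch of what they could look like is:

	/* Sketch only, not part of this diff: a self-linked (empty)
	 * rq_idle entry means the thread is not queued on
	 * pool->sp_idle_threads, i.e. it is busy.
	 */
	static inline void svc_thread_set_busy(struct svc_rqst *rqstp)
	{
		/* re-initialise so list_empty() reports "busy" */
		INIT_LIST_HEAD(&rqstp->rq_idle);
	}

	static inline bool svc_thread_busy(struct svc_rqst *rqstp)
	{
		return list_empty(&rqstp->rq_idle);
	}

Under this sketch, the use of list_del_init() (rather than list_del()) in
the diff is what marks a dequeued thread busy again: re-initialising the
entry on removal leaves it self-linked, so no separate flag bit is needed.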
Diffstat (limited to 'net/sunrpc')
-rw-r--r--	net/sunrpc/svc.c	14
-rw-r--r--	net/sunrpc/svc_xprt.c	15
2 files changed, 20 insertions(+), 9 deletions(-)
diff --git a/net/sunrpc/svc.c b/net/sunrpc/svc.c
index db579bbc0a0a8d..9d080fe2dcdfa5 100644
--- a/net/sunrpc/svc.c
+++ b/net/sunrpc/svc.c
@@ -510,6 +510,7 @@ __svc_create(struct svc_program *prog, unsigned int bufsize, int npools,
 		pool->sp_id = i;
 		INIT_LIST_HEAD(&pool->sp_sockets);
 		INIT_LIST_HEAD(&pool->sp_all_threads);
+		INIT_LIST_HEAD(&pool->sp_idle_threads);
 		spin_lock_init(&pool->sp_lock);
 
 		percpu_counter_init(&pool->sp_messages_arrived, 0, GFP_KERNEL);
@@ -641,7 +642,7 @@ svc_rqst_alloc(struct svc_serv *serv, struct svc_pool *pool, int node)
 	folio_batch_init(&rqstp->rq_fbatch);
 
-	__set_bit(RQ_BUSY, &rqstp->rq_flags);
+	svc_thread_set_busy(rqstp);
 	rqstp->rq_server = serv;
 	rqstp->rq_pool = pool;
@@ -702,10 +703,13 @@ void svc_pool_wake_idle_thread(struct svc_pool *pool)
 	struct svc_rqst	*rqstp;
 
 	rcu_read_lock();
-	list_for_each_entry_rcu(rqstp, &pool->sp_all_threads, rq_all) {
-		if (test_and_set_bit(RQ_BUSY, &rqstp->rq_flags))
-			continue;
-
+	spin_lock_bh(&pool->sp_lock);
+	rqstp = list_first_entry_or_null(&pool->sp_idle_threads,
+					 struct svc_rqst, rq_idle);
+	if (rqstp)
+		list_del_init(&rqstp->rq_idle);
+	spin_unlock_bh(&pool->sp_lock);
+	if (rqstp) {
 		WRITE_ONCE(rqstp->rq_qtime, ktime_get());
 		wake_up_process(rqstp->rq_task);
 		rcu_read_unlock();
diff --git a/net/sunrpc/svc_xprt.c b/net/sunrpc/svc_xprt.c
index b8539545fefdb1..ebfeeb504a7955 100644
--- a/net/sunrpc/svc_xprt.c
+++ b/net/sunrpc/svc_xprt.c
@@ -737,8 +737,9 @@ static void svc_rqst_wait_for_work(struct svc_rqst *rqstp)
 		set_current_state(TASK_IDLE);
 		smp_mb__before_atomic();
 		clear_bit(SP_CONGESTED, &pool->sp_flags);
-		clear_bit(RQ_BUSY, &rqstp->rq_flags);
-		smp_mb__after_atomic();
+		spin_lock_bh(&pool->sp_lock);
+		list_add(&rqstp->rq_idle, &pool->sp_idle_threads);
+		spin_unlock_bh(&pool->sp_lock);
 
 		/* Need to check should_sleep() again after
 		 * setting task state in case a wakeup happened
@@ -751,8 +752,14 @@ static void svc_rqst_wait_for_work(struct svc_rqst *rqstp)
 			cond_resched();
 		}
 
-		set_bit(RQ_BUSY, &rqstp->rq_flags);
-		smp_mb__after_atomic();
+		/* We *must* be removed from the list before we can continue.
+		 * If we were woken, this is already done
+		 */
+		if (!svc_thread_busy(rqstp)) {
+			spin_lock_bh(&pool->sp_lock);
+			list_del_init(&rqstp->rq_idle);
+			spin_unlock_bh(&pool->sp_lock);
+		}
 	} else {
 		cond_resched();
 	}