author		NeilBrown <neilb@suse.de>	2023-09-11 10:40:09 -0400
committer	Chuck Lever <chuck.lever@oracle.com>	2023-10-16 12:44:07 -0400
commit		2e8fc923fe476db8cab9b6458027eccb22f3b6e6 (patch)
tree		5b143877a907eb433062042d60d09f469b2a9728 /net/sunrpc
parent		9a0e6accc0a8c3adf72f1b43be8019961b68663a (diff)
download	linux-2e8fc923fe476db8cab9b6458027eccb22f3b6e6.tar.gz
SUNRPC: change sp_nrthreads to atomic_t
Using an atomic_t avoids the need to take a spinlock (which can soon be
removed). Choosing a thread to kill needs to be careful as we cannot
set the "die now" bit atomically with the test on the count. Instead we
temporarily increase the count.

Signed-off-by: NeilBrown <neilb@suse.de>
Signed-off-by: Chuck Lever <chuck.lever@oracle.com>
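The "temporarily increase the count" trick is worth spelling out. Below is a
minimal, self-contained userspace sketch of the same pattern, for illustration
only: struct pool, try_claim_victim() and the C11 atomics are hypothetical
stand-ins for the kernel's svc_pool, svc_pool_victim() and
atomic_inc_not_zero()/atomic_dec_and_test(), and the plain flag stands in for
the SP_NEED_VICTIM bit.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-ins for the fields the patch touches. */
struct pool {
	atomic_int  nrthreads;   /* plays the role of sp_nrthreads */
	atomic_bool need_victim; /* plays the role of the SP_NEED_VICTIM bit */
};

/*
 * We cannot test the thread count and set the "die now" flag in one
 * atomic step, so we first take a temporary reference on the count
 * (inc-not-zero).  After setting the flag we drop that reference; if
 * the count falls to zero, every real thread exited in the meantime,
 * so the flag is withdrawn and the caller must look elsewhere.
 */
static bool try_claim_victim(struct pool *p)
{
	int old = atomic_load(&p->nrthreads);

	/* equivalent of atomic_inc_not_zero(&pool->sp_nrthreads) */
	do {
		if (old == 0)
			return false;	/* no threads: nothing to kill */
	} while (!atomic_compare_exchange_weak(&p->nrthreads, &old, old + 1));

	atomic_store(&p->need_victim, true);

	/* equivalent of atomic_dec_and_test(&pool->sp_nrthreads) */
	if (atomic_fetch_sub(&p->nrthreads, 1) == 1) {
		/* count hit zero: undo the claim, caller retries elsewhere */
		atomic_store(&p->need_victim, false);
		return false;
	}
	return true;
}

int main(void)
{
	struct pool busy = { 3, false };
	struct pool idle = { 0, false };

	printf("busy pool claimed: %d\n", try_claim_victim(&busy)); /* 1 */
	printf("idle pool claimed: %d\n", try_claim_victim(&idle)); /* 0 */
	return 0;
}

The key property mirrors the patch: holding the temporary reference while the
flag is set guarantees that either at least one real thread remains to act on
it, or the claimant itself observes the count reaching zero and withdraws the
flag before anyone can trust it.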
Diffstat (limited to 'net/sunrpc')
-rw-r--r--	net/sunrpc/svc.c | 37
1 file changed, 20 insertions(+), 17 deletions(-)
diff --git a/net/sunrpc/svc.c b/net/sunrpc/svc.c
index 244b5b9eba4d63..0928d3f918b0bb 100644
--- a/net/sunrpc/svc.c
+++ b/net/sunrpc/svc.c
@@ -681,8 +681,8 @@ svc_prepare_thread(struct svc_serv *serv, struct svc_pool *pool, int node)
 	serv->sv_nrthreads += 1;
 	spin_unlock_bh(&serv->sv_lock);
 
+	atomic_inc(&pool->sp_nrthreads);
 	spin_lock_bh(&pool->sp_lock);
-	pool->sp_nrthreads++;
 	list_add_rcu(&rqstp->rq_all, &pool->sp_all_threads);
 	spin_unlock_bh(&pool->sp_lock);
 	return rqstp;
@@ -727,23 +727,24 @@ svc_pool_next(struct svc_serv *serv, struct svc_pool *pool, unsigned int *state)
 }
 
 static struct svc_pool *
-svc_pool_victim(struct svc_serv *serv, struct svc_pool *pool, unsigned int *state)
+svc_pool_victim(struct svc_serv *serv, struct svc_pool *target_pool,
+		unsigned int *state)
 {
+	struct svc_pool *pool;
 	unsigned int i;
 
+retry:
+	pool = target_pool;
+
 	if (pool != NULL) {
-		spin_lock_bh(&pool->sp_lock);
-		if (pool->sp_nrthreads)
+		if (atomic_inc_not_zero(&pool->sp_nrthreads))
 			goto found_pool;
-		spin_unlock_bh(&pool->sp_lock);
 		return NULL;
 	} else {
 		for (i = 0; i < serv->sv_nrpools; i++) {
 			pool = &serv->sv_pools[--(*state) % serv->sv_nrpools];
-			spin_lock_bh(&pool->sp_lock);
-			if (pool->sp_nrthreads)
+			if (atomic_inc_not_zero(&pool->sp_nrthreads))
 				goto found_pool;
-			spin_unlock_bh(&pool->sp_lock);
 		}
 		return NULL;
 	}
@@ -751,8 +752,12 @@ svc_pool_victim(struct svc_serv *serv, struct svc_pool *pool, unsigned int *stat
 found_pool:
 	set_bit(SP_VICTIM_REMAINS, &pool->sp_flags);
 	set_bit(SP_NEED_VICTIM, &pool->sp_flags);
-	spin_unlock_bh(&pool->sp_lock);
-	return pool;
+	if (!atomic_dec_and_test(&pool->sp_nrthreads))
+		return pool;
+	/* Nothing left in this pool any more */
+	clear_bit(SP_NEED_VICTIM, &pool->sp_flags);
+	clear_bit(SP_VICTIM_REMAINS, &pool->sp_flags);
+	goto retry;
 }
 
 static int
@@ -828,13 +833,10 @@ svc_stop_kthreads(struct svc_serv *serv, struct svc_pool *pool, int nrservs)
 int
 svc_set_num_threads(struct svc_serv *serv, struct svc_pool *pool, int nrservs)
 {
-	if (pool == NULL) {
+	if (!pool)
 		nrservs -= serv->sv_nrthreads;
-	} else {
-		spin_lock_bh(&pool->sp_lock);
-		nrservs -= pool->sp_nrthreads;
-		spin_unlock_bh(&pool->sp_lock);
-	}
+	else
+		nrservs -= atomic_read(&pool->sp_nrthreads);
 
 	if (nrservs > 0)
 		return svc_start_kthreads(serv, pool, nrservs);
@@ -921,10 +923,11 @@ svc_exit_thread(struct svc_rqst *rqstp)
 	struct svc_pool *pool = rqstp->rq_pool;
 
 	spin_lock_bh(&pool->sp_lock);
-	pool->sp_nrthreads--;
 	list_del_rcu(&rqstp->rq_all);
 	spin_unlock_bh(&pool->sp_lock);
 
+	atomic_dec(&pool->sp_nrthreads);
+
 	spin_lock_bh(&serv->sv_lock);
 	serv->sv_nrthreads -= 1;
 	spin_unlock_bh(&serv->sv_lock);