path: root/kernel/workqueue.c
author		Tejun Heo <tj@kernel.org>	2023-08-07 15:57:23 -1000
committer	Tejun Heo <tj@kernel.org>	2023-08-07 15:57:23 -1000
commit		ee1ceef72754427e5167743108c52f826fa4ca5b (patch)
tree		5e89b0dbc63f713efdf1673ad57872b46b1dbe95 /kernel/workqueue.c
parent		fe089f87cccb066e8ad20f49ddf05e95adc1fa8d (diff)
download	linux-ee1ceef72754427e5167743108c52f826fa4ca5b.tar.gz
workqueue: Rename wq->cpu_pwqs to wq->cpu_pwq
wq->cpu_pwqs is a percpu variable carrying one pointer to a pool_workqueue. The field name being plural is unusual and confusing. Rename it to singular.

This patch doesn't cause any functional changes.

Signed-off-by: Tejun Heo <tj@kernel.org>
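For readers unfamiliar with the field, wq->cpu_pwq follows the usual kernel per-CPU allocation idiom that the hunks below exercise: alloc_percpu() hands out one instance per possible CPU, per_cpu_ptr() reaches a specific CPU's instance, and free_percpu() releases them all. The following is a minimal sketch of that idiom using a simplified, hypothetical structure (struct demo_pwq and the demo_* functions are illustrative only and are not part of this patch):

#include <linux/percpu.h>
#include <linux/cpumask.h>
#include <linux/errno.h>

/* Hypothetical stand-in for struct pool_workqueue. */
struct demo_pwq {
	int nr_active;
};

/* Singular name: one per-CPU pointer; each CPU owns its own instance. */
static struct demo_pwq __percpu *demo_cpu_pwq;

static int demo_alloc(void)
{
	int cpu;

	/* One struct demo_pwq is allocated for every possible CPU. */
	demo_cpu_pwq = alloc_percpu(struct demo_pwq);
	if (!demo_cpu_pwq)
		return -ENOMEM;

	/* Initialize each CPU's copy, as alloc_and_link_pwqs() does below. */
	for_each_possible_cpu(cpu) {
		struct demo_pwq *pwq = per_cpu_ptr(demo_cpu_pwq, cpu);

		pwq->nr_active = 0;
	}
	return 0;
}

static void demo_free(void)
{
	free_percpu(demo_cpu_pwq);
}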
Diffstat (limited to 'kernel/workqueue.c')
-rw-r--r--	kernel/workqueue.c	14
1 file changed, 7 insertions(+), 7 deletions(-)
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 1332bd545b929..ea94ad63386e9 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -321,7 +321,7 @@ struct workqueue_struct {
/* hot fields used during command issue, aligned to cacheline */
unsigned int flags ____cacheline_aligned; /* WQ: WQ_* flags */
- struct pool_workqueue __percpu *cpu_pwqs; /* I: per-cpu pwqs */
+ struct pool_workqueue __percpu *cpu_pwq; /* I: per-cpu pwqs */
struct pool_workqueue __rcu *numa_pwq_tbl[]; /* PWR: unbound pwqs indexed by node */
};
@@ -1635,7 +1635,7 @@ retry:
} else {
if (req_cpu == WORK_CPU_UNBOUND)
cpu = raw_smp_processor_id();
- pwq = per_cpu_ptr(wq->cpu_pwqs, cpu);
+ pwq = per_cpu_ptr(wq->cpu_pwq, cpu);
}
pool = pwq->pool;
@@ -3826,7 +3826,7 @@ static void rcu_free_wq(struct rcu_head *rcu)
wq_free_lockdep(wq);
if (!(wq->flags & WQ_UNBOUND))
- free_percpu(wq->cpu_pwqs);
+ free_percpu(wq->cpu_pwq);
else
free_workqueue_attrs(wq->unbound_attrs);
@@ -4518,13 +4518,13 @@ static int alloc_and_link_pwqs(struct workqueue_struct *wq)
int cpu, ret;
if (!(wq->flags & WQ_UNBOUND)) {
- wq->cpu_pwqs = alloc_percpu(struct pool_workqueue);
- if (!wq->cpu_pwqs)
+ wq->cpu_pwq = alloc_percpu(struct pool_workqueue);
+ if (!wq->cpu_pwq)
return -ENOMEM;
for_each_possible_cpu(cpu) {
struct pool_workqueue *pwq =
- per_cpu_ptr(wq->cpu_pwqs, cpu);
+ per_cpu_ptr(wq->cpu_pwq, cpu);
struct worker_pool *cpu_pools =
per_cpu(cpu_worker_pools, cpu);
@@ -4905,7 +4905,7 @@ bool workqueue_congested(int cpu, struct workqueue_struct *wq)
cpu = smp_processor_id();
if (!(wq->flags & WQ_UNBOUND))
- pwq = per_cpu_ptr(wq->cpu_pwqs, cpu);
+ pwq = per_cpu_ptr(wq->cpu_pwq, cpu);
else
pwq = unbound_pwq_by_node(wq, cpu_to_node(cpu));