author		Tejun Heo <tj@kernel.org>	2023-08-07 15:57:22 -1000
committer	Tejun Heo <tj@kernel.org>	2023-08-07 15:57:22 -1000
commit		c0ab017d43f4c4147f7ecf3ca3cb872a416e17c7 (patch)
tree		09bb413ada6b38a4592366325b12acae55a27ab6 /kernel/workqueue.c
parent		bc8b50c2dfac946c1beed782c1823e52cf55a352 (diff)
download	linux-c0ab017d43f4c4147f7ecf3ca3cb872a416e17c7.tar.gz
workqueue: Cleanups around process_scheduled_works()
* Drop the trivial optimization in worker_thread() where it bypasses calling
  process_scheduled_works() if the first work item isn't linked. This is a
  mostly pointless micro optimization and gets in the way of improving the
  work processing path.

* Consolidate pool->watchdog_ts updates in the two callers into
  process_scheduled_works().

Signed-off-by: Tejun Heo <tj@kernel.org>
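For reference, process_scheduled_works() as it reads with this patch applied, a sketch reconstructed from the first hunk of the diff below; the pool->watchdog_ts update that each caller used to do is now done once per batch, when the first scheduled item is picked up:

static void process_scheduled_works(struct worker *worker)
{
	struct work_struct *work;
	bool first = true;

	while ((work = list_first_entry_or_null(&worker->scheduled,
						struct work_struct, entry))) {
		/* consolidated watchdog update: only for the first item of a batch */
		if (first) {
			worker->pool->watchdog_ts = jiffies;
			first = false;
		}
		process_one_work(worker, work);
	}
}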
Diffstat (limited to 'kernel/workqueue.c')
-rw-r--r--	kernel/workqueue.c	29
1 file changed, 11 insertions, 18 deletions
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 598009ad97d1a..20722de9937a0 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -2652,9 +2652,15 @@ __acquires(&pool->lock)
  */
 static void process_scheduled_works(struct worker *worker)
 {
-	while (!list_empty(&worker->scheduled)) {
-		struct work_struct *work = list_first_entry(&worker->scheduled,
-						struct work_struct, entry);
+	struct work_struct *work;
+	bool first = true;
+
+	while ((work = list_first_entry_or_null(&worker->scheduled,
+						struct work_struct, entry))) {
+		if (first) {
+			worker->pool->watchdog_ts = jiffies;
+			first = false;
+		}
 		process_one_work(worker, work);
 	}
 }
@@ -2735,17 +2741,8 @@ recheck:
 			list_first_entry(&pool->worklist,
 					 struct work_struct, entry);
 
-		pool->watchdog_ts = jiffies;
-
-		if (likely(!(*work_data_bits(work) & WORK_STRUCT_LINKED))) {
-			/* optimization path, not strictly necessary */
-			process_one_work(worker, work);
-			if (unlikely(!list_empty(&worker->scheduled)))
-				process_scheduled_works(worker);
-		} else {
-			move_linked_works(work, &worker->scheduled, NULL);
-			process_scheduled_works(worker);
-		}
+		move_linked_works(work, &worker->scheduled, NULL);
+		process_scheduled_works(worker);
 	} while (keep_working(pool));
 
 	worker_set_flags(worker, WORKER_PREP);
@@ -2820,7 +2817,6 @@ repeat:
 					  struct pool_workqueue, mayday_node);
 		struct worker_pool *pool = pwq->pool;
 		struct work_struct *work, *n;
-		bool first = true;
 
 		__set_current_state(TASK_RUNNING);
 		list_del_init(&pwq->mayday_node);
@@ -2838,12 +2834,9 @@ repeat:
 		WARN_ON_ONCE(!list_empty(scheduled));
 		list_for_each_entry_safe(work, n, &pool->worklist, entry) {
 			if (get_work_pwq(work) == pwq) {
-				if (first)
-					pool->watchdog_ts = jiffies;
 				move_linked_works(work, scheduled, &n);
 				pwq->stats[PWQ_STAT_RESCUED]++;
 			}
-			first = false;
 		}
 
 		if (!list_empty(scheduled)) {
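For reference, the rescuer's scan over pool->worklist as it reads with this patch applied, a sketch reconstructed from the last two hunks above; the per-iteration "first"/watchdog bookkeeping is gone:

	WARN_ON_ONCE(!list_empty(scheduled));

	list_for_each_entry_safe(work, n, &pool->worklist, entry) {
		if (get_work_pwq(work) == pwq) {
			/* pool->watchdog_ts is no longer touched here */
			move_linked_works(work, scheduled, &n);
			pwq->stats[PWQ_STAT_RESCUED]++;
		}
	}

The collected items are then handed to process_scheduled_works(), the second of the "two callers" the commit message refers to, so the consolidated watchdog update covers the rescuer path as well.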