author    Tejun Heo <tj@kernel.org>  2023-08-07 15:57:24 -1000
committer Tejun Heo <tj@kernel.org>  2023-08-07 15:57:24 -1000
commit    a86feae6195ac2148097b063f7fdad8ee1f6dad4 (patch)
tree      efc26cee82429b2edd51316c7d74f8ddc23300e9 /kernel/workqueue.c
parent    fef59c9cab6ac5592da54f6c2b1195418f14e4d0 (diff)
download  linux-a86feae6195ac2148097b063f7fdad8ee1f6dad4.tar.gz
workqueue: Move wq_pod_init() below workqueue_init()
wq_pod_init() is called from workqueue_init() and responsible for initializing unbound CPU pods according to NUMA node. Workqueue is in the process of improving affinity awareness and wants to use other topology information to initialize unbound CPU pods; however, unlike NUMA nodes, other topology information isn't yet available in workqueue_init().

The next patch will introduce a later stage init function for workqueue which will be responsible for initializing unbound CPU pods. Relocate wq_pod_init() below workqueue_init() where the new init function is going to be located so that the diff can show the content differences.

Just a relocation. No functional changes.

Signed-off-by: Tejun Heo <tj@kernel.org>
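The patch depends on a plain C idiom: forward-declare a static function so its definition can sit below its caller in the same translation unit. Below is a minimal userspace sketch of that pattern under illustrative names (setup(), late_helper()); it is not kernel code and the names are not from workqueue.c.

	/*
	 * Minimal sketch of the relocation pattern: the forward declaration
	 * lets setup() call late_helper() even though the definition has
	 * been moved below it, just as wq_pod_init() is declared before
	 * workqueue_init() and defined after it.
	 */
	#include <stdio.h>

	static void late_helper(void);	/* forward declaration */

	static void setup(void)
	{
		/* caller appears first in the file, like workqueue_init() */
		late_helper();
	}

	/* definition relocated below the caller; behavior is unchanged */
	static void late_helper(void)
	{
		printf("late_helper() runs despite being defined after setup()\n");
	}

	int main(void)
	{
		setup();
		return 0;
	}

Because only the definition moves while the declaration stays visible to the caller, the compiled behavior is identical, which is why the commit can state "No functional changes."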
Diffstat (limited to 'kernel/workqueue.c')
-rw-r--r--	kernel/workqueue.c	78
1 file changed, 40 insertions(+), 38 deletions(-)
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 8a484f606e74d8..1e528b7e12c562 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -6256,44 +6256,7 @@ static inline void wq_watchdog_init(void) { }
#endif /* CONFIG_WQ_WATCHDOG */
-static void __init wq_pod_init(void)
-{
- cpumask_var_t *tbl;
- int node, cpu;
-
- if (num_possible_nodes() <= 1)
- return;
-
- for_each_possible_cpu(cpu) {
- if (WARN_ON(cpu_to_node(cpu) == NUMA_NO_NODE)) {
- pr_warn("workqueue: NUMA node mapping not available for cpu%d, disabling NUMA support\n", cpu);
- return;
- }
- }
-
- wq_update_pod_attrs_buf = alloc_workqueue_attrs();
- BUG_ON(!wq_update_pod_attrs_buf);
-
- /*
- * We want masks of possible CPUs of each node which isn't readily
- * available. Build one from cpu_to_node() which should have been
- * fully initialized by now.
- */
- tbl = kcalloc(nr_node_ids, sizeof(tbl[0]), GFP_KERNEL);
- BUG_ON(!tbl);
-
- for_each_node(node)
- BUG_ON(!zalloc_cpumask_var_node(&tbl[node], GFP_KERNEL,
- node_online(node) ? node : NUMA_NO_NODE));
-
- for_each_possible_cpu(cpu) {
- node = cpu_to_node(cpu);
- cpumask_set_cpu(cpu, tbl[node]);
- }
-
- wq_pod_cpus = tbl;
- wq_pod_enabled = true;
-}
+static void wq_pod_init(void);
/**
* workqueue_init_early - early init for workqueue subsystem
@@ -6474,6 +6437,45 @@ void __init workqueue_init(void)
wq_watchdog_init();
}
+static void __init wq_pod_init(void)
+{
+ cpumask_var_t *tbl;
+ int node, cpu;
+
+ if (num_possible_nodes() <= 1)
+ return;
+
+ for_each_possible_cpu(cpu) {
+ if (WARN_ON(cpu_to_node(cpu) == NUMA_NO_NODE)) {
+ pr_warn("workqueue: NUMA node mapping not available for cpu%d, disabling NUMA support\n", cpu);
+ return;
+ }
+ }
+
+ wq_update_pod_attrs_buf = alloc_workqueue_attrs();
+ BUG_ON(!wq_update_pod_attrs_buf);
+
+ /*
+ * We want masks of possible CPUs of each node which isn't readily
+ * available. Build one from cpu_to_node() which should have been
+ * fully initialized by now.
+ */
+ tbl = kcalloc(nr_node_ids, sizeof(tbl[0]), GFP_KERNEL);
+ BUG_ON(!tbl);
+
+ for_each_node(node)
+ BUG_ON(!zalloc_cpumask_var_node(&tbl[node], GFP_KERNEL,
+ node_online(node) ? node : NUMA_NO_NODE));
+
+ for_each_possible_cpu(cpu) {
+ node = cpu_to_node(cpu);
+ cpumask_set_cpu(cpu, tbl[node]);
+ }
+
+ wq_pod_cpus = tbl;
+ wq_pod_enabled = true;
+}
+
void __warn_flushing_systemwide_wq(void)
{
pr_warn("WARNING: Flushing system-wide workqueues will be prohibited in near future.\n");