author		Linus Torvalds <torvalds@linux-foundation.org>	2024-01-09 16:32:11 -0800
committer	Linus Torvalds <torvalds@linux-foundation.org>	2024-01-09 16:32:11 -0800
commit		7da71072e1d6967c0482abcbb5991ffb5953fdf2 (patch)
tree		c5755f86dc2fee064525748bae960705715d4a11 /drivers/base
parent		7f73ba68cf67ef533783013f863d750c5736f957 (diff)
parent		f1e5e4639781724d05d90309900321baaecfde74 (diff)
download	linux-7da71072e1d6967c0482abcbb5991ffb5953fdf2.tar.gz
Merge tag 'pm-6.8-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm
Pull power management updates from Rafael Wysocki:
 "These add support for new processors (Sierra Forest, Grand Ridge and
  Meteor Lake) to the intel_idle driver, make intel_pstate run on Emerald
  Rapids without HWP support and adjust it to utilize EPP values supplied
  by the platform firmware, fix issues, clean up code and improve
  documentation.

  The most significant fix addresses deadlocks in the core system-wide
  resume code that occur if async_schedule_dev() attempts to run its
  argument function synchronously (for example, due to a memory allocation
  failure). It rearranges the code in question which may increase the
  system resume time in some cases, but this basically is a removal of a
  premature optimization. That optimization will be added back later, but
  properly this time.

  Specifics:

   - Add support for the Sierra Forest, Grand Ridge and Meteorlake SoCs to
     the intel_idle cpuidle driver (Artem Bityutskiy, Zhang Rui)

   - Do not enable interrupts when entering idle in the haltpoll cpuidle
     driver (Borislav Petkov)

   - Add Emerald Rapids support in no-HWP mode to the intel_pstate cpufreq
     driver (Zhenguo Yao)

   - Use EPP values programmed by the platform firmware as balanced
     performance ones by default in intel_pstate (Srinivas Pandruvada)

   - Add a missing function return value check to the SCMI cpufreq driver
     to avoid unexpected behavior (Alexandra Diupina)

   - Fix parameter type warning in the armada-8k cpufreq driver (Gregory
     CLEMENT)

   - Rework trans_stat_show() in the devfreq core code to avoid buffer
     overflows (Christian Marangi)

   - Synchronize devfreq_monitor_[start/stop] so as to prevent a timer
     list corruption from occurring when devfreq governors are switched
     frequently (Mukesh Ojha)

   - Fix possible deadlocks in the core system-wide PM code that occur if
     device-handling functions cannot be executed asynchronously during
     resume from system-wide suspend (Rafael J. Wysocki)

   - Clean up unnecessary local variable initializations in multiple
     places in the hibernation code (Wang chaodong, Li zeming)

   - Adjust core hibernation code to avoid missing wakeup events that
     occur after saving an image to persistent storage (Chris Feng)

   - Update hibernation code to enforce correct ordering during image
     compression and decompression (Hongchen Zhang)

   - Use kmap_local_page() instead of kmap_atomic() in copy_data_page()
     during hibernation and restore (Chen Haonan)

   - Adjust documentation and code comments to reflect recent tasks
     freezer changes (Kevin Hao)

   - Repair excess function parameter description warning in the
     hibernation image-saving code (Randy Dunlap)

   - Fix _set_required_opps when opp is NULL (Bryan O'Donoghue)

   - Use device_get_match_data() in the OPP code for TI (Rob Herring)

   - Clean up OPP level and other parts and call dev_pm_opp_set_opp()
     recursively for required OPPs (Viresh Kumar)"

* tag 'pm-6.8-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm: (35 commits)
  OPP: Rename 'rate_clk_single'
  OPP: Pass rounded rate to _set_opp()
  OPP: Relocate dev_pm_opp_sync_regulators()
  PM: sleep: Fix possible deadlocks in core system-wide PM code
  OPP: Move dev_pm_opp_icc_bw to internal opp.h
  async: Introduce async_schedule_dev_nocall()
  async: Split async_schedule_node_domain()
  cpuidle: haltpoll: Do not enable interrupts when entering idle
  OPP: Fix _set_required_opps when opp is NULL
  OPP: The level field is always of unsigned int type
  PM: hibernate: Repair excess function parameter description warning
  PM: sleep: Remove obsolete comment from unlock_system_sleep()
  cpufreq: intel_pstate: Add Emerald Rapids support in no-HWP mode
  Documentation: PM: Adjust freezing-of-tasks.rst to the freezer changes
  PM: hibernate: Use kmap_local_page() in copy_data_page()
  intel_idle: add Sierra Forest SoC support
  intel_idle: add Grand Ridge SoC support
  PM / devfreq: Synchronize devfreq_monitor_[start/stop]
  cpufreq: armada-8k: Fix parameter type warning
  PM: hibernate: Enforce ordering during image compression/decompression
  ...
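For context on the deadlock fix visible in the diff below: dpm_async_fn() is reworked so that the caller takes a device reference before queueing the async work and, if the new async_schedule_dev_nocall() reports that the work could not be queued, drops that reference and performs the resume synchronously itself, instead of the async core running the callback synchronously in the caller's context. The sketch below is a minimal userspace model of that flow, not kernel code; the reference counting is reduced to a plain counter, and schedule_async_nocall(), resume_device() and device_resume_entry() are hypothetical stand-ins for the real helpers shown in the diff.

/*
 * Userspace model of the reworked dpm_async_fn()/device_resume() flow
 * (hypothetical names, not the kernel API).
 */
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct device { int refcount; const char *name; };

static void get_device(struct device *dev) { dev->refcount++; }
static void put_device(struct device *dev) { dev->refcount--; }

static void resume_device(struct device *dev, bool async)
{
	printf("resuming %s (%s)\n", dev->name, async ? "async" : "sync");
}

static void async_resume(struct device *dev)
{
	resume_device(dev, true);
	put_device(dev);		/* drop the reference taken by the caller */
}

/*
 * Stand-in for async_schedule_dev_nocall(): returns false when the work
 * item cannot be queued (simulated here with rand()), instead of running
 * func(dev) synchronously in the caller's context.
 */
static bool schedule_async_nocall(void (*func)(struct device *), struct device *dev)
{
	if (rand() % 4 == 0)
		return false;		/* pretend the work-item allocation failed */
	func(dev);			/* the model just runs it; a real core queues it */
	return true;
}

static void device_resume_entry(struct device *dev)
{
	get_device(dev);			/* reference for the async worker */
	if (schedule_async_nocall(async_resume, dev))
		return;				/* the worker will drop the reference */

	put_device(dev);			/* queueing failed: undo the reference... */
	resume_device(dev, false);		/* ...and resume synchronously right here */
}

int main(void)
{
	struct device dev = { .refcount = 1, .name = "dev0" };

	device_resume_entry(&dev);
	return 0;
}

The point the model illustrates is that the only path that ever runs the resume routine synchronously is the caller itself, after undoing its extra reference, so the async machinery no longer executes PM callbacks in a context that may already hold the caller's locks.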
Diffstat (limited to 'drivers/base')
-rw-r--r--	drivers/base/power/main.c	148
1 file changed, 68 insertions(+), 80 deletions(-)
diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c
index f85f3515c258fc..9c5a5f4dba5a6e 100644
--- a/drivers/base/power/main.c
+++ b/drivers/base/power/main.c
@@ -579,7 +579,7 @@ bool dev_pm_skip_resume(struct device *dev)
}
/**
- * device_resume_noirq - Execute a "noirq resume" callback for given device.
+ * __device_resume_noirq - Execute a "noirq resume" callback for given device.
* @dev: Device to handle.
* @state: PM transition of the system being carried out.
* @async: If true, the device is being resumed asynchronously.
@@ -587,7 +587,7 @@ bool dev_pm_skip_resume(struct device *dev)
* The driver of @dev will not receive interrupts while this function is being
* executed.
*/
-static int device_resume_noirq(struct device *dev, pm_message_t state, bool async)
+static void __device_resume_noirq(struct device *dev, pm_message_t state, bool async)
{
pm_callback_t callback = NULL;
const char *info = NULL;
@@ -655,7 +655,13 @@ Skip:
Out:
complete_all(&dev->power.completion);
TRACE_RESUME(error);
- return error;
+
+ if (error) {
+ suspend_stats.failed_resume_noirq++;
+ dpm_save_failed_step(SUSPEND_RESUME_NOIRQ);
+ dpm_save_failed_dev(dev_name(dev));
+ pm_dev_err(dev, state, async ? " async noirq" : " noirq", error);
+ }
}
static bool is_async(struct device *dev)
@@ -668,11 +674,15 @@ static bool dpm_async_fn(struct device *dev, async_func_t func)
{
reinit_completion(&dev->power.completion);
- if (is_async(dev)) {
- get_device(dev);
- async_schedule_dev(func, dev);
+ if (!is_async(dev))
+ return false;
+
+ get_device(dev);
+
+ if (async_schedule_dev_nocall(func, dev))
return true;
- }
+
+ put_device(dev);
return false;
}
@@ -680,15 +690,19 @@ static bool dpm_async_fn(struct device *dev, async_func_t func)
static void async_resume_noirq(void *data, async_cookie_t cookie)
{
struct device *dev = data;
- int error;
-
- error = device_resume_noirq(dev, pm_transition, true);
- if (error)
- pm_dev_err(dev, pm_transition, " async", error);
+ __device_resume_noirq(dev, pm_transition, true);
put_device(dev);
}
+static void device_resume_noirq(struct device *dev)
+{
+ if (dpm_async_fn(dev, async_resume_noirq))
+ return;
+
+ __device_resume_noirq(dev, pm_transition, false);
+}
+
static void dpm_noirq_resume_devices(pm_message_t state)
{
struct device *dev;
@@ -698,14 +712,6 @@ static void dpm_noirq_resume_devices(pm_message_t state)
mutex_lock(&dpm_list_mtx);
pm_transition = state;
- /*
- * Advanced the async threads upfront,
- * in case the starting of async threads is
- * delayed by non-async resuming devices.
- */
- list_for_each_entry(dev, &dpm_noirq_list, power.entry)
- dpm_async_fn(dev, async_resume_noirq);
-
while (!list_empty(&dpm_noirq_list)) {
dev = to_device(dpm_noirq_list.next);
get_device(dev);
@@ -713,17 +719,7 @@ static void dpm_noirq_resume_devices(pm_message_t state)
mutex_unlock(&dpm_list_mtx);
- if (!is_async(dev)) {
- int error;
-
- error = device_resume_noirq(dev, state, false);
- if (error) {
- suspend_stats.failed_resume_noirq++;
- dpm_save_failed_step(SUSPEND_RESUME_NOIRQ);
- dpm_save_failed_dev(dev_name(dev));
- pm_dev_err(dev, state, " noirq", error);
- }
- }
+ device_resume_noirq(dev);
put_device(dev);
@@ -751,14 +747,14 @@ void dpm_resume_noirq(pm_message_t state)
}
/**
- * device_resume_early - Execute an "early resume" callback for given device.
+ * __device_resume_early - Execute an "early resume" callback for given device.
* @dev: Device to handle.
* @state: PM transition of the system being carried out.
* @async: If true, the device is being resumed asynchronously.
*
* Runtime PM is disabled for @dev while this function is being executed.
*/
-static int device_resume_early(struct device *dev, pm_message_t state, bool async)
+static void __device_resume_early(struct device *dev, pm_message_t state, bool async)
{
pm_callback_t callback = NULL;
const char *info = NULL;
@@ -811,21 +807,31 @@ Out:
pm_runtime_enable(dev);
complete_all(&dev->power.completion);
- return error;
+
+ if (error) {
+ suspend_stats.failed_resume_early++;
+ dpm_save_failed_step(SUSPEND_RESUME_EARLY);
+ dpm_save_failed_dev(dev_name(dev));
+ pm_dev_err(dev, state, async ? " async early" : " early", error);
+ }
}
static void async_resume_early(void *data, async_cookie_t cookie)
{
struct device *dev = data;
- int error;
-
- error = device_resume_early(dev, pm_transition, true);
- if (error)
- pm_dev_err(dev, pm_transition, " async", error);
+ __device_resume_early(dev, pm_transition, true);
put_device(dev);
}
+static void device_resume_early(struct device *dev)
+{
+ if (dpm_async_fn(dev, async_resume_early))
+ return;
+
+ __device_resume_early(dev, pm_transition, false);
+}
+
/**
* dpm_resume_early - Execute "early resume" callbacks for all devices.
* @state: PM transition of the system being carried out.
@@ -839,14 +845,6 @@ void dpm_resume_early(pm_message_t state)
mutex_lock(&dpm_list_mtx);
pm_transition = state;
- /*
- * Advanced the async threads upfront,
- * in case the starting of async threads is
- * delayed by non-async resuming devices.
- */
- list_for_each_entry(dev, &dpm_late_early_list, power.entry)
- dpm_async_fn(dev, async_resume_early);
-
while (!list_empty(&dpm_late_early_list)) {
dev = to_device(dpm_late_early_list.next);
get_device(dev);
@@ -854,17 +852,7 @@ void dpm_resume_early(pm_message_t state)
mutex_unlock(&dpm_list_mtx);
- if (!is_async(dev)) {
- int error;
-
- error = device_resume_early(dev, state, false);
- if (error) {
- suspend_stats.failed_resume_early++;
- dpm_save_failed_step(SUSPEND_RESUME_EARLY);
- dpm_save_failed_dev(dev_name(dev));
- pm_dev_err(dev, state, " early", error);
- }
- }
+ device_resume_early(dev);
put_device(dev);
@@ -888,12 +876,12 @@ void dpm_resume_start(pm_message_t state)
EXPORT_SYMBOL_GPL(dpm_resume_start);
/**
- * device_resume - Execute "resume" callbacks for given device.
+ * __device_resume - Execute "resume" callbacks for given device.
* @dev: Device to handle.
* @state: PM transition of the system being carried out.
* @async: If true, the device is being resumed asynchronously.
*/
-static int device_resume(struct device *dev, pm_message_t state, bool async)
+static void __device_resume(struct device *dev, pm_message_t state, bool async)
{
pm_callback_t callback = NULL;
const char *info = NULL;
@@ -975,20 +963,30 @@ static int device_resume(struct device *dev, pm_message_t state, bool async)
TRACE_RESUME(error);
- return error;
+ if (error) {
+ suspend_stats.failed_resume++;
+ dpm_save_failed_step(SUSPEND_RESUME);
+ dpm_save_failed_dev(dev_name(dev));
+ pm_dev_err(dev, state, async ? " async" : "", error);
+ }
}
static void async_resume(void *data, async_cookie_t cookie)
{
struct device *dev = data;
- int error;
- error = device_resume(dev, pm_transition, true);
- if (error)
- pm_dev_err(dev, pm_transition, " async", error);
+ __device_resume(dev, pm_transition, true);
put_device(dev);
}
+static void device_resume(struct device *dev)
+{
+ if (dpm_async_fn(dev, async_resume))
+ return;
+
+ __device_resume(dev, pm_transition, false);
+}
+
/**
* dpm_resume - Execute "resume" callbacks for non-sysdev devices.
* @state: PM transition of the system being carried out.
@@ -1008,27 +1006,17 @@ void dpm_resume(pm_message_t state)
pm_transition = state;
async_error = 0;
- list_for_each_entry(dev, &dpm_suspended_list, power.entry)
- dpm_async_fn(dev, async_resume);
-
while (!list_empty(&dpm_suspended_list)) {
dev = to_device(dpm_suspended_list.next);
+
get_device(dev);
- if (!is_async(dev)) {
- int error;
- mutex_unlock(&dpm_list_mtx);
+ mutex_unlock(&dpm_list_mtx);
+
+ device_resume(dev);
- error = device_resume(dev, state, false);
- if (error) {
- suspend_stats.failed_resume++;
- dpm_save_failed_step(SUSPEND_RESUME);
- dpm_save_failed_dev(dev_name(dev));
- pm_dev_err(dev, state, "", error);
- }
+ mutex_lock(&dpm_list_mtx);
- mutex_lock(&dpm_list_mtx);
- }
if (!list_empty(&dev->power.entry))
list_move_tail(&dev->power.entry, &dpm_prepared_list);