From 01db417e0aee3e51df4f5f3775535fd1fb15e329 Mon Sep 17 00:00:00 2001
From: Daniel Baumann
Date: Wed, 8 May 2024 19:45:30 +0200
Subject: Merging upstream version 5.10.216.

Signed-off-by: Daniel Baumann
---
 drivers/base/power/main.c | 251 +++++++++++++++++++++++++---------------------
 1 file changed, 135 insertions(+), 116 deletions(-)

diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c
index 1dbaaddf5..fbc57c4fc 100644
--- a/drivers/base/power/main.c
+++ b/drivers/base/power/main.c
@@ -16,6 +16,7 @@
  */
 
 #define pr_fmt(fmt) "PM: " fmt
+#define dev_fmt pr_fmt
 
 #include <linux/device.h>
 #include <linux/export.h>
@@ -449,8 +450,8 @@ static void pm_dev_dbg(struct device *dev, pm_message_t state, const char *info)
 static void pm_dev_err(struct device *dev, pm_message_t state,
 			const char *info, int error)
 {
-	pr_err("Device %s failed to %s%s: error %d\n",
-	       dev_name(dev), pm_verb(state.event), info, error);
+	dev_err(dev, "failed to %s%s: error %d\n", pm_verb(state.event), info,
+		error);
 }
 
 static void dpm_show_time(ktime_t starttime, pm_message_t state, int error,
@@ -582,7 +583,7 @@ bool dev_pm_skip_resume(struct device *dev)
 }
 
 /**
- * device_resume_noirq - Execute a "noirq resume" callback for given device.
+ * __device_resume_noirq - Execute a "noirq resume" callback for given device.
  * @dev: Device to handle.
  * @state: PM transition of the system being carried out.
  * @async: If true, the device is being resumed asynchronously.
@@ -590,7 +591,7 @@ bool dev_pm_skip_resume(struct device *dev)
  * The driver of @dev will not receive interrupts while this function is being
  * executed.
  */
-static int device_resume_noirq(struct device *dev, pm_message_t state, bool async)
+static void __device_resume_noirq(struct device *dev, pm_message_t state, bool async)
 {
 	pm_callback_t callback = NULL;
 	const char *info = NULL;
@@ -658,7 +659,13 @@ Skip:
 Out:
 	complete_all(&dev->power.completion);
 	TRACE_RESUME(error);
-	return error;
+
+	if (error) {
+		suspend_stats.failed_resume_noirq++;
+		dpm_save_failed_step(SUSPEND_RESUME_NOIRQ);
+		dpm_save_failed_dev(dev_name(dev));
+		pm_dev_err(dev, state, async ? " async noirq" : " noirq", error);
+	}
 }
 
 static bool is_async(struct device *dev)
@@ -671,27 +678,35 @@ static bool dpm_async_fn(struct device *dev, async_func_t func)
 {
 	reinit_completion(&dev->power.completion);
 
-	if (is_async(dev)) {
-		get_device(dev);
-		async_schedule_dev(func, dev);
+	if (!is_async(dev))
+		return false;
+
+	get_device(dev);
+
+	if (async_schedule_dev_nocall(func, dev))
 		return true;
-	}
+
+	put_device(dev);
 
 	return false;
 }
 
 static void async_resume_noirq(void *data, async_cookie_t cookie)
 {
-	struct device *dev = (struct device *)data;
-	int error;
-
-	error = device_resume_noirq(dev, pm_transition, true);
-	if (error)
-		pm_dev_err(dev, pm_transition, " async", error);
+	struct device *dev = data;
 
+	__device_resume_noirq(dev, pm_transition, true);
 	put_device(dev);
 }
 
+static void device_resume_noirq(struct device *dev)
+{
+	if (dpm_async_fn(dev, async_resume_noirq))
+		return;
+
+	__device_resume_noirq(dev, pm_transition, false);
+}
+
 static void dpm_noirq_resume_devices(pm_message_t state)
 {
 	struct device *dev;
@@ -701,34 +716,18 @@ static void dpm_noirq_resume_devices(pm_message_t state)
 	mutex_lock(&dpm_list_mtx);
 	pm_transition = state;
 
-	/*
-	 * Advanced the async threads upfront,
-	 * in case the starting of async threads is
-	 * delayed by non-async resuming devices.
- */ - list_for_each_entry(dev, &dpm_noirq_list, power.entry) - dpm_async_fn(dev, async_resume_noirq); - while (!list_empty(&dpm_noirq_list)) { dev = to_device(dpm_noirq_list.next); get_device(dev); list_move_tail(&dev->power.entry, &dpm_late_early_list); + mutex_unlock(&dpm_list_mtx); - if (!is_async(dev)) { - int error; + device_resume_noirq(dev); - error = device_resume_noirq(dev, state, false); - if (error) { - suspend_stats.failed_resume_noirq++; - dpm_save_failed_step(SUSPEND_RESUME_NOIRQ); - dpm_save_failed_dev(dev_name(dev)); - pm_dev_err(dev, state, " noirq", error); - } - } + put_device(dev); mutex_lock(&dpm_list_mtx); - put_device(dev); } mutex_unlock(&dpm_list_mtx); async_synchronize_full(); @@ -754,14 +753,14 @@ void dpm_resume_noirq(pm_message_t state) } /** - * device_resume_early - Execute an "early resume" callback for given device. + * __device_resume_early - Execute an "early resume" callback for given device. * @dev: Device to handle. * @state: PM transition of the system being carried out. * @async: If true, the device is being resumed asynchronously. * * Runtime PM is disabled for @dev while this function is being executed. */ -static int device_resume_early(struct device *dev, pm_message_t state, bool async) +static void __device_resume_early(struct device *dev, pm_message_t state, bool async) { pm_callback_t callback = NULL; const char *info = NULL; @@ -814,21 +813,31 @@ Out: pm_runtime_enable(dev); complete_all(&dev->power.completion); - return error; + + if (error) { + suspend_stats.failed_resume_early++; + dpm_save_failed_step(SUSPEND_RESUME_EARLY); + dpm_save_failed_dev(dev_name(dev)); + pm_dev_err(dev, state, async ? " async early" : " early", error); + } } static void async_resume_early(void *data, async_cookie_t cookie) { - struct device *dev = (struct device *)data; - int error; - - error = device_resume_early(dev, pm_transition, true); - if (error) - pm_dev_err(dev, pm_transition, " async", error); + struct device *dev = data; + __device_resume_early(dev, pm_transition, true); put_device(dev); } +static void device_resume_early(struct device *dev) +{ + if (dpm_async_fn(dev, async_resume_early)) + return; + + __device_resume_early(dev, pm_transition, false); +} + /** * dpm_resume_early - Execute "early resume" callbacks for all devices. * @state: PM transition of the system being carried out. @@ -842,33 +851,18 @@ void dpm_resume_early(pm_message_t state) mutex_lock(&dpm_list_mtx); pm_transition = state; - /* - * Advanced the async threads upfront, - * in case the starting of async threads is - * delayed by non-async resuming devices. - */ - list_for_each_entry(dev, &dpm_late_early_list, power.entry) - dpm_async_fn(dev, async_resume_early); - while (!list_empty(&dpm_late_early_list)) { dev = to_device(dpm_late_early_list.next); get_device(dev); list_move_tail(&dev->power.entry, &dpm_suspended_list); + mutex_unlock(&dpm_list_mtx); - if (!is_async(dev)) { - int error; + device_resume_early(dev); - error = device_resume_early(dev, state, false); - if (error) { - suspend_stats.failed_resume_early++; - dpm_save_failed_step(SUSPEND_RESUME_EARLY); - dpm_save_failed_dev(dev_name(dev)); - pm_dev_err(dev, state, " early", error); - } - } - mutex_lock(&dpm_list_mtx); put_device(dev); + + mutex_lock(&dpm_list_mtx); } mutex_unlock(&dpm_list_mtx); async_synchronize_full(); @@ -888,12 +882,12 @@ void dpm_resume_start(pm_message_t state) EXPORT_SYMBOL_GPL(dpm_resume_start); /** - * device_resume - Execute "resume" callbacks for given device. 
+ * __device_resume - Execute "resume" callbacks for given device.
  * @dev: Device to handle.
  * @state: PM transition of the system being carried out.
  * @async: If true, the device is being resumed asynchronously.
  */
-static int device_resume(struct device *dev, pm_message_t state, bool async)
+static void __device_resume(struct device *dev, pm_message_t state, bool async)
 {
 	pm_callback_t callback = NULL;
 	const char *info = NULL;
@@ -975,20 +969,30 @@ static int device_resume(struct device *dev, pm_message_t state, bool async)
 
 	TRACE_RESUME(error);
 
-	return error;
+	if (error) {
+		suspend_stats.failed_resume++;
+		dpm_save_failed_step(SUSPEND_RESUME);
+		dpm_save_failed_dev(dev_name(dev));
+		pm_dev_err(dev, state, async ? " async" : "", error);
+	}
 }
 
 static void async_resume(void *data, async_cookie_t cookie)
 {
-	struct device *dev = (struct device *)data;
-	int error;
+	struct device *dev = data;
 
-	error = device_resume(dev, pm_transition, true);
-	if (error)
-		pm_dev_err(dev, pm_transition, " async", error);
+	__device_resume(dev, pm_transition, true);
 	put_device(dev);
 }
 
+static void device_resume(struct device *dev)
+{
+	if (dpm_async_fn(dev, async_resume))
+		return;
+
+	__device_resume(dev, pm_transition, false);
+}
+
 /**
  * dpm_resume - Execute "resume" callbacks for non-sysdev devices.
  * @state: PM transition of the system being carried out.
@@ -1008,30 +1012,25 @@ void dpm_resume(pm_message_t state)
 	pm_transition = state;
 	async_error = 0;
 
-	list_for_each_entry(dev, &dpm_suspended_list, power.entry)
-		dpm_async_fn(dev, async_resume);
-
 	while (!list_empty(&dpm_suspended_list)) {
 		dev = to_device(dpm_suspended_list.next);
+
 		get_device(dev);
-		if (!is_async(dev)) {
-			int error;
 
-			mutex_unlock(&dpm_list_mtx);
+		mutex_unlock(&dpm_list_mtx);
 
-			error = device_resume(dev, state, false);
-			if (error) {
-				suspend_stats.failed_resume++;
-				dpm_save_failed_step(SUSPEND_RESUME);
-				dpm_save_failed_dev(dev_name(dev));
-				pm_dev_err(dev, state, "", error);
-			}
+		device_resume(dev);
+
+		mutex_lock(&dpm_list_mtx);
 
-			mutex_lock(&dpm_list_mtx);
-		}
 		if (!list_empty(&dev->power.entry))
 			list_move_tail(&dev->power.entry, &dpm_prepared_list);
+
+		mutex_unlock(&dpm_list_mtx);
+
 		put_device(dev);
+
+		mutex_lock(&dpm_list_mtx);
 	}
 	mutex_unlock(&dpm_list_mtx);
 	async_synchronize_full();
@@ -1109,14 +1108,16 @@ void dpm_complete(pm_message_t state)
 		get_device(dev);
 		dev->power.is_prepared = false;
 		list_move(&dev->power.entry, &list);
+
 		mutex_unlock(&dpm_list_mtx);
 
 		trace_device_pm_callback_start(dev, "", state.event);
 		device_complete(dev, state);
 		trace_device_pm_callback_end(dev, 0);
 
-		mutex_lock(&dpm_list_mtx);
 		put_device(dev);
+
+		mutex_lock(&dpm_list_mtx);
 	}
 	list_splice(&list, &dpm_list);
 	mutex_unlock(&dpm_list_mtx);
@@ -1262,7 +1263,7 @@ Complete:
 
 static void async_suspend_noirq(void *data, async_cookie_t cookie)
 {
-	struct device *dev = (struct device *)data;
+	struct device *dev = data;
 	int error;
 
 	error = __device_suspend_noirq(dev, pm_transition, true);
@@ -1301,17 +1302,21 @@ static int dpm_noirq_suspend_devices(pm_message_t state)
 		error = device_suspend_noirq(dev);
 
 		mutex_lock(&dpm_list_mtx);
+
 		if (error) {
 			pm_dev_err(dev, state, " noirq", error);
 			dpm_save_failed_dev(dev_name(dev));
-			put_device(dev);
-			break;
-		}
-		if (!list_empty(&dev->power.entry))
+		} else if (!list_empty(&dev->power.entry)) {
 			list_move(&dev->power.entry, &dpm_noirq_list);
+		}
+
+		mutex_unlock(&dpm_list_mtx);
+
 		put_device(dev);
 
-		if (async_error)
+		mutex_lock(&dpm_list_mtx);
+
+		if (error || async_error)
 			break;
	}
	mutex_unlock(&dpm_list_mtx);
@@ -1441,7 +1446,7 @@ Complete:
 
 static void async_suspend_late(void *data, async_cookie_t cookie)
 {
-	struct device *dev = (struct device *)data;
+	struct device *dev = data;
 	int error;
 
 	error = __device_suspend_late(dev, pm_transition, true);
@@ -1478,23 +1483,28 @@ int dpm_suspend_late(pm_message_t state)
 		struct device *dev = to_device(dpm_suspended_list.prev);
 
 		get_device(dev);
+
 		mutex_unlock(&dpm_list_mtx);
 
 		error = device_suspend_late(dev);
 
 		mutex_lock(&dpm_list_mtx);
+
 		if (!list_empty(&dev->power.entry))
 			list_move(&dev->power.entry, &dpm_late_early_list);
 
 		if (error) {
 			pm_dev_err(dev, state, " late", error);
 			dpm_save_failed_dev(dev_name(dev));
-			put_device(dev);
-			break;
 		}
+
+		mutex_unlock(&dpm_list_mtx);
+
 		put_device(dev);
 
-		if (async_error)
+		mutex_lock(&dpm_list_mtx);
+
+		if (error || async_error)
 			break;
 	}
 	mutex_unlock(&dpm_list_mtx);
@@ -1712,7 +1722,7 @@ static int __device_suspend(struct device *dev, pm_message_t state, bool async)
 
 static void async_suspend(void *data, async_cookie_t cookie)
 {
-	struct device *dev = (struct device *)data;
+	struct device *dev = data;
 	int error;
 
 	error = __device_suspend(dev, pm_transition, true);
@@ -1754,21 +1764,27 @@ int dpm_suspend(pm_message_t state)
 		struct device *dev = to_device(dpm_prepared_list.prev);
 
 		get_device(dev);
+
 		mutex_unlock(&dpm_list_mtx);
 
 		error = device_suspend(dev);
 
 		mutex_lock(&dpm_list_mtx);
+
 		if (error) {
 			pm_dev_err(dev, state, "", error);
 			dpm_save_failed_dev(dev_name(dev));
-			put_device(dev);
-			break;
-		}
-		if (!list_empty(&dev->power.entry))
+		} else if (!list_empty(&dev->power.entry)) {
 			list_move(&dev->power.entry, &dpm_suspended_list);
+		}
+
+		mutex_unlock(&dpm_list_mtx);
+
 		put_device(dev);
 
-		if (async_error)
+		mutex_lock(&dpm_list_mtx);
+
+		if (error || async_error)
 			break;
 	}
 	mutex_unlock(&dpm_list_mtx);
@@ -1881,10 +1897,11 @@ int dpm_prepare(pm_message_t state)
 	device_block_probing();
 
 	mutex_lock(&dpm_list_mtx);
-	while (!list_empty(&dpm_list)) {
+	while (!list_empty(&dpm_list) && !error) {
 		struct device *dev = to_device(dpm_list.next);
 
 		get_device(dev);
+
 		mutex_unlock(&dpm_list_mtx);
 
 		trace_device_pm_callback_start(dev, "", state.event);
@@ -1892,21 +1909,23 @@ int dpm_prepare(pm_message_t state)
 		trace_device_pm_callback_end(dev, error);
 
 		mutex_lock(&dpm_list_mtx);
-		if (error) {
-			if (error == -EAGAIN) {
-				put_device(dev);
-				error = 0;
-				continue;
-			}
-			pr_info("Device %s not prepared for power transition: code %d\n",
-				dev_name(dev), error);
-			put_device(dev);
-			break;
+
+		if (!error) {
+			dev->power.is_prepared = true;
+			if (!list_empty(&dev->power.entry))
+				list_move_tail(&dev->power.entry, &dpm_prepared_list);
+		} else if (error == -EAGAIN) {
+			error = 0;
+		} else {
+			dev_info(dev, "not prepared for power transition: code %d\n",
+				 error);
		}
-		dev->power.is_prepared = true;
-		if (!list_empty(&dev->power.entry))
-			list_move_tail(&dev->power.entry, &dpm_prepared_list);
+
+		mutex_unlock(&dpm_list_mtx);
+
+		put_device(dev);
+
+		mutex_lock(&dpm_list_mtx);
	}
	mutex_unlock(&dpm_list_mtx);
	trace_suspend_resume(TPS("dpm_prepare"), state.event, false);
-- 
cgit v1.2.3