author     Daniel Baumann <daniel.baumann@progress-linux.org>  2024-05-18 18:50:03 +0000
committer  Daniel Baumann <daniel.baumann@progress-linux.org>  2024-05-18 18:50:03 +0000
commit     01a69402cf9d38ff180345d55c2ee51c7e89fbc7 (patch)
tree       b406c5242a088c4f59c6e4b719b783f43aca6ae9 /drivers/base/power/main.c
parent     Adding upstream version 6.7.12. (diff)
download   linux-01a69402cf9d38ff180345d55c2ee51c7e89fbc7.tar.xz
           linux-01a69402cf9d38ff180345d55c2ee51c7e89fbc7.zip
Adding upstream version 6.8.9. (tag: upstream/6.8.9)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'drivers/base/power/main.c')
-rw-r--r--  drivers/base/power/main.c  117
1 file changed, 64 insertions, 53 deletions
diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c
index 9c5a5f4dba..fadcd0379d 100644
--- a/drivers/base/power/main.c
+++ b/drivers/base/power/main.c
@@ -579,7 +579,7 @@ bool dev_pm_skip_resume(struct device *dev)
 }
 
 /**
- * __device_resume_noirq - Execute a "noirq resume" callback for given device.
+ * device_resume_noirq - Execute a "noirq resume" callback for given device.
  * @dev: Device to handle.
  * @state: PM transition of the system being carried out.
  * @async: If true, the device is being resumed asynchronously.
@@ -587,7 +587,7 @@ bool dev_pm_skip_resume(struct device *dev)
  * The driver of @dev will not receive interrupts while this function is being
  * executed.
  */
-static void __device_resume_noirq(struct device *dev, pm_message_t state, bool async)
+static void device_resume_noirq(struct device *dev, pm_message_t state, bool async)
 {
 	pm_callback_t callback = NULL;
 	const char *info = NULL;
@@ -674,16 +674,22 @@ static bool dpm_async_fn(struct device *dev, async_func_t func)
 {
 	reinit_completion(&dev->power.completion);
 
-	if (!is_async(dev))
-		return false;
-
-	get_device(dev);
+	if (is_async(dev)) {
+		dev->power.async_in_progress = true;
 
-	if (async_schedule_dev_nocall(func, dev))
-		return true;
+		get_device(dev);
 
-	put_device(dev);
+		if (async_schedule_dev_nocall(func, dev))
+			return true;
 
+		put_device(dev);
+	}
+	/*
+	 * Because async_schedule_dev_nocall() above has returned false or it
+	 * has not been called at all, func() is not running and it is safe to
+	 * update the async_in_progress flag without extra synchronization.
+	 */
+	dev->power.async_in_progress = false;
 	return false;
 }
 
@@ -691,18 +697,10 @@ static void async_resume_noirq(void *data, async_cookie_t cookie)
 {
 	struct device *dev = data;
 
-	__device_resume_noirq(dev, pm_transition, true);
+	device_resume_noirq(dev, pm_transition, true);
 	put_device(dev);
 }
 
-static void device_resume_noirq(struct device *dev)
-{
-	if (dpm_async_fn(dev, async_resume_noirq))
-		return;
-
-	__device_resume_noirq(dev, pm_transition, false);
-}
-
 static void dpm_noirq_resume_devices(pm_message_t state)
 {
 	struct device *dev;
@@ -712,18 +710,28 @@ static void dpm_noirq_resume_devices(pm_message_t state)
 	mutex_lock(&dpm_list_mtx);
 	pm_transition = state;
 
+	/*
+	 * Trigger the resume of "async" devices upfront so they don't have to
+	 * wait for the "non-async" ones they don't depend on.
+	 */
+	list_for_each_entry(dev, &dpm_noirq_list, power.entry)
+		dpm_async_fn(dev, async_resume_noirq);
+
 	while (!list_empty(&dpm_noirq_list)) {
 		dev = to_device(dpm_noirq_list.next);
-		get_device(dev);
 		list_move_tail(&dev->power.entry, &dpm_late_early_list);
 
-		mutex_unlock(&dpm_list_mtx);
+		if (!dev->power.async_in_progress) {
+			get_device(dev);
 
-		device_resume_noirq(dev);
+			mutex_unlock(&dpm_list_mtx);
 
-		put_device(dev);
+			device_resume_noirq(dev, state, false);
 
-		mutex_lock(&dpm_list_mtx);
+			put_device(dev);
+
+			mutex_lock(&dpm_list_mtx);
+		}
 	}
 	mutex_unlock(&dpm_list_mtx);
 	async_synchronize_full();
@@ -747,14 +755,14 @@ void dpm_resume_noirq(pm_message_t state)
 }
 
 /**
- * __device_resume_early - Execute an "early resume" callback for given device.
+ * device_resume_early - Execute an "early resume" callback for given device.
  * @dev: Device to handle.
  * @state: PM transition of the system being carried out.
  * @async: If true, the device is being resumed asynchronously.
  *
  * Runtime PM is disabled for @dev while this function is being executed.
  */
-static void __device_resume_early(struct device *dev, pm_message_t state, bool async)
+static void device_resume_early(struct device *dev, pm_message_t state, bool async)
 {
 	pm_callback_t callback = NULL;
 	const char *info = NULL;
@@ -820,18 +828,10 @@ static void async_resume_early(void *data, async_cookie_t cookie)
 {
 	struct device *dev = data;
 
-	__device_resume_early(dev, pm_transition, true);
+	device_resume_early(dev, pm_transition, true);
 	put_device(dev);
 }
 
-static void device_resume_early(struct device *dev)
-{
-	if (dpm_async_fn(dev, async_resume_early))
-		return;
-
-	__device_resume_early(dev, pm_transition, false);
-}
-
 /**
  * dpm_resume_early - Execute "early resume" callbacks for all devices.
  * @state: PM transition of the system being carried out.
@@ -845,18 +845,28 @@ void dpm_resume_early(pm_message_t state)
 	mutex_lock(&dpm_list_mtx);
 	pm_transition = state;
 
+	/*
+	 * Trigger the resume of "async" devices upfront so they don't have to
+	 * wait for the "non-async" ones they don't depend on.
+	 */
+	list_for_each_entry(dev, &dpm_late_early_list, power.entry)
+		dpm_async_fn(dev, async_resume_early);
+
 	while (!list_empty(&dpm_late_early_list)) {
 		dev = to_device(dpm_late_early_list.next);
-		get_device(dev);
 		list_move_tail(&dev->power.entry, &dpm_suspended_list);
 
-		mutex_unlock(&dpm_list_mtx);
+		if (!dev->power.async_in_progress) {
+			get_device(dev);
 
-		device_resume_early(dev);
+			mutex_unlock(&dpm_list_mtx);
 
-		put_device(dev);
+			device_resume_early(dev, state, false);
 
-		mutex_lock(&dpm_list_mtx);
+			put_device(dev);
+
+			mutex_lock(&dpm_list_mtx);
+		}
 	}
 	mutex_unlock(&dpm_list_mtx);
 	async_synchronize_full();
@@ -876,12 +886,12 @@ void dpm_resume_start(pm_message_t state)
 EXPORT_SYMBOL_GPL(dpm_resume_start);
 
 /**
- * __device_resume - Execute "resume" callbacks for given device.
+ * device_resume - Execute "resume" callbacks for given device.
  * @dev: Device to handle.
  * @state: PM transition of the system being carried out.
  * @async: If true, the device is being resumed asynchronously.
  */
-static void __device_resume(struct device *dev, pm_message_t state, bool async)
+static void device_resume(struct device *dev, pm_message_t state, bool async)
 {
 	pm_callback_t callback = NULL;
 	const char *info = NULL;
@@ -975,18 +985,10 @@ static void async_resume(void *data, async_cookie_t cookie)
 {
 	struct device *dev = data;
 
-	__device_resume(dev, pm_transition, true);
+	device_resume(dev, pm_transition, true);
 	put_device(dev);
 }
 
-static void device_resume(struct device *dev)
-{
-	if (dpm_async_fn(dev, async_resume))
-		return;
-
-	__device_resume(dev, pm_transition, false);
-}
-
 /**
  * dpm_resume - Execute "resume" callbacks for non-sysdev devices.
  * @state: PM transition of the system being carried out.
@@ -1006,16 +1008,25 @@ void dpm_resume(pm_message_t state)
 	pm_transition = state;
 	async_error = 0;
 
+	/*
+	 * Trigger the resume of "async" devices upfront so they don't have to
+	 * wait for the "non-async" ones they don't depend on.
+	 */
+	list_for_each_entry(dev, &dpm_suspended_list, power.entry)
+		dpm_async_fn(dev, async_resume);
+
 	while (!list_empty(&dpm_suspended_list)) {
 		dev = to_device(dpm_suspended_list.next);
 
 		get_device(dev);
 
-		mutex_unlock(&dpm_list_mtx);
+		if (!dev->power.async_in_progress) {
+			mutex_unlock(&dpm_list_mtx);
 
-		device_resume(dev);
+			device_resume(dev, state, false);
 
-		mutex_lock(&dpm_list_mtx);
+			mutex_lock(&dpm_list_mtx);
+		}
 
 		if (!list_empty(&dev->power.entry))
 			list_move_tail(&dev->power.entry, &dpm_prepared_list);
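The patch applies one pattern to all three resume phases (noirq, early, full): instead of each per-device wrapper deciding between a synchronous call and scheduling async work, every phase first walks its device list and lets dpm_async_fn() schedule the async-capable devices, marking them with power.async_in_progress; the main loop then only resumes devices that no async worker has claimed, and async_synchronize_full() waits for the rest. The userspace C sketch below mirrors that pattern with POSIX threads to make the control flow visible in isolation; all names in it (fake_device, sched_async, resume_one, async_resume) are hypothetical stand-ins, not kernel API.

/*
 * Hypothetical userspace sketch of the "schedule async devices upfront,
 * then handle the rest synchronously" pattern from this patch.
 * Illustrative names only; this is not the kernel's implementation.
 * Build with: cc sketch.c -lpthread
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct fake_device {
	const char *name;
	bool is_async;          /* plays the role of is_async(dev) */
	bool async_in_progress; /* plays the role of dev->power.async_in_progress */
	pthread_t thread;
};

static void resume_one(struct fake_device *dev, bool async)
{
	printf("resuming %s (%s)\n", dev->name, async ? "async" : "sync");
}

static void *async_resume(void *data)
{
	resume_one(data, true);
	return NULL;
}

/* Mirrors dpm_async_fn(): try to schedule, record whether it worked. */
static bool sched_async(struct fake_device *dev)
{
	if (dev->is_async) {
		dev->async_in_progress = true;
		if (pthread_create(&dev->thread, NULL, async_resume, dev) == 0)
			return true;
	}
	/* Not scheduled: the main loop must resume this device itself. */
	dev->async_in_progress = false;
	return false;
}

int main(void)
{
	struct fake_device devs[] = {
		{ .name = "devA", .is_async = true  },
		{ .name = "devB", .is_async = false },
		{ .name = "devC", .is_async = true  },
	};
	int i, n = sizeof(devs) / sizeof(devs[0]);

	/* Phase 1: trigger async resumes upfront (cf. list_for_each_entry). */
	for (i = 0; i < n; i++)
		sched_async(&devs[i]);

	/* Phase 2: resume whatever no async worker has claimed. */
	for (i = 0; i < n; i++)
		if (!devs[i].async_in_progress)
			resume_one(&devs[i], false);

	/* Cf. async_synchronize_full(): wait for all async work to finish. */
	for (i = 0; i < n; i++)
		if (devs[i].async_in_progress)
			pthread_join(devs[i].thread, NULL);

	return 0;
}

The point of the upfront pass is latency: previously an async-capable device far down the list could not start resuming until the synchronous loop reached it, so it waited behind unrelated devices. Scheduling all async devices first lets their resumes overlap with the synchronous walk, which is exactly what the patch's own comment ("so they don't have to wait for the 'non-async' ones they don't depend on") describes.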