Diffstat (limited to 'drivers/base/power')
-rw-r--r--  drivers/base/power/Makefile            |    9
-rw-r--r--  drivers/base/power/clock_ops.c         |  647
-rw-r--r--  drivers/base/power/common.c            |  230
-rw-r--r--  drivers/base/power/domain.c            | 3223
-rw-r--r--  drivers/base/power/domain_governor.c   |  321
-rw-r--r--  drivers/base/power/generic_ops.c       |  298
-rw-r--r--  drivers/base/power/main.c              | 2015
-rw-r--r--  drivers/base/power/power.h             |  170
-rw-r--r--  drivers/base/power/qos-test.c          |  117
-rw-r--r--  drivers/base/power/qos.c               |  982
-rw-r--r--  drivers/base/power/runtime.c           | 1892
-rw-r--r--  drivers/base/power/sysfs.c             |  838
-rw-r--r--  drivers/base/power/trace.c             |  307
-rw-r--r--  drivers/base/power/wakeirq.c           |  412
-rw-r--r--  drivers/base/power/wakeup.c            | 1214
-rw-r--r--  drivers/base/power/wakeup_stats.c      |  217
16 files changed, 12892 insertions, 0 deletions
diff --git a/drivers/base/power/Makefile b/drivers/base/power/Makefile
new file mode 100644
index 000000000..8fdd0073e
--- /dev/null
+++ b/drivers/base/power/Makefile
@@ -0,0 +1,9 @@
+# SPDX-License-Identifier: GPL-2.0
+obj-$(CONFIG_PM) += sysfs.o generic_ops.o common.o qos.o runtime.o wakeirq.o
+obj-$(CONFIG_PM_SLEEP) += main.o wakeup.o wakeup_stats.o
+obj-$(CONFIG_PM_TRACE_RTC) += trace.o
+obj-$(CONFIG_PM_GENERIC_DOMAINS) += domain.o domain_governor.o
+obj-$(CONFIG_HAVE_CLK) += clock_ops.o
+obj-$(CONFIG_PM_QOS_KUNIT_TEST) += qos-test.o
+
+ccflags-$(CONFIG_DEBUG_DRIVER) := -DDEBUG
diff --git a/drivers/base/power/clock_ops.c b/drivers/base/power/clock_ops.c
new file mode 100644
index 000000000..ced6863a1
--- /dev/null
+++ b/drivers/base/power/clock_ops.c
@@ -0,0 +1,647 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * drivers/base/power/clock_ops.c - Generic clock manipulation PM callbacks
+ *
+ * Copyright (c) 2011 Rafael J. Wysocki <rjw@sisk.pl>, Renesas Electronics Corp.
+ */
+
+#include <linux/kernel.h>
+#include <linux/device.h>
+#include <linux/io.h>
+#include <linux/pm.h>
+#include <linux/pm_clock.h>
+#include <linux/clk.h>
+#include <linux/clkdev.h>
+#include <linux/of_clk.h>
+#include <linux/slab.h>
+#include <linux/err.h>
+#include <linux/pm_domain.h>
+#include <linux/pm_runtime.h>
+
+#ifdef CONFIG_PM_CLK
+
+enum pce_status {
+ PCE_STATUS_NONE = 0,
+ PCE_STATUS_ACQUIRED,
+ PCE_STATUS_ENABLED,
+ PCE_STATUS_ERROR,
+};
+
+struct pm_clock_entry {
+ struct list_head node;
+ char *con_id;
+ struct clk *clk;
+ enum pce_status status;
+};
+
+/**
+ * __pm_clk_enable - Enable a clock, reporting any errors.
+ * @dev: The device for the given clock
+ * @ce: PM clock entry corresponding to the clock.
+ */
+static inline void __pm_clk_enable(struct device *dev, struct pm_clock_entry *ce)
+{
+ int ret;
+
+ if (ce->status < PCE_STATUS_ERROR) {
+ ret = clk_enable(ce->clk);
+ if (!ret)
+ ce->status = PCE_STATUS_ENABLED;
+ else
+ dev_err(dev, "%s: failed to enable clk %p, error %d\n",
+ __func__, ce->clk, ret);
+ }
+}
+
+/**
+ * pm_clk_acquire - Acquire a device clock.
+ * @dev: Device whose clock is to be acquired.
+ * @ce: PM clock entry corresponding to the clock.
+ */
+static void pm_clk_acquire(struct device *dev, struct pm_clock_entry *ce)
+{
+ if (!ce->clk)
+ ce->clk = clk_get(dev, ce->con_id);
+ if (IS_ERR(ce->clk)) {
+ ce->status = PCE_STATUS_ERROR;
+ } else {
+ if (clk_prepare(ce->clk)) {
+ ce->status = PCE_STATUS_ERROR;
+ dev_err(dev, "clk_prepare() failed\n");
+ } else {
+ ce->status = PCE_STATUS_ACQUIRED;
+ dev_dbg(dev,
+ "Clock %pC con_id %s managed by runtime PM.\n",
+ ce->clk, ce->con_id);
+ }
+ }
+}
+
+static int __pm_clk_add(struct device *dev, const char *con_id,
+ struct clk *clk)
+{
+ struct pm_subsys_data *psd = dev_to_psd(dev);
+ struct pm_clock_entry *ce;
+
+ if (!psd)
+ return -EINVAL;
+
+ ce = kzalloc(sizeof(*ce), GFP_KERNEL);
+ if (!ce)
+ return -ENOMEM;
+
+ if (con_id) {
+ ce->con_id = kstrdup(con_id, GFP_KERNEL);
+ if (!ce->con_id) {
+ kfree(ce);
+ return -ENOMEM;
+ }
+ } else {
+ if (IS_ERR(clk)) {
+ kfree(ce);
+ return -ENOENT;
+ }
+ ce->clk = clk;
+ }
+
+ pm_clk_acquire(dev, ce);
+
+ spin_lock_irq(&psd->lock);
+ list_add_tail(&ce->node, &psd->clock_list);
+ spin_unlock_irq(&psd->lock);
+ return 0;
+}
+
+/**
+ * pm_clk_add - Start using a device clock for power management.
+ * @dev: Device whose clock is going to be used for power management.
+ * @con_id: Connection ID of the clock.
+ *
+ * Add the clock represented by @con_id to the list of clocks used for
+ * the power management of @dev.
+ */
+int pm_clk_add(struct device *dev, const char *con_id)
+{
+ return __pm_clk_add(dev, con_id, NULL);
+}
+EXPORT_SYMBOL_GPL(pm_clk_add);
+
+/**
+ * pm_clk_add_clk - Start using a device clock for power management.
+ * @dev: Device whose clock is going to be used for power management.
+ * @clk: Clock pointer
+ *
+ * Add the clock to the list of clocks used for the power management of @dev.
+ * The power-management code will take control of the clock reference, so
+ * callers should not call clk_put() on @clk after this function successfully
+ * returned.
+ */
+int pm_clk_add_clk(struct device *dev, struct clk *clk)
+{
+ return __pm_clk_add(dev, NULL, clk);
+}
+EXPORT_SYMBOL_GPL(pm_clk_add_clk);
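As a hypothetical usage sketch (not part of this patch; the driver name and the "bus"/"core" connection IDs are illustrative), a platform driver's probe path might hand its clocks over to the pm_clk framework like this:

#include <linux/platform_device.h>
#include <linux/pm_clock.h>
#include <linux/pm_runtime.h>

/* Hypothetical probe-path sketch: manage two named clocks via pm_clk. */
static int foo_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	int ret;

	ret = pm_clk_create(dev);		/* allocate/refcount power.subsys_data */
	if (ret)
		return ret;

	ret = pm_clk_add(dev, "bus");		/* looked up via clk_get(dev, "bus") */
	if (!ret)
		ret = pm_clk_add(dev, "core");
	if (ret) {
		pm_clk_destroy(dev);
		return ret;
	}

	pm_runtime_enable(dev);
	return 0;
}

The clocks are only gated and ungated if the device's PM domain or bus routes its runtime PM callbacks to pm_clk_suspend()/pm_clk_resume(), for instance through pm_clk_runtime_suspend()/pm_clk_runtime_resume() further below.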
+
+
+/**
+ * of_pm_clk_add_clk - Start using a device clock for power management.
+ * @dev: Device whose clock is going to be used for power management.
+ * @name: Name of clock that is going to be used for power management.
+ *
+ * Add the clock described in the 'clocks' device-tree node that matches
+ * with the 'name' provided, to the list of clocks used for the power
+ * management of @dev. On success, returns 0. Returns a negative error
+ * code if the clock is not found or cannot be added.
+ */
+int of_pm_clk_add_clk(struct device *dev, const char *name)
+{
+ struct clk *clk;
+ int ret;
+
+ if (!dev || !dev->of_node || !name)
+ return -EINVAL;
+
+ clk = of_clk_get_by_name(dev->of_node, name);
+ if (IS_ERR(clk))
+ return PTR_ERR(clk);
+
+ ret = pm_clk_add_clk(dev, clk);
+ if (ret) {
+ clk_put(clk);
+ return ret;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(of_pm_clk_add_clk);
+
+/**
+ * of_pm_clk_add_clks - Start using device clock(s) for power management.
+ * @dev: Device whose clock(s) is going to be used for power management.
+ *
+ * Add a series of clocks described in the 'clocks' device-tree node for
+ * a device to the list of clocks used for the power management of @dev.
+ * On success, returns the number of clocks added. Returns a negative
+ * error code if there are no clocks in the device node for the device
+ * or if adding a clock fails.
+ */
+int of_pm_clk_add_clks(struct device *dev)
+{
+ struct clk **clks;
+ int i, count;
+ int ret;
+
+ if (!dev || !dev->of_node)
+ return -EINVAL;
+
+ count = of_clk_get_parent_count(dev->of_node);
+ if (count <= 0)
+ return -ENODEV;
+
+ clks = kcalloc(count, sizeof(*clks), GFP_KERNEL);
+ if (!clks)
+ return -ENOMEM;
+
+ for (i = 0; i < count; i++) {
+ clks[i] = of_clk_get(dev->of_node, i);
+ if (IS_ERR(clks[i])) {
+ ret = PTR_ERR(clks[i]);
+ goto error;
+ }
+
+ ret = pm_clk_add_clk(dev, clks[i]);
+ if (ret) {
+ clk_put(clks[i]);
+ goto error;
+ }
+ }
+
+ kfree(clks);
+
+ return i;
+
+error:
+ while (i--)
+ pm_clk_remove_clk(dev, clks[i]);
+
+ kfree(clks);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(of_pm_clk_add_clks);
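A hedged illustration of the DT-based variant; the helper name is made up, and the return-value handling follows the contract documented above (number of clocks added, or a negative error code):

#include <linux/device.h>
#include <linux/pm_clock.h>

/* Hypothetical sketch: claim every clock from the device's DT node. */
static int foo_add_dt_clocks(struct device *dev)
{
	int nr_clks;

	nr_clks = of_pm_clk_add_clks(dev);
	if (nr_clks < 0)
		return nr_clks;

	dev_dbg(dev, "managing %d clock(s) via pm_clk\n", nr_clks);
	return 0;
}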
+
+/**
+ * __pm_clk_remove - Destroy PM clock entry.
+ * @ce: PM clock entry to destroy.
+ */
+static void __pm_clk_remove(struct pm_clock_entry *ce)
+{
+ if (!ce)
+ return;
+
+ if (ce->status < PCE_STATUS_ERROR) {
+ if (ce->status == PCE_STATUS_ENABLED)
+ clk_disable(ce->clk);
+
+ if (ce->status >= PCE_STATUS_ACQUIRED) {
+ clk_unprepare(ce->clk);
+ clk_put(ce->clk);
+ }
+ }
+
+ kfree(ce->con_id);
+ kfree(ce);
+}
+
+/**
+ * pm_clk_remove - Stop using a device clock for power management.
+ * @dev: Device whose clock should not be used for PM any more.
+ * @con_id: Connection ID of the clock.
+ *
+ * Remove the clock represented by @con_id from the list of clocks used for
+ * the power management of @dev.
+ */
+void pm_clk_remove(struct device *dev, const char *con_id)
+{
+ struct pm_subsys_data *psd = dev_to_psd(dev);
+ struct pm_clock_entry *ce;
+
+ if (!psd)
+ return;
+
+ spin_lock_irq(&psd->lock);
+
+ list_for_each_entry(ce, &psd->clock_list, node) {
+ if (!con_id && !ce->con_id)
+ goto remove;
+ else if (!con_id || !ce->con_id)
+ continue;
+ else if (!strcmp(con_id, ce->con_id))
+ goto remove;
+ }
+
+ spin_unlock_irq(&psd->lock);
+ return;
+
+ remove:
+ list_del(&ce->node);
+ spin_unlock_irq(&psd->lock);
+
+ __pm_clk_remove(ce);
+}
+EXPORT_SYMBOL_GPL(pm_clk_remove);
+
+/**
+ * pm_clk_remove_clk - Stop using a device clock for power management.
+ * @dev: Device whose clock should not be used for PM any more.
+ * @clk: Clock pointer
+ *
+ * Remove the clock pointed to by @clk from the list of clocks used for
+ * the power management of @dev.
+ */
+void pm_clk_remove_clk(struct device *dev, struct clk *clk)
+{
+ struct pm_subsys_data *psd = dev_to_psd(dev);
+ struct pm_clock_entry *ce;
+
+ if (!psd || !clk)
+ return;
+
+ spin_lock_irq(&psd->lock);
+
+ list_for_each_entry(ce, &psd->clock_list, node) {
+ if (clk == ce->clk)
+ goto remove;
+ }
+
+ spin_unlock_irq(&psd->lock);
+ return;
+
+ remove:
+ list_del(&ce->node);
+ spin_unlock_irq(&psd->lock);
+
+ __pm_clk_remove(ce);
+}
+EXPORT_SYMBOL_GPL(pm_clk_remove_clk);
+
+/**
+ * pm_clk_init - Initialize a device's list of power management clocks.
+ * @dev: Device to initialize the list of PM clocks for.
+ *
+ * Initialize the lock and clock_list members of the device's pm_subsys_data
+ * object.
+ */
+void pm_clk_init(struct device *dev)
+{
+ struct pm_subsys_data *psd = dev_to_psd(dev);
+ if (psd)
+ INIT_LIST_HEAD(&psd->clock_list);
+}
+EXPORT_SYMBOL_GPL(pm_clk_init);
+
+/**
+ * pm_clk_create - Create and initialize a device's list of PM clocks.
+ * @dev: Device to create and initialize the list of PM clocks for.
+ *
+ * Allocate a struct pm_subsys_data object, initialize its lock and clock_list
+ * members and make the @dev's power.subsys_data field point to it.
+ */
+int pm_clk_create(struct device *dev)
+{
+ return dev_pm_get_subsys_data(dev);
+}
+EXPORT_SYMBOL_GPL(pm_clk_create);
+
+/**
+ * pm_clk_destroy - Destroy a device's list of power management clocks.
+ * @dev: Device to destroy the list of PM clocks for.
+ *
+ * Clear the @dev's power.subsys_data field, remove the list of clock entries
+ * from the struct pm_subsys_data object it pointed to before, and free
+ * that object.
+ */
+void pm_clk_destroy(struct device *dev)
+{
+ struct pm_subsys_data *psd = dev_to_psd(dev);
+ struct pm_clock_entry *ce, *c;
+ struct list_head list;
+
+ if (!psd)
+ return;
+
+ INIT_LIST_HEAD(&list);
+
+ spin_lock_irq(&psd->lock);
+
+ list_for_each_entry_safe_reverse(ce, c, &psd->clock_list, node)
+ list_move(&ce->node, &list);
+
+ spin_unlock_irq(&psd->lock);
+
+ dev_pm_put_subsys_data(dev);
+
+ list_for_each_entry_safe_reverse(ce, c, &list, node) {
+ list_del(&ce->node);
+ __pm_clk_remove(ce);
+ }
+}
+EXPORT_SYMBOL_GPL(pm_clk_destroy);
+
+/**
+ * pm_clk_suspend - Disable clocks in a device's PM clock list.
+ * @dev: Device to disable the clocks for.
+ */
+int pm_clk_suspend(struct device *dev)
+{
+ struct pm_subsys_data *psd = dev_to_psd(dev);
+ struct pm_clock_entry *ce;
+ unsigned long flags;
+
+ dev_dbg(dev, "%s()\n", __func__);
+
+ if (!psd)
+ return 0;
+
+ spin_lock_irqsave(&psd->lock, flags);
+
+ list_for_each_entry_reverse(ce, &psd->clock_list, node) {
+ if (ce->status < PCE_STATUS_ERROR) {
+ if (ce->status == PCE_STATUS_ENABLED)
+ clk_disable(ce->clk);
+ ce->status = PCE_STATUS_ACQUIRED;
+ }
+ }
+
+ spin_unlock_irqrestore(&psd->lock, flags);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(pm_clk_suspend);
+
+/**
+ * pm_clk_resume - Enable clocks in a device's PM clock list.
+ * @dev: Device to enable the clocks for.
+ */
+int pm_clk_resume(struct device *dev)
+{
+ struct pm_subsys_data *psd = dev_to_psd(dev);
+ struct pm_clock_entry *ce;
+ unsigned long flags;
+
+ dev_dbg(dev, "%s()\n", __func__);
+
+ if (!psd)
+ return 0;
+
+ spin_lock_irqsave(&psd->lock, flags);
+
+ list_for_each_entry(ce, &psd->clock_list, node)
+ __pm_clk_enable(dev, ce);
+
+ spin_unlock_irqrestore(&psd->lock, flags);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(pm_clk_resume);
+
+/**
+ * pm_clk_notify - Notify routine for device addition and removal.
+ * @nb: Notifier block object this function is a member of.
+ * @action: Operation being carried out by the caller.
+ * @data: Device the routine is being run for.
+ *
+ * For this function to work, @nb must be a member of an object of type
+ * struct pm_clk_notifier_block containing all of the requisite data.
+ * Specifically, the pm_domain member of that object is copied to the device's
+ * pm_domain field and its con_ids member is used to populate the device's list
+ * of PM clocks, depending on @action.
+ *
+ * If the device's pm_domain field is already populated with a value different
+ * from the one stored in the struct pm_clk_notifier_block object, the function
+ * does nothing.
+ */
+static int pm_clk_notify(struct notifier_block *nb,
+ unsigned long action, void *data)
+{
+ struct pm_clk_notifier_block *clknb;
+ struct device *dev = data;
+ char **con_id;
+ int error;
+
+ dev_dbg(dev, "%s() %ld\n", __func__, action);
+
+ clknb = container_of(nb, struct pm_clk_notifier_block, nb);
+
+ switch (action) {
+ case BUS_NOTIFY_ADD_DEVICE:
+ if (dev->pm_domain)
+ break;
+
+ error = pm_clk_create(dev);
+ if (error)
+ break;
+
+ dev_pm_domain_set(dev, clknb->pm_domain);
+ if (clknb->con_ids[0]) {
+ for (con_id = clknb->con_ids; *con_id; con_id++)
+ pm_clk_add(dev, *con_id);
+ } else {
+ pm_clk_add(dev, NULL);
+ }
+
+ break;
+ case BUS_NOTIFY_DEL_DEVICE:
+ if (dev->pm_domain != clknb->pm_domain)
+ break;
+
+ dev_pm_domain_set(dev, NULL);
+ pm_clk_destroy(dev);
+ break;
+ }
+
+ return 0;
+}
+
+int pm_clk_runtime_suspend(struct device *dev)
+{
+ int ret;
+
+ dev_dbg(dev, "%s\n", __func__);
+
+ ret = pm_generic_runtime_suspend(dev);
+ if (ret) {
+ dev_err(dev, "failed to suspend device\n");
+ return ret;
+ }
+
+ ret = pm_clk_suspend(dev);
+ if (ret) {
+ dev_err(dev, "failed to suspend clock\n");
+ pm_generic_runtime_resume(dev);
+ return ret;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(pm_clk_runtime_suspend);
+
+int pm_clk_runtime_resume(struct device *dev)
+{
+ int ret;
+
+ dev_dbg(dev, "%s\n", __func__);
+
+ ret = pm_clk_resume(dev);
+ if (ret) {
+ dev_err(dev, "failed to resume clock\n");
+ return ret;
+ }
+
+ return pm_generic_runtime_resume(dev);
+}
+EXPORT_SYMBOL_GPL(pm_clk_runtime_resume);
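These two helpers are meant to be plugged in as the runtime PM callbacks of a PM domain or bus. A minimal sketch, assuming a hypothetical SoC glue layer named "foo":

#include <linux/pm.h>
#include <linux/pm_clock.h>

/* Hypothetical PM domain that gates its devices' clocks via pm_clk. */
static struct dev_pm_domain foo_clk_pm_domain = {
	.ops = {
		.runtime_suspend = pm_clk_runtime_suspend,
		.runtime_resume = pm_clk_runtime_resume,
	},
};

Bus code would then install this domain with dev_pm_domain_set() (or via the pm_clk notifier below) and populate each device's clock list with pm_clk_add().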
+
+#else /* !CONFIG_PM_CLK */
+
+/**
+ * enable_clock - Enable a device clock.
+ * @dev: Device whose clock is to be enabled.
+ * @con_id: Connection ID of the clock.
+ */
+static void enable_clock(struct device *dev, const char *con_id)
+{
+ struct clk *clk;
+
+ clk = clk_get(dev, con_id);
+ if (!IS_ERR(clk)) {
+ clk_prepare_enable(clk);
+ clk_put(clk);
+ dev_info(dev, "Runtime PM disabled, clock forced on.\n");
+ }
+}
+
+/**
+ * disable_clock - Disable a device clock.
+ * @dev: Device whose clock is to be disabled.
+ * @con_id: Connection ID of the clock.
+ */
+static void disable_clock(struct device *dev, const char *con_id)
+{
+ struct clk *clk;
+
+ clk = clk_get(dev, con_id);
+ if (!IS_ERR(clk)) {
+ clk_disable_unprepare(clk);
+ clk_put(clk);
+ dev_info(dev, "Runtime PM disabled, clock forced off.\n");
+ }
+}
+
+/**
+ * pm_clk_notify - Notify routine for device addition and removal.
+ * @nb: Notifier block object this function is a member of.
+ * @action: Operation being carried out by the caller.
+ * @data: Device the routine is being run for.
+ *
+ * For this function to work, @nb must be a member of an object of type
+ * struct pm_clk_notifier_block containing all of the requisite data.
+ * Specifically, the con_ids member of that object is used to enable or disable
+ * the device's clocks, depending on @action.
+ */
+static int pm_clk_notify(struct notifier_block *nb,
+ unsigned long action, void *data)
+{
+ struct pm_clk_notifier_block *clknb;
+ struct device *dev = data;
+ char **con_id;
+
+ dev_dbg(dev, "%s() %ld\n", __func__, action);
+
+ clknb = container_of(nb, struct pm_clk_notifier_block, nb);
+
+ switch (action) {
+ case BUS_NOTIFY_BIND_DRIVER:
+ if (clknb->con_ids[0]) {
+ for (con_id = clknb->con_ids; *con_id; con_id++)
+ enable_clock(dev, *con_id);
+ } else {
+ enable_clock(dev, NULL);
+ }
+ break;
+ case BUS_NOTIFY_DRIVER_NOT_BOUND:
+ case BUS_NOTIFY_UNBOUND_DRIVER:
+ if (clknb->con_ids[0]) {
+ for (con_id = clknb->con_ids; *con_id; con_id++)
+ disable_clock(dev, *con_id);
+ } else {
+ disable_clock(dev, NULL);
+ }
+ break;
+ }
+
+ return 0;
+}
+
+#endif /* !CONFIG_PM_CLK */
+
+/**
+ * pm_clk_add_notifier - Add bus type notifier for power management clocks.
+ * @bus: Bus type to add the notifier to.
+ * @clknb: Notifier to be added to the given bus type.
+ *
+ * The nb member of @clknb is not expected to be initialized and its
+ * notifier_call member will be replaced with pm_clk_notify(). However,
+ * the remaining members of @clknb should be populated prior to calling this
+ * routine.
+ */
+void pm_clk_add_notifier(struct bus_type *bus,
+ struct pm_clk_notifier_block *clknb)
+{
+ if (!bus || !clknb)
+ return;
+
+ clknb->nb.notifier_call = pm_clk_notify;
+ bus_register_notifier(bus, &clknb->nb);
+}
+EXPORT_SYMBOL_GPL(pm_clk_add_notifier);
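To make the notifier mechanism concrete, here is a hedged sketch of bus-level glue, loosely modeled on existing platform users; the names, connection IDs and the foo_clk_pm_domain from the sketch above are all hypothetical:

#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/pm_clock.h>

/* Hypothetical bus glue: manage "iclk"/"fclk" for every platform device. */
static struct pm_clk_notifier_block foo_bus_clk_nb = {
	.pm_domain = &foo_clk_pm_domain,	/* see the sketch above */
	.con_ids = { "iclk", "fclk", NULL },
};

static int __init foo_bus_pm_init(void)
{
	pm_clk_add_notifier(&platform_bus_type, &foo_bus_clk_nb);
	return 0;
}
core_initcall(foo_bus_pm_init);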
diff --git a/drivers/base/power/common.c b/drivers/base/power/common.c
new file mode 100644
index 000000000..bbddb267c
--- /dev/null
+++ b/drivers/base/power/common.c
@@ -0,0 +1,230 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * drivers/base/power/common.c - Common device power management code.
+ *
+ * Copyright (C) 2011 Rafael J. Wysocki <rjw@sisk.pl>, Renesas Electronics Corp.
+ */
+#include <linux/kernel.h>
+#include <linux/device.h>
+#include <linux/export.h>
+#include <linux/slab.h>
+#include <linux/pm_clock.h>
+#include <linux/acpi.h>
+#include <linux/pm_domain.h>
+
+#include "power.h"
+
+/**
+ * dev_pm_get_subsys_data - Create or refcount power.subsys_data for device.
+ * @dev: Device to handle.
+ *
+ * If power.subsys_data is NULL, point it to a new object, otherwise increment
+ * its reference counter. Return 0 if a new object has been created or the
+ * refcount has been increased, otherwise a negative error code.
+ */
+int dev_pm_get_subsys_data(struct device *dev)
+{
+ struct pm_subsys_data *psd;
+
+ psd = kzalloc(sizeof(*psd), GFP_KERNEL);
+ if (!psd)
+ return -ENOMEM;
+
+ spin_lock_irq(&dev->power.lock);
+
+ if (dev->power.subsys_data) {
+ dev->power.subsys_data->refcount++;
+ } else {
+ spin_lock_init(&psd->lock);
+ psd->refcount = 1;
+ dev->power.subsys_data = psd;
+ pm_clk_init(dev);
+ psd = NULL;
+ }
+
+ spin_unlock_irq(&dev->power.lock);
+
+	/* kfree(NULL) is a no-op, so an unconditional kfree() is safe here. */
+ kfree(psd);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(dev_pm_get_subsys_data);
+
+/**
+ * dev_pm_put_subsys_data - Drop reference to power.subsys_data.
+ * @dev: Device to handle.
+ *
+ * If the reference counter of power.subsys_data is zero after dropping the
+ * reference, power.subsys_data is removed.
+ */
+void dev_pm_put_subsys_data(struct device *dev)
+{
+ struct pm_subsys_data *psd;
+
+ spin_lock_irq(&dev->power.lock);
+
+ psd = dev_to_psd(dev);
+ if (!psd)
+ goto out;
+
+ if (--psd->refcount == 0)
+ dev->power.subsys_data = NULL;
+ else
+ psd = NULL;
+
+ out:
+ spin_unlock_irq(&dev->power.lock);
+ kfree(psd);
+}
+EXPORT_SYMBOL_GPL(dev_pm_put_subsys_data);
+
+/**
+ * dev_pm_domain_attach - Attach a device to its PM domain.
+ * @dev: Device to attach.
+ * @power_on: Used to indicate whether we should power on the device.
+ *
+ * The @dev may only be attached to a single PM domain. By iterating through
+ * the available alternatives we try to find a valid PM domain for the device.
+ * If attachment succeeds, the ->detach() callback in the struct dev_pm_domain
+ * should be assigned by the corresponding attach function.
+ *
+ * This function should typically be invoked from subsystem level code during
+ * the probe phase, especially by subsystems that hold devices which require
+ * power management through PM domains.
+ *
+ * Callers must ensure proper synchronization of this function with power
+ * management callbacks.
+ *
+ * Returns 0 when the PM domain has been successfully attached, or when it is
+ * found that the device doesn't need a PM domain, else a negative error code.
+ */
+int dev_pm_domain_attach(struct device *dev, bool power_on)
+{
+ int ret;
+
+ if (dev->pm_domain)
+ return 0;
+
+ ret = acpi_dev_pm_attach(dev, power_on);
+ if (!ret)
+ ret = genpd_dev_pm_attach(dev);
+
+ return ret < 0 ? ret : 0;
+}
+EXPORT_SYMBOL_GPL(dev_pm_domain_attach);
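For illustration only (the bus name is hypothetical), the typical caller is bus probe code, roughly along these lines:

#include <linux/device.h>
#include <linux/pm_domain.h>

/* Hypothetical bus probe glue around dev_pm_domain_attach(). */
static int foo_bus_probe(struct device *dev)
{
	int ret;

	/* Power on the PM domain (if any) for the duration of probe. */
	ret = dev_pm_domain_attach(dev, true);
	if (ret)
		return ret;	/* may be -EPROBE_DEFER */

	ret = dev->driver->probe ? dev->driver->probe(dev) : 0;
	if (ret)
		dev_pm_domain_detach(dev, true);

	return ret;
}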
+
+/**
+ * dev_pm_domain_attach_by_id - Associate a device with one of its PM domains.
+ * @dev: The device used to lookup the PM domain.
+ * @index: The index of the PM domain.
+ *
+ * As @dev may only be attached to a single PM domain, the backend PM domain
+ * provider creates a virtual device to attach instead. If attachment succeeds,
+ * the ->detach() callback in the struct dev_pm_domain is assigned by the
+ * corresponding backend attach function, so as to deal with detaching of the
+ * created virtual device.
+ *
+ * This function should typically be invoked by a driver during the probe phase,
+ * when its device requires power management through multiple PM domains. The
+ * driver may benefit from using the returned virtual device to configure
+ * device links towards its original device. Depending on the use-case and if
+ * needed, the links may be dynamically changed by the driver, which allows it
+ * to control the power to the PM domains independently of each other.
+ *
+ * Callers must ensure proper synchronization of this function with power
+ * management callbacks.
+ *
+ * Returns the created virtual device when it has successfully been attached to
+ * its PM domain, NULL in case @dev doesn't need a PM domain, else an ERR_PTR().
+ * Note that, to detach the returned virtual device, the driver shall call
+ * dev_pm_domain_detach() on it, typically during the remove phase.
+ */
+struct device *dev_pm_domain_attach_by_id(struct device *dev,
+ unsigned int index)
+{
+ if (dev->pm_domain)
+ return ERR_PTR(-EEXIST);
+
+ return genpd_dev_pm_attach_by_id(dev, index);
+}
+EXPORT_SYMBOL_GPL(dev_pm_domain_attach_by_id);
+
+/**
+ * dev_pm_domain_attach_by_name - Associate a device with one of its PM domains.
+ * @dev: The device used to lookup the PM domain.
+ * @name: The name of the PM domain.
+ *
+ * For a detailed function description, see dev_pm_domain_attach_by_id().
+ */
+struct device *dev_pm_domain_attach_by_name(struct device *dev,
+ const char *name)
+{
+ if (dev->pm_domain)
+ return ERR_PTR(-EEXIST);
+
+ return genpd_dev_pm_attach_by_name(dev, name);
+}
+EXPORT_SYMBOL_GPL(dev_pm_domain_attach_by_name);
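A hedged sketch of the multi-domain case; the "perf" domain name and helper are hypothetical, and the device link lets runtime PM of the consumer propagate to the virtual domain device:

#include <linux/device.h>
#include <linux/err.h>
#include <linux/pm_domain.h>

/* Hypothetical driver helper: attach one named PM domain and link it. */
static int foo_attach_perf_domain(struct device *dev, struct device **virt_dev)
{
	struct device *pd_dev;

	pd_dev = dev_pm_domain_attach_by_name(dev, "perf");
	if (IS_ERR(pd_dev))
		return PTR_ERR(pd_dev);
	if (!pd_dev)
		return 0;	/* no "perf" domain described for this device */

	/* Let runtime PM of @dev propagate to the virtual domain device. */
	if (!device_link_add(dev, pd_dev,
			     DL_FLAG_STATELESS | DL_FLAG_PM_RUNTIME)) {
		dev_pm_domain_detach(pd_dev, false);
		return -ENODEV;
	}

	*virt_dev = pd_dev;
	return 0;
}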
+
+/**
+ * dev_pm_domain_detach - Detach a device from its PM domain.
+ * @dev: Device to detach.
+ * @power_off: Used to indicate whether we should power off the device.
+ *
+ * This function reverses the actions from dev_pm_domain_attach() and
+ * dev_pm_domain_attach_by_id(), thus detaching @dev from its PM domain.
+ * Typically it should be invoked during the remove phase, either from
+ * subsystem level code or from drivers.
+ *
+ * Callers must ensure proper synchronization of this function with power
+ * management callbacks.
+ */
+void dev_pm_domain_detach(struct device *dev, bool power_off)
+{
+ if (dev->pm_domain && dev->pm_domain->detach)
+ dev->pm_domain->detach(dev, power_off);
+}
+EXPORT_SYMBOL_GPL(dev_pm_domain_detach);
+
+/**
+ * dev_pm_domain_start - Start the device through its PM domain.
+ * @dev: Device to start.
+ *
+ * This function should typically be called during probe by a subsystem/driver,
+ * when it needs to start its device from the PM domain's perspective. Note
+ * that, it's assumed that the PM domain is already powered on when this
+ * function is called.
+ *
+ * Returns 0 on success and negative error values on failures.
+ */
+int dev_pm_domain_start(struct device *dev)
+{
+ if (dev->pm_domain && dev->pm_domain->start)
+ return dev->pm_domain->start(dev);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(dev_pm_domain_start);
+
+/**
+ * dev_pm_domain_set - Set PM domain of a device.
+ * @dev: Device whose PM domain is to be set.
+ * @pd: PM domain to be set, or NULL.
+ *
+ * Sets the PM domain the device belongs to. The PM domain of a device needs
+ * to be set before its probe finishes (i.e. before it is bound to a driver).
+ *
+ * This function must be called with the device lock held.
+ */
+void dev_pm_domain_set(struct device *dev, struct dev_pm_domain *pd)
+{
+ if (dev->pm_domain == pd)
+ return;
+
+ WARN(pd && device_is_bound(dev),
+ "PM domains can only be changed for unbound devices\n");
+ dev->pm_domain = pd;
+ device_pm_check_callbacks(dev);
+}
+EXPORT_SYMBOL_GPL(dev_pm_domain_set);
diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c
new file mode 100644
index 000000000..8a90f08c9
--- /dev/null
+++ b/drivers/base/power/domain.c
@@ -0,0 +1,3223 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * drivers/base/power/domain.c - Common code related to device power domains.
+ *
+ * Copyright (C) 2011 Rafael J. Wysocki <rjw@sisk.pl>, Renesas Electronics Corp.
+ */
+#define pr_fmt(fmt) "PM: " fmt
+
+#include <linux/delay.h>
+#include <linux/kernel.h>
+#include <linux/io.h>
+#include <linux/platform_device.h>
+#include <linux/pm_opp.h>
+#include <linux/pm_runtime.h>
+#include <linux/pm_domain.h>
+#include <linux/pm_qos.h>
+#include <linux/pm_clock.h>
+#include <linux/slab.h>
+#include <linux/err.h>
+#include <linux/sched.h>
+#include <linux/suspend.h>
+#include <linux/export.h>
+#include <linux/cpu.h>
+
+#include "power.h"
+
+#define GENPD_RETRY_MAX_MS 250 /* Approximate */
+
+#define GENPD_DEV_CALLBACK(genpd, type, callback, dev) \
+({ \
+ type (*__routine)(struct device *__d); \
+ type __ret = (type)0; \
+ \
+ __routine = genpd->dev_ops.callback; \
+ if (__routine) { \
+ __ret = __routine(dev); \
+ } \
+ __ret; \
+})
+
+static LIST_HEAD(gpd_list);
+static DEFINE_MUTEX(gpd_list_lock);
+
+struct genpd_lock_ops {
+ void (*lock)(struct generic_pm_domain *genpd);
+ void (*lock_nested)(struct generic_pm_domain *genpd, int depth);
+ int (*lock_interruptible)(struct generic_pm_domain *genpd);
+ void (*unlock)(struct generic_pm_domain *genpd);
+};
+
+static void genpd_lock_mtx(struct generic_pm_domain *genpd)
+{
+ mutex_lock(&genpd->mlock);
+}
+
+static void genpd_lock_nested_mtx(struct generic_pm_domain *genpd,
+ int depth)
+{
+ mutex_lock_nested(&genpd->mlock, depth);
+}
+
+static int genpd_lock_interruptible_mtx(struct generic_pm_domain *genpd)
+{
+ return mutex_lock_interruptible(&genpd->mlock);
+}
+
+static void genpd_unlock_mtx(struct generic_pm_domain *genpd)
+{
+ return mutex_unlock(&genpd->mlock);
+}
+
+static const struct genpd_lock_ops genpd_mtx_ops = {
+ .lock = genpd_lock_mtx,
+ .lock_nested = genpd_lock_nested_mtx,
+ .lock_interruptible = genpd_lock_interruptible_mtx,
+ .unlock = genpd_unlock_mtx,
+};
+
+static void genpd_lock_spin(struct generic_pm_domain *genpd)
+ __acquires(&genpd->slock)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&genpd->slock, flags);
+ genpd->lock_flags = flags;
+}
+
+static void genpd_lock_nested_spin(struct generic_pm_domain *genpd,
+ int depth)
+ __acquires(&genpd->slock)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave_nested(&genpd->slock, flags, depth);
+ genpd->lock_flags = flags;
+}
+
+static int genpd_lock_interruptible_spin(struct generic_pm_domain *genpd)
+ __acquires(&genpd->slock)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&genpd->slock, flags);
+ genpd->lock_flags = flags;
+ return 0;
+}
+
+static void genpd_unlock_spin(struct generic_pm_domain *genpd)
+ __releases(&genpd->slock)
+{
+ spin_unlock_irqrestore(&genpd->slock, genpd->lock_flags);
+}
+
+static const struct genpd_lock_ops genpd_spin_ops = {
+ .lock = genpd_lock_spin,
+ .lock_nested = genpd_lock_nested_spin,
+ .lock_interruptible = genpd_lock_interruptible_spin,
+ .unlock = genpd_unlock_spin,
+};
+
+#define genpd_lock(p) p->lock_ops->lock(p)
+#define genpd_lock_nested(p, d) p->lock_ops->lock_nested(p, d)
+#define genpd_lock_interruptible(p) p->lock_ops->lock_interruptible(p)
+#define genpd_unlock(p) p->lock_ops->unlock(p)
+
+#define genpd_status_on(genpd) (genpd->status == GENPD_STATE_ON)
+#define genpd_is_irq_safe(genpd) (genpd->flags & GENPD_FLAG_IRQ_SAFE)
+#define genpd_is_always_on(genpd) (genpd->flags & GENPD_FLAG_ALWAYS_ON)
+#define genpd_is_active_wakeup(genpd) (genpd->flags & GENPD_FLAG_ACTIVE_WAKEUP)
+#define genpd_is_cpu_domain(genpd) (genpd->flags & GENPD_FLAG_CPU_DOMAIN)
+#define genpd_is_rpm_always_on(genpd) (genpd->flags & GENPD_FLAG_RPM_ALWAYS_ON)
+
+static inline bool irq_safe_dev_in_no_sleep_domain(struct device *dev,
+ const struct generic_pm_domain *genpd)
+{
+ bool ret;
+
+ ret = pm_runtime_is_irq_safe(dev) && !genpd_is_irq_safe(genpd);
+
+ /*
+	 * Warn once if an IRQ safe device is attached to a no sleep domain, to
+	 * indicate a suboptimal configuration for PM. For an always on domain
+	 * this isn't the case, thus don't warn.
+ */
+ if (ret && !genpd_is_always_on(genpd))
+ dev_warn_once(dev, "PM domain %s will not be powered off\n",
+ genpd->name);
+
+ return ret;
+}
+
+static int genpd_runtime_suspend(struct device *dev);
+
+/*
+ * Get the generic PM domain for a particular struct device.
+ * This validates the struct device pointer, the PM domain pointer,
+ * and checks that the PM domain pointer is a real generic PM domain.
+ * Any failure results in NULL being returned.
+ */
+static struct generic_pm_domain *dev_to_genpd_safe(struct device *dev)
+{
+ if (IS_ERR_OR_NULL(dev) || IS_ERR_OR_NULL(dev->pm_domain))
+ return NULL;
+
+	/* A genpd always has its ->runtime_suspend() callback assigned. */
+ if (dev->pm_domain->ops.runtime_suspend == genpd_runtime_suspend)
+ return pd_to_genpd(dev->pm_domain);
+
+ return NULL;
+}
+
+/*
+ * This should only be used where we are certain that the pm_domain
+ * attached to the device is a genpd domain.
+ */
+static struct generic_pm_domain *dev_to_genpd(struct device *dev)
+{
+ if (IS_ERR_OR_NULL(dev->pm_domain))
+ return ERR_PTR(-EINVAL);
+
+ return pd_to_genpd(dev->pm_domain);
+}
+
+static int genpd_stop_dev(const struct generic_pm_domain *genpd,
+ struct device *dev)
+{
+ return GENPD_DEV_CALLBACK(genpd, int, stop, dev);
+}
+
+static int genpd_start_dev(const struct generic_pm_domain *genpd,
+ struct device *dev)
+{
+ return GENPD_DEV_CALLBACK(genpd, int, start, dev);
+}
+
+static bool genpd_sd_counter_dec(struct generic_pm_domain *genpd)
+{
+ bool ret = false;
+
+ if (!WARN_ON(atomic_read(&genpd->sd_count) == 0))
+ ret = !!atomic_dec_and_test(&genpd->sd_count);
+
+ return ret;
+}
+
+static void genpd_sd_counter_inc(struct generic_pm_domain *genpd)
+{
+ atomic_inc(&genpd->sd_count);
+ smp_mb__after_atomic();
+}
+
+#ifdef CONFIG_DEBUG_FS
+static void genpd_update_accounting(struct generic_pm_domain *genpd)
+{
+ ktime_t delta, now;
+
+ now = ktime_get();
+ delta = ktime_sub(now, genpd->accounting_time);
+
+	/*
+	 * If genpd->status is ON, the domain has just been powered on, so the
+	 * elapsed time was spent in the off state and is added to the idle
+	 * time of the current state; otherwise the domain has just been
+	 * powered off and the elapsed time is added to the on time.
+	 */
+ if (genpd->status == GENPD_STATE_ON) {
+ int state_idx = genpd->state_idx;
+
+ genpd->states[state_idx].idle_time =
+ ktime_add(genpd->states[state_idx].idle_time, delta);
+ } else {
+ genpd->on_time = ktime_add(genpd->on_time, delta);
+ }
+
+ genpd->accounting_time = now;
+}
+#else
+static inline void genpd_update_accounting(struct generic_pm_domain *genpd) {}
+#endif
+
+static int _genpd_reeval_performance_state(struct generic_pm_domain *genpd,
+ unsigned int state)
+{
+ struct generic_pm_domain_data *pd_data;
+ struct pm_domain_data *pdd;
+ struct gpd_link *link;
+
+	/* New requested state is the same as the max requested state */
+ if (state == genpd->performance_state)
+ return state;
+
+	/* New requested state is higher than the max requested state */
+ if (state > genpd->performance_state)
+ return state;
+
+ /* Traverse all devices within the domain */
+ list_for_each_entry(pdd, &genpd->dev_list, list_node) {
+ pd_data = to_gpd_data(pdd);
+
+ if (pd_data->performance_state > state)
+ state = pd_data->performance_state;
+ }
+
+ /*
+ * Traverse all sub-domains within the domain. This can be
+ * done without any additional locking as the link->performance_state
+ * field is protected by the parent genpd->lock, which is already taken.
+ *
+ * Also note that link->performance_state (subdomain's performance state
+ * requirement to parent domain) is different from
+ * link->child->performance_state (current performance state requirement
+ * of the devices/sub-domains of the subdomain) and so can have a
+ * different value.
+ *
+ * Note that we also take vote from powered-off sub-domains into account
+ * as the same is done for devices right now.
+ */
+ list_for_each_entry(link, &genpd->parent_links, parent_node) {
+ if (link->performance_state > state)
+ state = link->performance_state;
+ }
+
+ return state;
+}
+
+static int _genpd_set_performance_state(struct generic_pm_domain *genpd,
+ unsigned int state, int depth)
+{
+ struct generic_pm_domain *parent;
+ struct gpd_link *link;
+ int parent_state, ret;
+
+ if (state == genpd->performance_state)
+ return 0;
+
+ /* Propagate to parents of genpd */
+ list_for_each_entry(link, &genpd->child_links, child_node) {
+ parent = link->parent;
+
+ if (!parent->set_performance_state)
+ continue;
+
+ /* Find parent's performance state */
+ ret = dev_pm_opp_xlate_performance_state(genpd->opp_table,
+ parent->opp_table,
+ state);
+ if (unlikely(ret < 0))
+ goto err;
+
+ parent_state = ret;
+
+ genpd_lock_nested(parent, depth + 1);
+
+ link->prev_performance_state = link->performance_state;
+ link->performance_state = parent_state;
+ parent_state = _genpd_reeval_performance_state(parent,
+ parent_state);
+ ret = _genpd_set_performance_state(parent, parent_state, depth + 1);
+ if (ret)
+ link->performance_state = link->prev_performance_state;
+
+ genpd_unlock(parent);
+
+ if (ret)
+ goto err;
+ }
+
+ ret = genpd->set_performance_state(genpd, state);
+ if (ret)
+ goto err;
+
+ genpd->performance_state = state;
+ return 0;
+
+err:
+ /* Encountered an error, lets rollback */
+ list_for_each_entry_continue_reverse(link, &genpd->child_links,
+ child_node) {
+ parent = link->parent;
+
+ if (!parent->set_performance_state)
+ continue;
+
+ genpd_lock_nested(parent, depth + 1);
+
+ parent_state = link->prev_performance_state;
+ link->performance_state = parent_state;
+
+ parent_state = _genpd_reeval_performance_state(parent,
+ parent_state);
+ if (_genpd_set_performance_state(parent, parent_state, depth + 1)) {
+ pr_err("%s: Failed to roll back to %d performance state\n",
+ parent->name, parent_state);
+ }
+
+ genpd_unlock(parent);
+ }
+
+ return ret;
+}
+
+/**
+ * dev_pm_genpd_set_performance_state- Set performance state of device's power
+ * domain.
+ *
+ * @dev: Device for which the performance-state needs to be set.
+ * @state: Target performance state of the device. This can be set as 0 when the
+ * device doesn't have any performance state constraints left (And so
+ * the device wouldn't participate anymore to find the target
+ * performance state of the genpd).
+ *
+ * It is assumed that the users guarantee that the genpd wouldn't be detached
+ * while this routine is getting called.
+ *
+ * Returns 0 on success and negative error values on failures.
+ */
+int dev_pm_genpd_set_performance_state(struct device *dev, unsigned int state)
+{
+ struct generic_pm_domain *genpd;
+ struct generic_pm_domain_data *gpd_data;
+ unsigned int prev;
+ int ret;
+
+ genpd = dev_to_genpd_safe(dev);
+ if (!genpd)
+ return -ENODEV;
+
+ if (unlikely(!genpd->set_performance_state))
+ return -EINVAL;
+
+ if (WARN_ON(!dev->power.subsys_data ||
+ !dev->power.subsys_data->domain_data))
+ return -EINVAL;
+
+ genpd_lock(genpd);
+
+ gpd_data = to_gpd_data(dev->power.subsys_data->domain_data);
+ prev = gpd_data->performance_state;
+ gpd_data->performance_state = state;
+
+ state = _genpd_reeval_performance_state(genpd, state);
+ ret = _genpd_set_performance_state(genpd, state, 0);
+ if (ret)
+ gpd_data->performance_state = prev;
+
+ genpd_unlock(genpd);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(dev_pm_genpd_set_performance_state);
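A minimal consumer-side sketch; the numeric state value is purely illustrative, as real drivers typically derive performance states from OPP tables (see the dev_pm_opp_xlate_performance_state() use in _genpd_set_performance_state() above) rather than hard-coding them:

#include <linux/device.h>
#include <linux/pm_domain.h>

/* Hypothetical consumer: vote for a performance state while busy. */
static int foo_set_busy(struct device *dev, bool busy)
{
	/* A vote of 0 drops this device's constraint on the genpd. */
	return dev_pm_genpd_set_performance_state(dev, busy ? 3 : 0);
}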
+
+static int _genpd_power_on(struct generic_pm_domain *genpd, bool timed)
+{
+ unsigned int state_idx = genpd->state_idx;
+ ktime_t time_start;
+ s64 elapsed_ns;
+ int ret;
+
+ /* Notify consumers that we are about to power on. */
+ ret = raw_notifier_call_chain_robust(&genpd->power_notifiers,
+ GENPD_NOTIFY_PRE_ON,
+ GENPD_NOTIFY_OFF, NULL);
+ ret = notifier_to_errno(ret);
+ if (ret)
+ return ret;
+
+ if (!genpd->power_on)
+ goto out;
+
+ if (!timed) {
+ ret = genpd->power_on(genpd);
+ if (ret)
+ goto err;
+
+ goto out;
+ }
+
+ time_start = ktime_get();
+ ret = genpd->power_on(genpd);
+ if (ret)
+ goto err;
+
+ elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), time_start));
+ if (elapsed_ns <= genpd->states[state_idx].power_on_latency_ns)
+ goto out;
+
+ genpd->states[state_idx].power_on_latency_ns = elapsed_ns;
+ genpd->max_off_time_changed = true;
+ pr_debug("%s: Power-%s latency exceeded, new value %lld ns\n",
+ genpd->name, "on", elapsed_ns);
+
+out:
+ raw_notifier_call_chain(&genpd->power_notifiers, GENPD_NOTIFY_ON, NULL);
+ return 0;
+err:
+ raw_notifier_call_chain(&genpd->power_notifiers, GENPD_NOTIFY_OFF,
+ NULL);
+ return ret;
+}
+
+static int _genpd_power_off(struct generic_pm_domain *genpd, bool timed)
+{
+ unsigned int state_idx = genpd->state_idx;
+ ktime_t time_start;
+ s64 elapsed_ns;
+ int ret;
+
+ /* Notify consumers that we are about to power off. */
+ ret = raw_notifier_call_chain_robust(&genpd->power_notifiers,
+ GENPD_NOTIFY_PRE_OFF,
+ GENPD_NOTIFY_ON, NULL);
+ ret = notifier_to_errno(ret);
+ if (ret)
+ return ret;
+
+ if (!genpd->power_off)
+ goto out;
+
+ if (!timed) {
+ ret = genpd->power_off(genpd);
+ if (ret)
+ goto busy;
+
+ goto out;
+ }
+
+ time_start = ktime_get();
+ ret = genpd->power_off(genpd);
+ if (ret)
+ goto busy;
+
+ elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), time_start));
+ if (elapsed_ns <= genpd->states[state_idx].power_off_latency_ns)
+ goto out;
+
+ genpd->states[state_idx].power_off_latency_ns = elapsed_ns;
+ genpd->max_off_time_changed = true;
+ pr_debug("%s: Power-%s latency exceeded, new value %lld ns\n",
+ genpd->name, "off", elapsed_ns);
+
+out:
+ raw_notifier_call_chain(&genpd->power_notifiers, GENPD_NOTIFY_OFF,
+ NULL);
+ return 0;
+busy:
+ raw_notifier_call_chain(&genpd->power_notifiers, GENPD_NOTIFY_ON, NULL);
+ return ret;
+}
+
+/**
+ * genpd_queue_power_off_work - Queue up the execution of genpd_power_off().
+ * @genpd: PM domain to power off.
+ *
+ * Queue up the execution of genpd_power_off() unless it's already been done
+ * before.
+ */
+static void genpd_queue_power_off_work(struct generic_pm_domain *genpd)
+{
+ queue_work(pm_wq, &genpd->power_off_work);
+}
+
+/**
+ * genpd_power_off - Remove power from a given PM domain.
+ * @genpd: PM domain to power down.
+ * @one_dev_on: If invoked from genpd's ->runtime_suspend|resume() callback, the
+ * RPM status of the related device is in an intermediate state, not yet turned
+ * into RPM_SUSPENDED. This means genpd_power_off() must allow one device to not
+ * be RPM_SUSPENDED, while it tries to power off the PM domain.
+ *
+ * If all of the @genpd's devices have been suspended and all of its subdomains
+ * have been powered down, remove power from @genpd.
+ */
+static int genpd_power_off(struct generic_pm_domain *genpd, bool one_dev_on,
+ unsigned int depth)
+{
+ struct pm_domain_data *pdd;
+ struct gpd_link *link;
+ unsigned int not_suspended = 0;
+ int ret;
+
+ /*
+ * Do not try to power off the domain in the following situations:
+ * (1) The domain is already in the "power off" state.
+ * (2) System suspend is in progress.
+ */
+ if (!genpd_status_on(genpd) || genpd->prepared_count > 0)
+ return 0;
+
+ /*
+ * Abort power off for the PM domain in the following situations:
+ * (1) The domain is configured as always on.
+ * (2) When the domain has a subdomain being powered on.
+ */
+ if (genpd_is_always_on(genpd) ||
+ genpd_is_rpm_always_on(genpd) ||
+ atomic_read(&genpd->sd_count) > 0)
+ return -EBUSY;
+
+ list_for_each_entry(pdd, &genpd->dev_list, list_node) {
+ enum pm_qos_flags_status stat;
+
+ stat = dev_pm_qos_flags(pdd->dev, PM_QOS_FLAG_NO_POWER_OFF);
+ if (stat > PM_QOS_FLAGS_NONE)
+ return -EBUSY;
+
+ /*
+ * Do not allow PM domain to be powered off, when an IRQ safe
+ * device is part of a non-IRQ safe domain.
+ */
+ if (!pm_runtime_suspended(pdd->dev) ||
+ irq_safe_dev_in_no_sleep_domain(pdd->dev, genpd))
+ not_suspended++;
+ }
+
+ if (not_suspended > 1 || (not_suspended == 1 && !one_dev_on))
+ return -EBUSY;
+
+ if (genpd->gov && genpd->gov->power_down_ok) {
+ if (!genpd->gov->power_down_ok(&genpd->domain))
+ return -EAGAIN;
+ }
+
+ /* Default to shallowest state. */
+ if (!genpd->gov)
+ genpd->state_idx = 0;
+
+ /* Don't power off, if a child domain is waiting to power on. */
+ if (atomic_read(&genpd->sd_count) > 0)
+ return -EBUSY;
+
+ ret = _genpd_power_off(genpd, true);
+ if (ret) {
+ genpd->states[genpd->state_idx].rejected++;
+ return ret;
+ }
+
+ genpd->status = GENPD_STATE_OFF;
+ genpd_update_accounting(genpd);
+ genpd->states[genpd->state_idx].usage++;
+
+ list_for_each_entry(link, &genpd->child_links, child_node) {
+ genpd_sd_counter_dec(link->parent);
+ genpd_lock_nested(link->parent, depth + 1);
+ genpd_power_off(link->parent, false, depth + 1);
+ genpd_unlock(link->parent);
+ }
+
+ return 0;
+}
+
+/**
+ * genpd_power_on - Restore power to a given PM domain and its parents.
+ * @genpd: PM domain to power up.
+ * @depth: nesting count for lockdep.
+ *
+ * Restore power to @genpd and all of its parents so that it is possible to
+ * resume a device belonging to it.
+ */
+static int genpd_power_on(struct generic_pm_domain *genpd, unsigned int depth)
+{
+ struct gpd_link *link;
+ int ret = 0;
+
+ if (genpd_status_on(genpd))
+ return 0;
+
+ /*
+ * The list is guaranteed not to change while the loop below is being
+ * executed, unless one of the parents' .power_on() callbacks fiddles
+ * with it.
+ */
+ list_for_each_entry(link, &genpd->child_links, child_node) {
+ struct generic_pm_domain *parent = link->parent;
+
+ genpd_sd_counter_inc(parent);
+
+ genpd_lock_nested(parent, depth + 1);
+ ret = genpd_power_on(parent, depth + 1);
+ genpd_unlock(parent);
+
+ if (ret) {
+ genpd_sd_counter_dec(parent);
+ goto err;
+ }
+ }
+
+ ret = _genpd_power_on(genpd, true);
+ if (ret)
+ goto err;
+
+ genpd->status = GENPD_STATE_ON;
+ genpd_update_accounting(genpd);
+
+ return 0;
+
+ err:
+ list_for_each_entry_continue_reverse(link,
+ &genpd->child_links,
+ child_node) {
+ genpd_sd_counter_dec(link->parent);
+ genpd_lock_nested(link->parent, depth + 1);
+ genpd_power_off(link->parent, false, depth + 1);
+ genpd_unlock(link->parent);
+ }
+
+ return ret;
+}
+
+static int genpd_dev_pm_start(struct device *dev)
+{
+ struct generic_pm_domain *genpd = dev_to_genpd(dev);
+
+ return genpd_start_dev(genpd, dev);
+}
+
+static int genpd_dev_pm_qos_notifier(struct notifier_block *nb,
+ unsigned long val, void *ptr)
+{
+ struct generic_pm_domain_data *gpd_data;
+ struct device *dev;
+
+ gpd_data = container_of(nb, struct generic_pm_domain_data, nb);
+ dev = gpd_data->base.dev;
+
+ for (;;) {
+ struct generic_pm_domain *genpd;
+ struct pm_domain_data *pdd;
+
+ spin_lock_irq(&dev->power.lock);
+
+ pdd = dev->power.subsys_data ?
+ dev->power.subsys_data->domain_data : NULL;
+ if (pdd) {
+ to_gpd_data(pdd)->td.constraint_changed = true;
+ genpd = dev_to_genpd(dev);
+ } else {
+ genpd = ERR_PTR(-ENODATA);
+ }
+
+ spin_unlock_irq(&dev->power.lock);
+
+ if (!IS_ERR(genpd)) {
+ genpd_lock(genpd);
+ genpd->max_off_time_changed = true;
+ genpd_unlock(genpd);
+ }
+
+ dev = dev->parent;
+ if (!dev || dev->power.ignore_children)
+ break;
+ }
+
+ return NOTIFY_DONE;
+}
+
+/**
+ * genpd_power_off_work_fn - Power off PM domain whose subdomain count is 0.
+ * @work: Work structure used for scheduling the execution of this function.
+ */
+static void genpd_power_off_work_fn(struct work_struct *work)
+{
+ struct generic_pm_domain *genpd;
+
+ genpd = container_of(work, struct generic_pm_domain, power_off_work);
+
+ genpd_lock(genpd);
+ genpd_power_off(genpd, false, 0);
+ genpd_unlock(genpd);
+}
+
+/**
+ * __genpd_runtime_suspend - walk the hierarchy of ->runtime_suspend() callbacks
+ * @dev: Device to handle.
+ */
+static int __genpd_runtime_suspend(struct device *dev)
+{
+ int (*cb)(struct device *__dev);
+
+ if (dev->type && dev->type->pm)
+ cb = dev->type->pm->runtime_suspend;
+ else if (dev->class && dev->class->pm)
+ cb = dev->class->pm->runtime_suspend;
+ else if (dev->bus && dev->bus->pm)
+ cb = dev->bus->pm->runtime_suspend;
+ else
+ cb = NULL;
+
+ if (!cb && dev->driver && dev->driver->pm)
+ cb = dev->driver->pm->runtime_suspend;
+
+ return cb ? cb(dev) : 0;
+}
+
+/**
+ * __genpd_runtime_resume - walk the hierarchy of ->runtime_resume() callbacks
+ * @dev: Device to handle.
+ */
+static int __genpd_runtime_resume(struct device *dev)
+{
+ int (*cb)(struct device *__dev);
+
+ if (dev->type && dev->type->pm)
+ cb = dev->type->pm->runtime_resume;
+ else if (dev->class && dev->class->pm)
+ cb = dev->class->pm->runtime_resume;
+ else if (dev->bus && dev->bus->pm)
+ cb = dev->bus->pm->runtime_resume;
+ else
+ cb = NULL;
+
+ if (!cb && dev->driver && dev->driver->pm)
+ cb = dev->driver->pm->runtime_resume;
+
+ return cb ? cb(dev) : 0;
+}
+
+/**
+ * genpd_runtime_suspend - Suspend a device belonging to I/O PM domain.
+ * @dev: Device to suspend.
+ *
+ * Carry out a runtime suspend of a device under the assumption that its
+ * pm_domain field points to the domain member of an object of type
+ * struct generic_pm_domain representing a PM domain consisting of I/O devices.
+ */
+static int genpd_runtime_suspend(struct device *dev)
+{
+ struct generic_pm_domain *genpd;
+ bool (*suspend_ok)(struct device *__dev);
+ struct gpd_timing_data *td = &dev_gpd_data(dev)->td;
+ bool runtime_pm = pm_runtime_enabled(dev);
+ ktime_t time_start;
+ s64 elapsed_ns;
+ int ret;
+
+ dev_dbg(dev, "%s()\n", __func__);
+
+ genpd = dev_to_genpd(dev);
+ if (IS_ERR(genpd))
+ return -EINVAL;
+
+ /*
+ * A runtime PM centric subsystem/driver may re-use the runtime PM
+ * callbacks for other purposes than runtime PM. In those scenarios
+ * runtime PM is disabled. Under these circumstances, we shall skip
+ * validating/measuring the PM QoS latency.
+ */
+ suspend_ok = genpd->gov ? genpd->gov->suspend_ok : NULL;
+ if (runtime_pm && suspend_ok && !suspend_ok(dev))
+ return -EBUSY;
+
+ /* Measure suspend latency. */
+ time_start = 0;
+ if (runtime_pm)
+ time_start = ktime_get();
+
+ ret = __genpd_runtime_suspend(dev);
+ if (ret)
+ return ret;
+
+ ret = genpd_stop_dev(genpd, dev);
+ if (ret) {
+ __genpd_runtime_resume(dev);
+ return ret;
+ }
+
+ /* Update suspend latency value if the measured time exceeds it. */
+ if (runtime_pm) {
+ elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), time_start));
+ if (elapsed_ns > td->suspend_latency_ns) {
+ td->suspend_latency_ns = elapsed_ns;
+ dev_dbg(dev, "suspend latency exceeded, %lld ns\n",
+ elapsed_ns);
+ genpd->max_off_time_changed = true;
+ td->constraint_changed = true;
+ }
+ }
+
+ /*
+ * If power.irq_safe is set, this routine may be run with
+ * IRQs disabled, so suspend only if the PM domain also is irq_safe.
+ */
+ if (irq_safe_dev_in_no_sleep_domain(dev, genpd))
+ return 0;
+
+ genpd_lock(genpd);
+ genpd_power_off(genpd, true, 0);
+ genpd_unlock(genpd);
+
+ return 0;
+}
+
+/**
+ * genpd_runtime_resume - Resume a device belonging to I/O PM domain.
+ * @dev: Device to resume.
+ *
+ * Carry out a runtime resume of a device under the assumption that its
+ * pm_domain field points to the domain member of an object of type
+ * struct generic_pm_domain representing a PM domain consisting of I/O devices.
+ */
+static int genpd_runtime_resume(struct device *dev)
+{
+ struct generic_pm_domain *genpd;
+ struct gpd_timing_data *td = &dev_gpd_data(dev)->td;
+ bool runtime_pm = pm_runtime_enabled(dev);
+ ktime_t time_start;
+ s64 elapsed_ns;
+ int ret;
+ bool timed = true;
+
+ dev_dbg(dev, "%s()\n", __func__);
+
+ genpd = dev_to_genpd(dev);
+ if (IS_ERR(genpd))
+ return -EINVAL;
+
+ /*
+	 * As we never power off a non-IRQ-safe domain that contains an
+	 * IRQ-safe device, there is no need to restore power to it here.
+ */
+ if (irq_safe_dev_in_no_sleep_domain(dev, genpd)) {
+ timed = false;
+ goto out;
+ }
+
+ genpd_lock(genpd);
+ ret = genpd_power_on(genpd, 0);
+ genpd_unlock(genpd);
+
+ if (ret)
+ return ret;
+
+ out:
+ /* Measure resume latency. */
+ time_start = 0;
+ if (timed && runtime_pm)
+ time_start = ktime_get();
+
+ ret = genpd_start_dev(genpd, dev);
+ if (ret)
+ goto err_poweroff;
+
+ ret = __genpd_runtime_resume(dev);
+ if (ret)
+ goto err_stop;
+
+ /* Update resume latency value if the measured time exceeds it. */
+ if (timed && runtime_pm) {
+ elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), time_start));
+ if (elapsed_ns > td->resume_latency_ns) {
+ td->resume_latency_ns = elapsed_ns;
+ dev_dbg(dev, "resume latency exceeded, %lld ns\n",
+ elapsed_ns);
+ genpd->max_off_time_changed = true;
+ td->constraint_changed = true;
+ }
+ }
+
+ return 0;
+
+err_stop:
+ genpd_stop_dev(genpd, dev);
+err_poweroff:
+ if (!pm_runtime_is_irq_safe(dev) ||
+ (pm_runtime_is_irq_safe(dev) && genpd_is_irq_safe(genpd))) {
+ genpd_lock(genpd);
+ genpd_power_off(genpd, true, 0);
+ genpd_unlock(genpd);
+ }
+
+ return ret;
+}
+
+static bool pd_ignore_unused;
+static int __init pd_ignore_unused_setup(char *__unused)
+{
+ pd_ignore_unused = true;
+ return 1;
+}
+__setup("pd_ignore_unused", pd_ignore_unused_setup);
+
+/**
+ * genpd_power_off_unused - Power off all PM domains with no devices in use.
+ */
+static int __init genpd_power_off_unused(void)
+{
+ struct generic_pm_domain *genpd;
+
+ if (pd_ignore_unused) {
+ pr_warn("genpd: Not disabling unused power domains\n");
+ return 0;
+ }
+
+ mutex_lock(&gpd_list_lock);
+
+ list_for_each_entry(genpd, &gpd_list, gpd_list_node)
+ genpd_queue_power_off_work(genpd);
+
+ mutex_unlock(&gpd_list_lock);
+
+ return 0;
+}
+late_initcall(genpd_power_off_unused);
+
+#ifdef CONFIG_PM_SLEEP
+
+/**
+ * genpd_sync_power_off - Synchronously power off a PM domain and its parents.
+ * @genpd: PM domain to power off, if possible.
+ * @use_lock: use the lock.
+ * @depth: nesting count for lockdep.
+ *
+ * Check if the given PM domain can be powered off (during system suspend or
+ * hibernation) and do that if so. Also, in that case propagate to its parents.
+ *
+ * This function is only called in "noirq" and "syscore" stages of system power
+ * transitions. The "noirq" callbacks may be executed asynchronously, thus in
+ * these cases the lock must be held.
+ */
+static void genpd_sync_power_off(struct generic_pm_domain *genpd, bool use_lock,
+ unsigned int depth)
+{
+ struct gpd_link *link;
+
+ if (!genpd_status_on(genpd) || genpd_is_always_on(genpd))
+ return;
+
+ if (genpd->suspended_count != genpd->device_count
+ || atomic_read(&genpd->sd_count) > 0)
+ return;
+
+ /* Choose the deepest state when suspending */
+ genpd->state_idx = genpd->state_count - 1;
+ if (_genpd_power_off(genpd, false))
+ return;
+
+ genpd->status = GENPD_STATE_OFF;
+
+ list_for_each_entry(link, &genpd->child_links, child_node) {
+ genpd_sd_counter_dec(link->parent);
+
+ if (use_lock)
+ genpd_lock_nested(link->parent, depth + 1);
+
+ genpd_sync_power_off(link->parent, use_lock, depth + 1);
+
+ if (use_lock)
+ genpd_unlock(link->parent);
+ }
+}
+
+/**
+ * genpd_sync_power_on - Synchronously power on a PM domain and its parents.
+ * @genpd: PM domain to power on.
+ * @use_lock: use the lock.
+ * @depth: nesting count for lockdep.
+ *
+ * This function is only called in "noirq" and "syscore" stages of system power
+ * transitions. The "noirq" callbacks may be executed asynchronously, thus in
+ * these cases the lock must be held.
+ */
+static void genpd_sync_power_on(struct generic_pm_domain *genpd, bool use_lock,
+ unsigned int depth)
+{
+ struct gpd_link *link;
+
+ if (genpd_status_on(genpd))
+ return;
+
+ list_for_each_entry(link, &genpd->child_links, child_node) {
+ genpd_sd_counter_inc(link->parent);
+
+ if (use_lock)
+ genpd_lock_nested(link->parent, depth + 1);
+
+ genpd_sync_power_on(link->parent, use_lock, depth + 1);
+
+ if (use_lock)
+ genpd_unlock(link->parent);
+ }
+
+ _genpd_power_on(genpd, false);
+ genpd->status = GENPD_STATE_ON;
+}
+
+/**
+ * resume_needed - Check whether to resume a device before system suspend.
+ * @dev: Device to check.
+ * @genpd: PM domain the device belongs to.
+ *
+ * There are two cases in which a device that can wake up the system from sleep
+ * states should be resumed by genpd_prepare(): (1) if the device is enabled
+ * to wake up the system and it has to remain active for this purpose while the
+ * system is in the sleep state and (2) if the device is not enabled to wake up
+ * the system from sleep states and it generally doesn't generate wakeup signals
+ * by itself (those signals are generated on its behalf by other parts of the
+ * system). In the latter case it may be necessary to reconfigure the device's
+ * wakeup settings during system suspend, because it may have been set up to
+ * signal remote wakeup from the system's working state as needed by runtime PM.
+ * Return 'true' in either of the above cases.
+ */
+static bool resume_needed(struct device *dev,
+ const struct generic_pm_domain *genpd)
+{
+ bool active_wakeup;
+
+ if (!device_can_wakeup(dev))
+ return false;
+
+ active_wakeup = genpd_is_active_wakeup(genpd);
+ return device_may_wakeup(dev) ? active_wakeup : !active_wakeup;
+}
+
+/**
+ * genpd_prepare - Start power transition of a device in a PM domain.
+ * @dev: Device to start the transition of.
+ *
+ * Start a power transition of a device (during a system-wide power transition)
+ * under the assumption that its pm_domain field points to the domain member of
+ * an object of type struct generic_pm_domain representing a PM domain
+ * consisting of I/O devices.
+ */
+static int genpd_prepare(struct device *dev)
+{
+ struct generic_pm_domain *genpd;
+ int ret;
+
+ dev_dbg(dev, "%s()\n", __func__);
+
+ genpd = dev_to_genpd(dev);
+ if (IS_ERR(genpd))
+ return -EINVAL;
+
+ /*
+ * If a wakeup request is pending for the device, it should be woken up
+ * at this point and a system wakeup event should be reported if it's
+ * set up to wake up the system from sleep states.
+ */
+ if (resume_needed(dev, genpd))
+ pm_runtime_resume(dev);
+
+ genpd_lock(genpd);
+
+ if (genpd->prepared_count++ == 0)
+ genpd->suspended_count = 0;
+
+ genpd_unlock(genpd);
+
+ ret = pm_generic_prepare(dev);
+ if (ret < 0) {
+ genpd_lock(genpd);
+
+ genpd->prepared_count--;
+
+ genpd_unlock(genpd);
+ }
+
+	/* Never return 1, as genpd doesn't cope with the direct_complete path. */
+ return ret >= 0 ? 0 : ret;
+}
+
+/**
+ * genpd_finish_suspend - Completion of suspend or hibernation of device in an
+ * I/O PM domain.
+ * @dev: Device to suspend.
+ * @poweroff: Specifies if this is a poweroff_noirq or suspend_noirq callback.
+ *
+ * Stop the device and remove power from the domain if all devices in it have
+ * been stopped.
+ */
+static int genpd_finish_suspend(struct device *dev, bool poweroff)
+{
+ struct generic_pm_domain *genpd;
+ int ret = 0;
+
+ genpd = dev_to_genpd(dev);
+ if (IS_ERR(genpd))
+ return -EINVAL;
+
+ if (poweroff)
+ ret = pm_generic_poweroff_noirq(dev);
+ else
+ ret = pm_generic_suspend_noirq(dev);
+ if (ret)
+ return ret;
+
+ if (dev->power.wakeup_path && genpd_is_active_wakeup(genpd))
+ return 0;
+
+ if (genpd->dev_ops.stop && genpd->dev_ops.start &&
+ !pm_runtime_status_suspended(dev)) {
+ ret = genpd_stop_dev(genpd, dev);
+ if (ret) {
+ if (poweroff)
+ pm_generic_restore_noirq(dev);
+ else
+ pm_generic_resume_noirq(dev);
+ return ret;
+ }
+ }
+
+ genpd_lock(genpd);
+ genpd->suspended_count++;
+ genpd_sync_power_off(genpd, true, 0);
+ genpd_unlock(genpd);
+
+ return 0;
+}
+
+/**
+ * genpd_suspend_noirq - Completion of suspend of device in an I/O PM domain.
+ * @dev: Device to suspend.
+ *
+ * Stop the device and remove power from the domain if all devices in it have
+ * been stopped.
+ */
+static int genpd_suspend_noirq(struct device *dev)
+{
+ dev_dbg(dev, "%s()\n", __func__);
+
+ return genpd_finish_suspend(dev, false);
+}
+
+/**
+ * genpd_resume_noirq - Start of resume of device in an I/O PM domain.
+ * @dev: Device to resume.
+ *
+ * Restore power to the device's PM domain, if necessary, and start the device.
+ */
+static int genpd_resume_noirq(struct device *dev)
+{
+ struct generic_pm_domain *genpd;
+ int ret;
+
+ dev_dbg(dev, "%s()\n", __func__);
+
+ genpd = dev_to_genpd(dev);
+ if (IS_ERR(genpd))
+ return -EINVAL;
+
+ if (dev->power.wakeup_path && genpd_is_active_wakeup(genpd))
+ return pm_generic_resume_noirq(dev);
+
+ genpd_lock(genpd);
+ genpd_sync_power_on(genpd, true, 0);
+ genpd->suspended_count--;
+ genpd_unlock(genpd);
+
+ if (genpd->dev_ops.stop && genpd->dev_ops.start &&
+ !pm_runtime_status_suspended(dev)) {
+ ret = genpd_start_dev(genpd, dev);
+ if (ret)
+ return ret;
+ }
+
+ return pm_generic_resume_noirq(dev);
+}
+
+/**
+ * genpd_freeze_noirq - Completion of freezing a device in an I/O PM domain.
+ * @dev: Device to freeze.
+ *
+ * Carry out a late freeze of a device under the assumption that its
+ * pm_domain field points to the domain member of an object of type
+ * struct generic_pm_domain representing a power domain consisting of I/O
+ * devices.
+ */
+static int genpd_freeze_noirq(struct device *dev)
+{
+ const struct generic_pm_domain *genpd;
+ int ret = 0;
+
+ dev_dbg(dev, "%s()\n", __func__);
+
+ genpd = dev_to_genpd(dev);
+ if (IS_ERR(genpd))
+ return -EINVAL;
+
+ ret = pm_generic_freeze_noirq(dev);
+ if (ret)
+ return ret;
+
+ if (genpd->dev_ops.stop && genpd->dev_ops.start &&
+ !pm_runtime_status_suspended(dev))
+ ret = genpd_stop_dev(genpd, dev);
+
+ return ret;
+}
+
+/**
+ * genpd_thaw_noirq - Early thaw of device in an I/O PM domain.
+ * @dev: Device to thaw.
+ *
+ * Start the device, unless power has been removed from the domain already
+ * before the system transition.
+ */
+static int genpd_thaw_noirq(struct device *dev)
+{
+ const struct generic_pm_domain *genpd;
+ int ret = 0;
+
+ dev_dbg(dev, "%s()\n", __func__);
+
+ genpd = dev_to_genpd(dev);
+ if (IS_ERR(genpd))
+ return -EINVAL;
+
+ if (genpd->dev_ops.stop && genpd->dev_ops.start &&
+ !pm_runtime_status_suspended(dev)) {
+ ret = genpd_start_dev(genpd, dev);
+ if (ret)
+ return ret;
+ }
+
+ return pm_generic_thaw_noirq(dev);
+}
+
+/**
+ * genpd_poweroff_noirq - Completion of hibernation of device in an
+ * I/O PM domain.
+ * @dev: Device to poweroff.
+ *
+ * Stop the device and remove power from the domain if all devices in it have
+ * been stopped.
+ */
+static int genpd_poweroff_noirq(struct device *dev)
+{
+ dev_dbg(dev, "%s()\n", __func__);
+
+ return genpd_finish_suspend(dev, true);
+}
+
+/**
+ * genpd_restore_noirq - Start of restore of device in an I/O PM domain.
+ * @dev: Device to resume.
+ *
+ * Make sure the domain will be in the same power state as before the
+ * hibernation the system is resuming from and start the device if necessary.
+ */
+static int genpd_restore_noirq(struct device *dev)
+{
+ struct generic_pm_domain *genpd;
+ int ret = 0;
+
+ dev_dbg(dev, "%s()\n", __func__);
+
+ genpd = dev_to_genpd(dev);
+ if (IS_ERR(genpd))
+ return -EINVAL;
+
+ /*
+ * At this point suspended_count == 0 means we are being run for the
+ * first time for the given domain in the present cycle.
+ */
+ genpd_lock(genpd);
+ if (genpd->suspended_count++ == 0) {
+ /*
+ * The boot kernel might put the domain into an arbitrary state,
+ * so make it appear as powered off to genpd_sync_power_on(),
+ * so that it tries to power it on in case it was really off.
+ */
+ genpd->status = GENPD_STATE_OFF;
+ }
+
+ genpd_sync_power_on(genpd, true, 0);
+ genpd_unlock(genpd);
+
+ if (genpd->dev_ops.stop && genpd->dev_ops.start &&
+ !pm_runtime_status_suspended(dev)) {
+ ret = genpd_start_dev(genpd, dev);
+ if (ret)
+ return ret;
+ }
+
+ return pm_generic_restore_noirq(dev);
+}
+
+/**
+ * genpd_complete - Complete power transition of a device in a power domain.
+ * @dev: Device to complete the transition of.
+ *
+ * Complete a power transition of a device (during a system-wide power
+ * transition) under the assumption that its pm_domain field points to the
+ * domain member of an object of type struct generic_pm_domain representing
+ * a power domain consisting of I/O devices.
+ */
+static void genpd_complete(struct device *dev)
+{
+ struct generic_pm_domain *genpd;
+
+ dev_dbg(dev, "%s()\n", __func__);
+
+ genpd = dev_to_genpd(dev);
+ if (IS_ERR(genpd))
+ return;
+
+ pm_generic_complete(dev);
+
+ genpd_lock(genpd);
+
+ genpd->prepared_count--;
+ if (!genpd->prepared_count)
+ genpd_queue_power_off_work(genpd);
+
+ genpd_unlock(genpd);
+}
+
+/**
+ * genpd_syscore_switch - Switch power during system core suspend or resume.
+ * @dev: Device that normally is marked as "always on" to switch power for.
+ * @suspend: Whether to switch the domain's power off (true) or back on (false).
+ *
+ * This routine may only be called during the system core (syscore) suspend or
+ * resume phase for devices whose "always on" flags are set.
+ */
+static void genpd_syscore_switch(struct device *dev, bool suspend)
+{
+ struct generic_pm_domain *genpd;
+
+ genpd = dev_to_genpd_safe(dev);
+ if (!genpd)
+ return;
+
+ if (suspend) {
+ genpd->suspended_count++;
+ genpd_sync_power_off(genpd, false, 0);
+ } else {
+ genpd_sync_power_on(genpd, false, 0);
+ genpd->suspended_count--;
+ }
+}
+
+void pm_genpd_syscore_poweroff(struct device *dev)
+{
+ genpd_syscore_switch(dev, true);
+}
+EXPORT_SYMBOL_GPL(pm_genpd_syscore_poweroff);
+
+void pm_genpd_syscore_poweron(struct device *dev)
+{
+ genpd_syscore_switch(dev, false);
+}
+EXPORT_SYMBOL_GPL(pm_genpd_syscore_poweron);
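+
+/*
+ * Usage sketch (illustrative only): a timer or clock-event driver whose
+ * device must keep working across the noirq phases can drive the two helpers
+ * above from its own syscore hooks. The "foo_timer_*" names and the
+ * "foo_timer_dev" pointer are hypothetical.
+ *
+ *	static int foo_timer_syscore_suspend(void)
+ *	{
+ *		pm_genpd_syscore_poweroff(foo_timer_dev);
+ *		return 0;
+ *	}
+ *
+ *	static void foo_timer_syscore_resume(void)
+ *	{
+ *		pm_genpd_syscore_poweron(foo_timer_dev);
+ *	}
+ *
+ *	static struct syscore_ops foo_timer_syscore_ops = {
+ *		.suspend = foo_timer_syscore_suspend,
+ *		.resume = foo_timer_syscore_resume,
+ *	};
+ *
+ * The ops would be registered with register_syscore_ops() during probe.
+ */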
+
+#else /* !CONFIG_PM_SLEEP */
+
+#define genpd_prepare NULL
+#define genpd_suspend_noirq NULL
+#define genpd_resume_noirq NULL
+#define genpd_freeze_noirq NULL
+#define genpd_thaw_noirq NULL
+#define genpd_poweroff_noirq NULL
+#define genpd_restore_noirq NULL
+#define genpd_complete NULL
+
+#endif /* CONFIG_PM_SLEEP */
+
+static struct generic_pm_domain_data *genpd_alloc_dev_data(struct device *dev)
+{
+ struct generic_pm_domain_data *gpd_data;
+ int ret;
+
+ ret = dev_pm_get_subsys_data(dev);
+ if (ret)
+ return ERR_PTR(ret);
+
+ gpd_data = kzalloc(sizeof(*gpd_data), GFP_KERNEL);
+ if (!gpd_data) {
+ ret = -ENOMEM;
+ goto err_put;
+ }
+
+ gpd_data->base.dev = dev;
+ gpd_data->td.constraint_changed = true;
+ gpd_data->td.effective_constraint_ns = PM_QOS_RESUME_LATENCY_NO_CONSTRAINT_NS;
+ gpd_data->nb.notifier_call = genpd_dev_pm_qos_notifier;
+
+ spin_lock_irq(&dev->power.lock);
+
+ if (dev->power.subsys_data->domain_data) {
+ ret = -EINVAL;
+ goto err_free;
+ }
+
+ dev->power.subsys_data->domain_data = &gpd_data->base;
+
+ spin_unlock_irq(&dev->power.lock);
+
+ return gpd_data;
+
+ err_free:
+ spin_unlock_irq(&dev->power.lock);
+ kfree(gpd_data);
+ err_put:
+ dev_pm_put_subsys_data(dev);
+ return ERR_PTR(ret);
+}
+
+static void genpd_free_dev_data(struct device *dev,
+ struct generic_pm_domain_data *gpd_data)
+{
+ spin_lock_irq(&dev->power.lock);
+
+ dev->power.subsys_data->domain_data = NULL;
+
+ spin_unlock_irq(&dev->power.lock);
+
+ kfree(gpd_data);
+ dev_pm_put_subsys_data(dev);
+}
+
+static void genpd_update_cpumask(struct generic_pm_domain *genpd,
+ int cpu, bool set, unsigned int depth)
+{
+ struct gpd_link *link;
+
+ if (!genpd_is_cpu_domain(genpd))
+ return;
+
+ list_for_each_entry(link, &genpd->child_links, child_node) {
+ struct generic_pm_domain *parent = link->parent;
+
+ genpd_lock_nested(parent, depth + 1);
+ genpd_update_cpumask(parent, cpu, set, depth + 1);
+ genpd_unlock(parent);
+ }
+
+ if (set)
+ cpumask_set_cpu(cpu, genpd->cpus);
+ else
+ cpumask_clear_cpu(cpu, genpd->cpus);
+}
+
+static void genpd_set_cpumask(struct generic_pm_domain *genpd, int cpu)
+{
+ if (cpu >= 0)
+ genpd_update_cpumask(genpd, cpu, true, 0);
+}
+
+static void genpd_clear_cpumask(struct generic_pm_domain *genpd, int cpu)
+{
+ if (cpu >= 0)
+ genpd_update_cpumask(genpd, cpu, false, 0);
+}
+
+static int genpd_get_cpu(struct generic_pm_domain *genpd, struct device *dev)
+{
+ int cpu;
+
+ if (!genpd_is_cpu_domain(genpd))
+ return -1;
+
+ for_each_possible_cpu(cpu) {
+ if (get_cpu_device(cpu) == dev)
+ return cpu;
+ }
+
+ return -1;
+}
+
+static int genpd_add_device(struct generic_pm_domain *genpd, struct device *dev,
+ struct device *base_dev)
+{
+ struct generic_pm_domain_data *gpd_data;
+ int ret;
+
+ dev_dbg(dev, "%s()\n", __func__);
+
+ if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(dev))
+ return -EINVAL;
+
+ gpd_data = genpd_alloc_dev_data(dev);
+ if (IS_ERR(gpd_data))
+ return PTR_ERR(gpd_data);
+
+ gpd_data->cpu = genpd_get_cpu(genpd, base_dev);
+
+ ret = genpd->attach_dev ? genpd->attach_dev(genpd, dev) : 0;
+ if (ret)
+ goto out;
+
+ genpd_lock(genpd);
+
+ genpd_set_cpumask(genpd, gpd_data->cpu);
+ dev_pm_domain_set(dev, &genpd->domain);
+
+ genpd->device_count++;
+ genpd->max_off_time_changed = true;
+
+ list_add_tail(&gpd_data->base.list_node, &genpd->dev_list);
+
+ genpd_unlock(genpd);
+ out:
+ if (ret)
+ genpd_free_dev_data(dev, gpd_data);
+ else
+ dev_pm_qos_add_notifier(dev, &gpd_data->nb,
+ DEV_PM_QOS_RESUME_LATENCY);
+
+ return ret;
+}
+
+/**
+ * pm_genpd_add_device - Add a device to an I/O PM domain.
+ * @genpd: PM domain to add the device to.
+ * @dev: Device to be added.
+ */
+int pm_genpd_add_device(struct generic_pm_domain *genpd, struct device *dev)
+{
+ int ret;
+
+ mutex_lock(&gpd_list_lock);
+ ret = genpd_add_device(genpd, dev, dev);
+ mutex_unlock(&gpd_list_lock);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(pm_genpd_add_device);
+
+static int genpd_remove_device(struct generic_pm_domain *genpd,
+ struct device *dev)
+{
+ struct generic_pm_domain_data *gpd_data;
+ struct pm_domain_data *pdd;
+ int ret = 0;
+
+ dev_dbg(dev, "%s()\n", __func__);
+
+ pdd = dev->power.subsys_data->domain_data;
+ gpd_data = to_gpd_data(pdd);
+ dev_pm_qos_remove_notifier(dev, &gpd_data->nb,
+ DEV_PM_QOS_RESUME_LATENCY);
+
+ genpd_lock(genpd);
+
+ if (genpd->prepared_count > 0) {
+ ret = -EAGAIN;
+ goto out;
+ }
+
+ genpd->device_count--;
+ genpd->max_off_time_changed = true;
+
+ genpd_clear_cpumask(genpd, gpd_data->cpu);
+ dev_pm_domain_set(dev, NULL);
+
+ list_del_init(&pdd->list_node);
+
+ genpd_unlock(genpd);
+
+ if (genpd->detach_dev)
+ genpd->detach_dev(genpd, dev);
+
+ genpd_free_dev_data(dev, gpd_data);
+
+ return 0;
+
+ out:
+ genpd_unlock(genpd);
+ dev_pm_qos_add_notifier(dev, &gpd_data->nb, DEV_PM_QOS_RESUME_LATENCY);
+
+ return ret;
+}
+
+/**
+ * pm_genpd_remove_device - Remove a device from an I/O PM domain.
+ * @dev: Device to be removed.
+ */
+int pm_genpd_remove_device(struct device *dev)
+{
+ struct generic_pm_domain *genpd = dev_to_genpd_safe(dev);
+
+ if (!genpd)
+ return -EINVAL;
+
+ return genpd_remove_device(genpd, dev);
+}
+EXPORT_SYMBOL_GPL(pm_genpd_remove_device);
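+
+/*
+ * Usage sketch (illustrative only): a provider that owns both the domain and
+ * a device can wire them together directly. "foo_pd" and "foo_dev" are
+ * hypothetical and error handling is trimmed.
+ *
+ *	ret = pm_genpd_add_device(&foo_pd, foo_dev);
+ *	if (ret)
+ *		return ret;
+ *
+ *	// ... use the device under runtime PM ...
+ *
+ *	pm_genpd_remove_device(foo_dev);	// detach on teardown
+ */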
+
+/**
+ * dev_pm_genpd_add_notifier - Add a genpd power on/off notifier for @dev
+ *
+ * @dev: Device that should be associated with the notifier
+ * @nb: The notifier block to register
+ *
+ * Users may call this function to add a genpd power on/off notifier for an
+ * attached @dev. Only one notifier per device is allowed. The notifier is
+ * called when genpd powers the PM domain on or off.
+ *
+ * It is assumed that the caller guarantees that the genpd won't be detached
+ * while this routine is running.
+ *
+ * Returns 0 on success and negative error values on failures.
+ */
+int dev_pm_genpd_add_notifier(struct device *dev, struct notifier_block *nb)
+{
+ struct generic_pm_domain *genpd;
+ struct generic_pm_domain_data *gpd_data;
+ int ret;
+
+ genpd = dev_to_genpd_safe(dev);
+ if (!genpd)
+ return -ENODEV;
+
+ if (WARN_ON(!dev->power.subsys_data ||
+ !dev->power.subsys_data->domain_data))
+ return -EINVAL;
+
+ gpd_data = to_gpd_data(dev->power.subsys_data->domain_data);
+ if (gpd_data->power_nb)
+ return -EEXIST;
+
+ genpd_lock(genpd);
+ ret = raw_notifier_chain_register(&genpd->power_notifiers, nb);
+ genpd_unlock(genpd);
+
+ if (ret) {
+ dev_warn(dev, "failed to add notifier for PM domain %s\n",
+ genpd->name);
+ return ret;
+ }
+
+ gpd_data->power_nb = nb;
+ return 0;
+}
+EXPORT_SYMBOL_GPL(dev_pm_genpd_add_notifier);
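+
+/*
+ * Usage sketch (illustrative only): a consumer attached to a genpd can watch
+ * the domain's power transitions. The "foo_*" names are hypothetical; the
+ * GENPD_NOTIFY_* actions are defined in linux/pm_domain.h.
+ *
+ *	static int foo_genpd_notify(struct notifier_block *nb,
+ *				    unsigned long action, void *data)
+ *	{
+ *		switch (action) {
+ *		case GENPD_NOTIFY_PRE_OFF:
+ *			// save context before the domain goes down
+ *			break;
+ *		case GENPD_NOTIFY_ON:
+ *			// restore context after the domain is powered up
+ *			break;
+ *		}
+ *		return NOTIFY_OK;
+ *	}
+ *
+ *	static struct notifier_block foo_nb = {
+ *		.notifier_call = foo_genpd_notify,
+ *	};
+ *
+ *	ret = dev_pm_genpd_add_notifier(foo_dev, &foo_nb);
+ *
+ * The matching dev_pm_genpd_remove_notifier() call below undoes this.
+ */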
+
+/**
+ * dev_pm_genpd_remove_notifier - Remove a genpd power on/off notifier for @dev
+ *
+ * @dev: Device that is associated with the notifier
+ *
+ * Users may call this function to remove a genpd power on/off notifier for an
+ * attached @dev.
+ *
+ * It is assumed that the caller guarantees that the genpd won't be detached
+ * while this routine is running.
+ *
+ * Returns 0 on success and negative error values on failures.
+ */
+int dev_pm_genpd_remove_notifier(struct device *dev)
+{
+ struct generic_pm_domain *genpd;
+ struct generic_pm_domain_data *gpd_data;
+ int ret;
+
+ genpd = dev_to_genpd_safe(dev);
+ if (!genpd)
+ return -ENODEV;
+
+ if (WARN_ON(!dev->power.subsys_data ||
+ !dev->power.subsys_data->domain_data))
+ return -EINVAL;
+
+ gpd_data = to_gpd_data(dev->power.subsys_data->domain_data);
+ if (!gpd_data->power_nb)
+ return -ENODEV;
+
+ genpd_lock(genpd);
+ ret = raw_notifier_chain_unregister(&genpd->power_notifiers,
+ gpd_data->power_nb);
+ genpd_unlock(genpd);
+
+ if (ret) {
+ dev_warn(dev, "failed to remove notifier for PM domain %s\n",
+ genpd->name);
+ return ret;
+ }
+
+ gpd_data->power_nb = NULL;
+ return 0;
+}
+EXPORT_SYMBOL_GPL(dev_pm_genpd_remove_notifier);
+
+static int genpd_add_subdomain(struct generic_pm_domain *genpd,
+ struct generic_pm_domain *subdomain)
+{
+ struct gpd_link *link, *itr;
+ int ret = 0;
+
+ if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(subdomain)
+ || genpd == subdomain)
+ return -EINVAL;
+
+ /*
+ * If the domain can be powered on/off in an IRQ safe
+ * context, ensure that the subdomain can also be
+ * powered on/off in that context.
+ */
+ if (!genpd_is_irq_safe(genpd) && genpd_is_irq_safe(subdomain)) {
+ WARN(1, "Parent %s of subdomain %s must be IRQ safe\n",
+ genpd->name, subdomain->name);
+ return -EINVAL;
+ }
+
+ link = kzalloc(sizeof(*link), GFP_KERNEL);
+ if (!link)
+ return -ENOMEM;
+
+ genpd_lock(subdomain);
+ genpd_lock_nested(genpd, SINGLE_DEPTH_NESTING);
+
+ if (!genpd_status_on(genpd) && genpd_status_on(subdomain)) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ list_for_each_entry(itr, &genpd->parent_links, parent_node) {
+ if (itr->child == subdomain && itr->parent == genpd) {
+ ret = -EINVAL;
+ goto out;
+ }
+ }
+
+ link->parent = genpd;
+ list_add_tail(&link->parent_node, &genpd->parent_links);
+ link->child = subdomain;
+ list_add_tail(&link->child_node, &subdomain->child_links);
+ if (genpd_status_on(subdomain))
+ genpd_sd_counter_inc(genpd);
+
+ out:
+ genpd_unlock(genpd);
+ genpd_unlock(subdomain);
+ if (ret)
+ kfree(link);
+ return ret;
+}
+
+/**
+ * pm_genpd_add_subdomain - Add a subdomain to an I/O PM domain.
+ * @genpd: Leader PM domain to add the subdomain to.
+ * @subdomain: Subdomain to be added.
+ */
+int pm_genpd_add_subdomain(struct generic_pm_domain *genpd,
+ struct generic_pm_domain *subdomain)
+{
+ int ret;
+
+ mutex_lock(&gpd_list_lock);
+ ret = genpd_add_subdomain(genpd, subdomain);
+ mutex_unlock(&gpd_list_lock);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(pm_genpd_add_subdomain);
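+
+/*
+ * Usage sketch (illustrative only): building a parent/child hierarchy between
+ * two domains owned by the same provider. "foo_parent_pd" and "foo_child_pd"
+ * are hypothetical.
+ *
+ *	ret = pm_genpd_add_subdomain(&foo_parent_pd, &foo_child_pd);
+ *	if (ret)
+ *		return ret;
+ *
+ * The parent must be IRQ safe if the child is, and the child cannot already
+ * be powered on while the parent is off, as checked in genpd_add_subdomain()
+ * above. pm_genpd_remove_subdomain() below reverses the operation.
+ */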
+
+/**
+ * pm_genpd_remove_subdomain - Remove a subdomain from an I/O PM domain.
+ * @genpd: Leader PM domain to remove the subdomain from.
+ * @subdomain: Subdomain to be removed.
+ */
+int pm_genpd_remove_subdomain(struct generic_pm_domain *genpd,
+ struct generic_pm_domain *subdomain)
+{
+ struct gpd_link *l, *link;
+ int ret = -EINVAL;
+
+ if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(subdomain))
+ return -EINVAL;
+
+ genpd_lock(subdomain);
+ genpd_lock_nested(genpd, SINGLE_DEPTH_NESTING);
+
+ if (!list_empty(&subdomain->parent_links) || subdomain->device_count) {
+ pr_warn("%s: unable to remove subdomain %s\n",
+ genpd->name, subdomain->name);
+ ret = -EBUSY;
+ goto out;
+ }
+
+ list_for_each_entry_safe(link, l, &genpd->parent_links, parent_node) {
+ if (link->child != subdomain)
+ continue;
+
+ list_del(&link->parent_node);
+ list_del(&link->child_node);
+ kfree(link);
+ if (genpd_status_on(subdomain))
+ genpd_sd_counter_dec(genpd);
+
+ ret = 0;
+ break;
+ }
+
+out:
+ genpd_unlock(genpd);
+ genpd_unlock(subdomain);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(pm_genpd_remove_subdomain);
+
+static void genpd_free_default_power_state(struct genpd_power_state *states,
+ unsigned int state_count)
+{
+ kfree(states);
+}
+
+static int genpd_set_default_power_state(struct generic_pm_domain *genpd)
+{
+ struct genpd_power_state *state;
+
+ state = kzalloc(sizeof(*state), GFP_KERNEL);
+ if (!state)
+ return -ENOMEM;
+
+ genpd->states = state;
+ genpd->state_count = 1;
+ genpd->free_states = genpd_free_default_power_state;
+
+ return 0;
+}
+
+static void genpd_lock_init(struct generic_pm_domain *genpd)
+{
+ if (genpd->flags & GENPD_FLAG_IRQ_SAFE) {
+ spin_lock_init(&genpd->slock);
+ genpd->lock_ops = &genpd_spin_ops;
+ } else {
+ mutex_init(&genpd->mlock);
+ genpd->lock_ops = &genpd_mtx_ops;
+ }
+}
+
+/**
+ * pm_genpd_init - Initialize a generic I/O PM domain object.
+ * @genpd: PM domain object to initialize.
+ * @gov: PM domain governor to associate with the domain (may be NULL).
+ * @is_off: Whether the domain should be initialized as powered off.
+ *
+ * Returns 0 on successful initialization, else a negative error code.
+ */
+int pm_genpd_init(struct generic_pm_domain *genpd,
+ struct dev_power_governor *gov, bool is_off)
+{
+ int ret;
+
+ if (IS_ERR_OR_NULL(genpd))
+ return -EINVAL;
+
+ INIT_LIST_HEAD(&genpd->parent_links);
+ INIT_LIST_HEAD(&genpd->child_links);
+ INIT_LIST_HEAD(&genpd->dev_list);
+ RAW_INIT_NOTIFIER_HEAD(&genpd->power_notifiers);
+ genpd_lock_init(genpd);
+ genpd->gov = gov;
+ INIT_WORK(&genpd->power_off_work, genpd_power_off_work_fn);
+ atomic_set(&genpd->sd_count, 0);
+ genpd->status = is_off ? GENPD_STATE_OFF : GENPD_STATE_ON;
+ genpd->device_count = 0;
+ genpd->max_off_time_ns = -1;
+ genpd->max_off_time_changed = true;
+ genpd->provider = NULL;
+ genpd->has_provider = false;
+ genpd->accounting_time = ktime_get();
+ genpd->domain.ops.runtime_suspend = genpd_runtime_suspend;
+ genpd->domain.ops.runtime_resume = genpd_runtime_resume;
+ genpd->domain.ops.prepare = genpd_prepare;
+ genpd->domain.ops.suspend_noirq = genpd_suspend_noirq;
+ genpd->domain.ops.resume_noirq = genpd_resume_noirq;
+ genpd->domain.ops.freeze_noirq = genpd_freeze_noirq;
+ genpd->domain.ops.thaw_noirq = genpd_thaw_noirq;
+ genpd->domain.ops.poweroff_noirq = genpd_poweroff_noirq;
+ genpd->domain.ops.restore_noirq = genpd_restore_noirq;
+ genpd->domain.ops.complete = genpd_complete;
+ genpd->domain.start = genpd_dev_pm_start;
+
+ if (genpd->flags & GENPD_FLAG_PM_CLK) {
+ genpd->dev_ops.stop = pm_clk_suspend;
+ genpd->dev_ops.start = pm_clk_resume;
+ }
+
+ /* Always-on domains must be powered on at initialization. */
+ if ((genpd_is_always_on(genpd) || genpd_is_rpm_always_on(genpd)) &&
+ !genpd_status_on(genpd))
+ return -EINVAL;
+
+ if (genpd_is_cpu_domain(genpd) &&
+ !zalloc_cpumask_var(&genpd->cpus, GFP_KERNEL))
+ return -ENOMEM;
+
+ /* Use only one "off" state if there were no states declared */
+ if (genpd->state_count == 0) {
+ ret = genpd_set_default_power_state(genpd);
+ if (ret) {
+ if (genpd_is_cpu_domain(genpd))
+ free_cpumask_var(genpd->cpus);
+ return ret;
+ }
+ } else if (!gov && genpd->state_count > 1) {
+ pr_warn("%s: no governor for states\n", genpd->name);
+ }
+
+ device_initialize(&genpd->dev);
+ dev_set_name(&genpd->dev, "%s", genpd->name);
+
+ mutex_lock(&gpd_list_lock);
+ list_add(&genpd->gpd_list_node, &gpd_list);
+ mutex_unlock(&gpd_list_lock);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(pm_genpd_init);
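+
+/*
+ * Usage sketch (illustrative only): the minimal setup a provider does before
+ * adding devices or registering a DT provider. The "foo_pd" object and its
+ * callbacks are hypothetical.
+ *
+ *	static int foo_pd_power_on(struct generic_pm_domain *pd)
+ *	{
+ *		// program the power controller to enable the domain
+ *		return 0;
+ *	}
+ *
+ *	static int foo_pd_power_off(struct generic_pm_domain *pd)
+ *	{
+ *		// program the power controller to disable the domain
+ *		return 0;
+ *	}
+ *
+ *	static struct generic_pm_domain foo_pd = {
+ *		.name = "foo",
+ *		.power_on = foo_pd_power_on,
+ *		.power_off = foo_pd_power_off,
+ *	};
+ *
+ *	ret = pm_genpd_init(&foo_pd, NULL, true);	// start powered off
+ */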
+
+static int genpd_remove(struct generic_pm_domain *genpd)
+{
+ struct gpd_link *l, *link;
+
+ if (IS_ERR_OR_NULL(genpd))
+ return -EINVAL;
+
+ genpd_lock(genpd);
+
+ if (genpd->has_provider) {
+ genpd_unlock(genpd);
+ pr_err("Provider present, unable to remove %s\n", genpd->name);
+ return -EBUSY;
+ }
+
+ if (!list_empty(&genpd->parent_links) || genpd->device_count) {
+ genpd_unlock(genpd);
+ pr_err("%s: unable to remove %s\n", __func__, genpd->name);
+ return -EBUSY;
+ }
+
+ list_for_each_entry_safe(link, l, &genpd->child_links, child_node) {
+ list_del(&link->parent_node);
+ list_del(&link->child_node);
+ kfree(link);
+ }
+
+ list_del(&genpd->gpd_list_node);
+ genpd_unlock(genpd);
+ cancel_work_sync(&genpd->power_off_work);
+ if (genpd_is_cpu_domain(genpd))
+ free_cpumask_var(genpd->cpus);
+ if (genpd->free_states)
+ genpd->free_states(genpd->states, genpd->state_count);
+
+ pr_debug("%s: removed %s\n", __func__, genpd->name);
+
+ return 0;
+}
+
+/**
+ * pm_genpd_remove - Remove a generic I/O PM domain
+ * @genpd: Pointer to PM domain that is to be removed.
+ *
+ * To remove the PM domain, this function:
+ * - Removes the PM domain as a subdomain to any parent domains,
+ * if it was added.
+ * - Removes the PM domain from the list of registered PM domains.
+ *
+ * The PM domain will only be removed if the associated provider has
+ * been removed, it is not a parent to any other PM domain, and it has
+ * no devices associated with it.
+ */
+int pm_genpd_remove(struct generic_pm_domain *genpd)
+{
+ int ret;
+
+ mutex_lock(&gpd_list_lock);
+ ret = genpd_remove(genpd);
+ mutex_unlock(&gpd_list_lock);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(pm_genpd_remove);
+
+#ifdef CONFIG_PM_GENERIC_DOMAINS_OF
+
+/*
+ * Device Tree based PM domain providers.
+ *
+ * The code below implements generic device tree based PM domain providers that
+ * bind device tree nodes with generic PM domains registered in the system.
+ *
+ * Any driver that registers generic PM domains and needs to support binding of
+ * devices to these domains is supposed to register a PM domain provider, which
+ * maps a PM domain specifier retrieved from the device tree to a PM domain.
+ *
+ * Two simple mapping functions have been provided for convenience:
+ * - genpd_xlate_simple() for 1:1 device tree node to PM domain mapping.
+ * - genpd_xlate_onecell() for mapping of multiple PM domains per node by
+ * index.
+ */
+
+/**
+ * struct of_genpd_provider - PM domain provider registration structure
+ * @link: Entry in global list of PM domain providers
+ * @node: Pointer to device tree node of PM domain provider
+ * @xlate: Provider-specific xlate callback mapping a set of specifier cells
+ * into a PM domain.
+ * @data: context pointer to be passed into @xlate callback
+ */
+struct of_genpd_provider {
+ struct list_head link;
+ struct device_node *node;
+ genpd_xlate_t xlate;
+ void *data;
+};
+
+/* List of registered PM domain providers. */
+static LIST_HEAD(of_genpd_providers);
+/* Mutex to protect the list above. */
+static DEFINE_MUTEX(of_genpd_mutex);
+
+/**
+ * genpd_xlate_simple() - Xlate function for direct node-domain mapping
+ * @genpdspec: OF phandle args to map into a PM domain
+ * @data: xlate function private data - pointer to struct generic_pm_domain
+ *
+ * This is a generic xlate function that can be used to model PM domains that
+ * have their own device tree nodes. The private data of xlate function needs
+ * to be a valid pointer to struct generic_pm_domain.
+ */
+static struct generic_pm_domain *genpd_xlate_simple(
+ struct of_phandle_args *genpdspec,
+ void *data)
+{
+ return data;
+}
+
+/**
+ * genpd_xlate_onecell() - Xlate function using a single index.
+ * @genpdspec: OF phandle args to map into a PM domain
+ * @data: xlate function private data - pointer to struct genpd_onecell_data
+ *
+ * This is a generic xlate function that can be used to model simple PM domain
+ * controllers that have one device tree node and provide multiple PM domains.
+ * A single cell is used as an index into an array of PM domains specified in
+ * the genpd_onecell_data struct when registering the provider.
+ */
+static struct generic_pm_domain *genpd_xlate_onecell(
+ struct of_phandle_args *genpdspec,
+ void *data)
+{
+ struct genpd_onecell_data *genpd_data = data;
+ unsigned int idx = genpdspec->args[0];
+
+ if (genpdspec->args_count != 1)
+ return ERR_PTR(-EINVAL);
+
+ if (idx >= genpd_data->num_domains) {
+ pr_err("%s: invalid domain index %u\n", __func__, idx);
+ return ERR_PTR(-EINVAL);
+ }
+
+ if (!genpd_data->domains[idx])
+ return ERR_PTR(-ENOENT);
+
+ return genpd_data->domains[idx];
+}
+
+/**
+ * genpd_add_provider() - Register a PM domain provider for a node
+ * @np: Device node pointer associated with the PM domain provider.
+ * @xlate: Callback for decoding PM domain from phandle arguments.
+ * @data: Context pointer for @xlate callback.
+ */
+static int genpd_add_provider(struct device_node *np, genpd_xlate_t xlate,
+ void *data)
+{
+ struct of_genpd_provider *cp;
+
+ cp = kzalloc(sizeof(*cp), GFP_KERNEL);
+ if (!cp)
+ return -ENOMEM;
+
+ cp->node = of_node_get(np);
+ cp->data = data;
+ cp->xlate = xlate;
+
+ mutex_lock(&of_genpd_mutex);
+ list_add(&cp->link, &of_genpd_providers);
+ mutex_unlock(&of_genpd_mutex);
+ pr_debug("Added domain provider from %pOF\n", np);
+
+ return 0;
+}
+
+static bool genpd_present(const struct generic_pm_domain *genpd)
+{
+ const struct generic_pm_domain *gpd;
+
+ list_for_each_entry(gpd, &gpd_list, gpd_list_node)
+ if (gpd == genpd)
+ return true;
+ return false;
+}
+
+/**
+ * of_genpd_add_provider_simple() - Register a simple PM domain provider
+ * @np: Device node pointer associated with the PM domain provider.
+ * @genpd: Pointer to PM domain associated with the PM domain provider.
+ */
+int of_genpd_add_provider_simple(struct device_node *np,
+ struct generic_pm_domain *genpd)
+{
+ int ret = -EINVAL;
+
+ if (!np || !genpd)
+ return -EINVAL;
+
+ mutex_lock(&gpd_list_lock);
+
+ if (!genpd_present(genpd))
+ goto unlock;
+
+ genpd->dev.of_node = np;
+
+ /* Parse genpd OPP table */
+ if (genpd->set_performance_state) {
+ ret = dev_pm_opp_of_add_table(&genpd->dev);
+ if (ret) {
+ if (ret != -EPROBE_DEFER)
+ dev_err(&genpd->dev, "Failed to add OPP table: %d\n",
+ ret);
+ goto unlock;
+ }
+
+ /*
+ * Save table for faster processing while setting performance
+ * state.
+ */
+ genpd->opp_table = dev_pm_opp_get_opp_table(&genpd->dev);
+ WARN_ON(IS_ERR(genpd->opp_table));
+ }
+
+ ret = genpd_add_provider(np, genpd_xlate_simple, genpd);
+ if (ret) {
+ if (genpd->set_performance_state) {
+ dev_pm_opp_put_opp_table(genpd->opp_table);
+ dev_pm_opp_of_remove_table(&genpd->dev);
+ }
+
+ goto unlock;
+ }
+
+ genpd->provider = &np->fwnode;
+ genpd->has_provider = true;
+
+unlock:
+ mutex_unlock(&gpd_list_lock);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(of_genpd_add_provider_simple);
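+
+/*
+ * Usage sketch (illustrative only): exposing a single domain to DT consumers,
+ * typically from a platform driver's probe routine. "foo_pd" is the
+ * hypothetical domain initialized with pm_genpd_init() above.
+ *
+ *	ret = pm_genpd_init(&foo_pd, NULL, true);
+ *	if (ret)
+ *		return ret;
+ *
+ *	ret = of_genpd_add_provider_simple(pdev->dev.of_node, &foo_pd);
+ *	if (ret)
+ *		pm_genpd_remove(&foo_pd);
+ *
+ * The provider's DT node carries "#power-domain-cells = <0>", so consumers
+ * reference it with a bare phandle in their "power-domains" property.
+ */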
+
+/**
+ * of_genpd_add_provider_onecell() - Register a onecell PM domain provider
+ * @np: Device node pointer associated with the PM domain provider.
+ * @data: Pointer to the data associated with the PM domain provider.
+ */
+int of_genpd_add_provider_onecell(struct device_node *np,
+ struct genpd_onecell_data *data)
+{
+ struct generic_pm_domain *genpd;
+ unsigned int i;
+ int ret = -EINVAL;
+
+ if (!np || !data)
+ return -EINVAL;
+
+ mutex_lock(&gpd_list_lock);
+
+ if (!data->xlate)
+ data->xlate = genpd_xlate_onecell;
+
+ for (i = 0; i < data->num_domains; i++) {
+ genpd = data->domains[i];
+
+ if (!genpd)
+ continue;
+ if (!genpd_present(genpd))
+ goto error;
+
+ genpd->dev.of_node = np;
+
+ /* Parse genpd OPP table */
+ if (genpd->set_performance_state) {
+ ret = dev_pm_opp_of_add_table_indexed(&genpd->dev, i);
+ if (ret) {
+ if (ret != -EPROBE_DEFER)
+ dev_err(&genpd->dev, "Failed to add OPP table for index %d: %d\n",
+ i, ret);
+ goto error;
+ }
+
+ /*
+ * Save table for faster processing while setting
+ * performance state.
+ */
+ genpd->opp_table = dev_pm_opp_get_opp_table_indexed(&genpd->dev, i);
+ WARN_ON(IS_ERR(genpd->opp_table));
+ }
+
+ genpd->provider = &np->fwnode;
+ genpd->has_provider = true;
+ }
+
+ ret = genpd_add_provider(np, data->xlate, data);
+ if (ret < 0)
+ goto error;
+
+ mutex_unlock(&gpd_list_lock);
+
+ return 0;
+
+error:
+ while (i--) {
+ genpd = data->domains[i];
+
+ if (!genpd)
+ continue;
+
+ genpd->provider = NULL;
+ genpd->has_provider = false;
+
+ if (genpd->set_performance_state) {
+ dev_pm_opp_put_opp_table(genpd->opp_table);
+ dev_pm_opp_of_remove_table(&genpd->dev);
+ }
+ }
+
+ mutex_unlock(&gpd_list_lock);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(of_genpd_add_provider_onecell);
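+
+/*
+ * Usage sketch (illustrative only): a controller providing several domains
+ * behind one DT node selects them by cell index. The "foo_*" names are
+ * hypothetical.
+ *
+ *	static struct generic_pm_domain *foo_domains[] = {
+ *		&foo_pd_a,
+ *		&foo_pd_b,
+ *	};
+ *
+ *	static struct genpd_onecell_data foo_onecell_data = {
+ *		.domains = foo_domains,
+ *		.num_domains = ARRAY_SIZE(foo_domains),
+ *	};
+ *
+ *	ret = of_genpd_add_provider_onecell(pdev->dev.of_node,
+ *					    &foo_onecell_data);
+ *
+ * The provider node then uses "#power-domain-cells = <1>" and consumers pass
+ * the domain index as the single specifier cell.
+ */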
+
+/**
+ * of_genpd_del_provider() - Remove a previously registered PM domain provider
+ * @np: Device node pointer associated with the PM domain provider
+ */
+void of_genpd_del_provider(struct device_node *np)
+{
+ struct of_genpd_provider *cp, *tmp;
+ struct generic_pm_domain *gpd;
+
+ mutex_lock(&gpd_list_lock);
+ mutex_lock(&of_genpd_mutex);
+ list_for_each_entry_safe(cp, tmp, &of_genpd_providers, link) {
+ if (cp->node == np) {
+ /*
+ * For each PM domain associated with the
+ * provider, set the 'has_provider' to false
+ * so that the PM domain can be safely removed.
+ */
+ list_for_each_entry(gpd, &gpd_list, gpd_list_node) {
+ if (gpd->provider == &np->fwnode) {
+ gpd->has_provider = false;
+
+ if (!gpd->set_performance_state)
+ continue;
+
+ dev_pm_opp_put_opp_table(gpd->opp_table);
+ dev_pm_opp_of_remove_table(&gpd->dev);
+ }
+ }
+
+ list_del(&cp->link);
+ of_node_put(cp->node);
+ kfree(cp);
+ break;
+ }
+ }
+ mutex_unlock(&of_genpd_mutex);
+ mutex_unlock(&gpd_list_lock);
+}
+EXPORT_SYMBOL_GPL(of_genpd_del_provider);
+
+/**
+ * genpd_get_from_provider() - Look-up PM domain
+ * @genpdspec: OF phandle args to use for look-up
+ *
+ * Looks for a PM domain provider under the node specified by @genpdspec and if
+ * found, uses xlate function of the provider to map phandle args to a PM
+ * domain.
+ *
+ * Returns a valid pointer to struct generic_pm_domain on success or ERR_PTR()
+ * on failure.
+ */
+static struct generic_pm_domain *genpd_get_from_provider(
+ struct of_phandle_args *genpdspec)
+{
+ struct generic_pm_domain *genpd = ERR_PTR(-ENOENT);
+ struct of_genpd_provider *provider;
+
+ if (!genpdspec)
+ return ERR_PTR(-EINVAL);
+
+ mutex_lock(&of_genpd_mutex);
+
+ /* Check if we have such a provider in our array */
+ list_for_each_entry(provider, &of_genpd_providers, link) {
+ if (provider->node == genpdspec->np)
+ genpd = provider->xlate(genpdspec, provider->data);
+ if (!IS_ERR(genpd))
+ break;
+ }
+
+ mutex_unlock(&of_genpd_mutex);
+
+ return genpd;
+}
+
+/**
+ * of_genpd_add_device() - Add a device to an I/O PM domain
+ * @genpdspec: OF phandle args to use for PM domain look-up
+ * @dev: Device to be added.
+ *
+ * Looks up an I/O PM domain based upon the phandle args provided and adds
+ * the device to the PM domain. Returns a negative error code on failure.
+ */
+int of_genpd_add_device(struct of_phandle_args *genpdspec, struct device *dev)
+{
+ struct generic_pm_domain *genpd;
+ int ret;
+
+ mutex_lock(&gpd_list_lock);
+
+ genpd = genpd_get_from_provider(genpdspec);
+ if (IS_ERR(genpd)) {
+ ret = PTR_ERR(genpd);
+ goto out;
+ }
+
+ ret = genpd_add_device(genpd, dev, dev);
+
+out:
+ mutex_unlock(&gpd_list_lock);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(of_genpd_add_device);
+
+/**
+ * of_genpd_add_subdomain - Add a subdomain to an I/O PM domain.
+ * @parent_spec: OF phandle args to use for parent PM domain look-up
+ * @subdomain_spec: OF phandle args to use for subdomain look-up
+ *
+ * Looks up a parent PM domain and a subdomain based upon the phandle args
+ * provided and adds the subdomain to the parent PM domain. Returns a
+ * negative error code on failure.
+ */
+int of_genpd_add_subdomain(struct of_phandle_args *parent_spec,
+ struct of_phandle_args *subdomain_spec)
+{
+ struct generic_pm_domain *parent, *subdomain;
+ int ret;
+
+ mutex_lock(&gpd_list_lock);
+
+ parent = genpd_get_from_provider(parent_spec);
+ if (IS_ERR(parent)) {
+ ret = PTR_ERR(parent);
+ goto out;
+ }
+
+ subdomain = genpd_get_from_provider(subdomain_spec);
+ if (IS_ERR(subdomain)) {
+ ret = PTR_ERR(subdomain);
+ goto out;
+ }
+
+ ret = genpd_add_subdomain(parent, subdomain);
+
+out:
+ mutex_unlock(&gpd_list_lock);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(of_genpd_add_subdomain);
+
+/**
+ * of_genpd_remove_subdomain - Remove a subdomain from an I/O PM domain.
+ * @parent_spec: OF phandle args to use for parent PM domain look-up
+ * @subdomain_spec: OF phandle args to use for subdomain look-up
+ *
+ * Looks up a parent PM domain and a subdomain based upon the phandle args
+ * provided and removes the subdomain from the parent PM domain. Returns a
+ * negative error code on failure.
+ */
+int of_genpd_remove_subdomain(struct of_phandle_args *parent_spec,
+ struct of_phandle_args *subdomain_spec)
+{
+ struct generic_pm_domain *parent, *subdomain;
+ int ret;
+
+ mutex_lock(&gpd_list_lock);
+
+ parent = genpd_get_from_provider(parent_spec);
+ if (IS_ERR(parent)) {
+ ret = PTR_ERR(parent);
+ goto out;
+ }
+
+ subdomain = genpd_get_from_provider(subdomain_spec);
+ if (IS_ERR(subdomain)) {
+ ret = PTR_ERR(subdomain);
+ goto out;
+ }
+
+ ret = pm_genpd_remove_subdomain(parent, subdomain);
+
+out:
+ mutex_unlock(&gpd_list_lock);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(of_genpd_remove_subdomain);
+
+/**
+ * of_genpd_remove_last - Remove the last PM domain registered for a provider
+ * @np: Pointer to the device node associated with the provider
+ *
+ * Find the last PM domain that was added by a particular provider and
+ * remove this PM domain from the list of PM domains. The provider is
+ * identified by the device node that is passed. The PM domain will only
+ * be removed if the provider associated with the domain has been removed.
+ *
+ * Returns a valid pointer to struct generic_pm_domain on success or
+ * ERR_PTR() on failure.
+ */
+struct generic_pm_domain *of_genpd_remove_last(struct device_node *np)
+{
+ struct generic_pm_domain *gpd, *tmp, *genpd = ERR_PTR(-ENOENT);
+ int ret;
+
+ if (IS_ERR_OR_NULL(np))
+ return ERR_PTR(-EINVAL);
+
+ mutex_lock(&gpd_list_lock);
+ list_for_each_entry_safe(gpd, tmp, &gpd_list, gpd_list_node) {
+ if (gpd->provider == &np->fwnode) {
+ ret = genpd_remove(gpd);
+ genpd = ret ? ERR_PTR(ret) : gpd;
+ break;
+ }
+ }
+ mutex_unlock(&gpd_list_lock);
+
+ return genpd;
+}
+EXPORT_SYMBOL_GPL(of_genpd_remove_last);
+
+static void genpd_release_dev(struct device *dev)
+{
+ of_node_put(dev->of_node);
+ kfree(dev);
+}
+
+static struct bus_type genpd_bus_type = {
+ .name = "genpd",
+};
+
+/**
+ * genpd_dev_pm_detach - Detach a device from its PM domain.
+ * @dev: Device to detach.
+ * @power_off: Currently not used
+ *
+ * Try to locate a corresponding generic PM domain, which the device was
+ * attached to previously. If such is found, the device is detached from it.
+ */
+static void genpd_dev_pm_detach(struct device *dev, bool power_off)
+{
+ struct generic_pm_domain *pd;
+ unsigned int i;
+ int ret = 0;
+
+ pd = dev_to_genpd(dev);
+ if (IS_ERR(pd))
+ return;
+
+ dev_dbg(dev, "removing from PM domain %s\n", pd->name);
+
+ for (i = 1; i < GENPD_RETRY_MAX_MS; i <<= 1) {
+ ret = genpd_remove_device(pd, dev);
+ if (ret != -EAGAIN)
+ break;
+
+ mdelay(i);
+ cond_resched();
+ }
+
+ if (ret < 0) {
+ dev_err(dev, "failed to remove from PM domain %s: %d",
+ pd->name, ret);
+ return;
+ }
+
+ /* Check if PM domain can be powered off after removing this device. */
+ genpd_queue_power_off_work(pd);
+
+ /* Unregister the device if it was created by genpd. */
+ if (dev->bus == &genpd_bus_type)
+ device_unregister(dev);
+}
+
+static void genpd_dev_pm_sync(struct device *dev)
+{
+ struct generic_pm_domain *pd;
+
+ pd = dev_to_genpd(dev);
+ if (IS_ERR(pd))
+ return;
+
+ genpd_queue_power_off_work(pd);
+}
+
+static int __genpd_dev_pm_attach(struct device *dev, struct device *base_dev,
+ unsigned int index, bool power_on)
+{
+ struct of_phandle_args pd_args;
+ struct generic_pm_domain *pd;
+ int ret;
+
+ ret = of_parse_phandle_with_args(dev->of_node, "power-domains",
+ "#power-domain-cells", index, &pd_args);
+ if (ret < 0)
+ return ret;
+
+ mutex_lock(&gpd_list_lock);
+ pd = genpd_get_from_provider(&pd_args);
+ of_node_put(pd_args.np);
+ if (IS_ERR(pd)) {
+ mutex_unlock(&gpd_list_lock);
+ dev_dbg(dev, "%s() failed to find PM domain: %ld\n",
+ __func__, PTR_ERR(pd));
+ return driver_deferred_probe_check_state(base_dev);
+ }
+
+ dev_dbg(dev, "adding to PM domain %s\n", pd->name);
+
+ ret = genpd_add_device(pd, dev, base_dev);
+ mutex_unlock(&gpd_list_lock);
+
+ if (ret < 0) {
+ if (ret != -EPROBE_DEFER)
+ dev_err(dev, "failed to add to PM domain %s: %d",
+ pd->name, ret);
+ return ret;
+ }
+
+ dev->pm_domain->detach = genpd_dev_pm_detach;
+ dev->pm_domain->sync = genpd_dev_pm_sync;
+
+ if (power_on) {
+ genpd_lock(pd);
+ ret = genpd_power_on(pd, 0);
+ genpd_unlock(pd);
+ }
+
+ if (ret)
+ genpd_remove_device(pd, dev);
+
+ return ret ? -EPROBE_DEFER : 1;
+}
+
+/**
+ * genpd_dev_pm_attach - Attach a device to its PM domain using DT.
+ * @dev: Device to attach.
+ *
+ * Parse device's OF node to find a PM domain specifier. If such is found,
+ * attaches the device to retrieved pm_domain ops.
+ *
+ * Returns 1 when a PM domain has been successfully attached, 0 when the device
+ * doesn't need a PM domain or when multiple power-domains exist for it, or a
+ * negative error code otherwise. Note that if a power-domain exists for the
+ * device but cannot be found or turned on, -EPROBE_DEFER is returned to ensure
+ * that the device is not probed and that the attach is retried later.
+ */
+int genpd_dev_pm_attach(struct device *dev)
+{
+ if (!dev->of_node)
+ return 0;
+
+ /*
+ * Devices with multiple PM domains must be attached separately, as we
+ * can only attach one PM domain per device.
+ */
+ if (of_count_phandle_with_args(dev->of_node, "power-domains",
+ "#power-domain-cells") != 1)
+ return 0;
+
+ return __genpd_dev_pm_attach(dev, dev, 0, true);
+}
+EXPORT_SYMBOL_GPL(genpd_dev_pm_attach);
+
+/**
+ * genpd_dev_pm_attach_by_id - Associate a device with one of its PM domains.
+ * @dev: The device used to lookup the PM domain.
+ * @index: The index of the PM domain.
+ *
+ * Parse device's OF node to find a PM domain specifier at the provided @index.
+ * If such is found, creates a virtual device and attaches it to the retrieved
+ * pm_domain ops. To deal with detaching of the virtual device, the ->detach()
+ * callback in the struct dev_pm_domain is assigned to genpd_dev_pm_detach().
+ *
+ * Returns the created virtual device when a PM domain has been successfully
+ * attached, NULL when the device doesn't need a PM domain, or an ERR_PTR() in
+ * case of failures. If a power-domain exists for the device but cannot be
+ * found or turned on, ERR_PTR(-EPROBE_DEFER) is returned to ensure that the
+ * device is not probed and that the attach is retried later.
+ */
+struct device *genpd_dev_pm_attach_by_id(struct device *dev,
+ unsigned int index)
+{
+ struct device *virt_dev;
+ int num_domains;
+ int ret;
+
+ if (!dev->of_node)
+ return NULL;
+
+ /* Verify that the index is within a valid range. */
+ num_domains = of_count_phandle_with_args(dev->of_node, "power-domains",
+ "#power-domain-cells");
+ if (index >= num_domains)
+ return NULL;
+
+ /* Allocate and register device on the genpd bus. */
+ virt_dev = kzalloc(sizeof(*virt_dev), GFP_KERNEL);
+ if (!virt_dev)
+ return ERR_PTR(-ENOMEM);
+
+ dev_set_name(virt_dev, "genpd:%u:%s", index, dev_name(dev));
+ virt_dev->bus = &genpd_bus_type;
+ virt_dev->release = genpd_release_dev;
+ virt_dev->of_node = of_node_get(dev->of_node);
+
+ ret = device_register(virt_dev);
+ if (ret) {
+ put_device(virt_dev);
+ return ERR_PTR(ret);
+ }
+
+ /* Try to attach the device to the PM domain at the specified index. */
+ ret = __genpd_dev_pm_attach(virt_dev, dev, index, false);
+ if (ret < 1) {
+ device_unregister(virt_dev);
+ return ret ? ERR_PTR(ret) : NULL;
+ }
+
+ pm_runtime_enable(virt_dev);
+ genpd_queue_power_off_work(dev_to_genpd(virt_dev));
+
+ return virt_dev;
+}
+EXPORT_SYMBOL_GPL(genpd_dev_pm_attach_by_id);
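+
+/*
+ * Usage sketch (illustrative only): a consumer with several entries in its
+ * "power-domains" property attaches one of them to a virtual device and ties
+ * that device's runtime PM to its own via a device link. The index 1 is
+ * arbitrary and "dev" is the consumer's own device.
+ *
+ *	struct device *pd_dev;
+ *	struct device_link *link;
+ *
+ *	pd_dev = genpd_dev_pm_attach_by_id(dev, 1);
+ *	if (IS_ERR_OR_NULL(pd_dev))
+ *		return pd_dev ? PTR_ERR(pd_dev) : -ENODEV;
+ *
+ *	link = device_link_add(dev, pd_dev,
+ *			       DL_FLAG_STATELESS | DL_FLAG_PM_RUNTIME);
+ *
+ * Runtime-resuming "dev" then powers on the domain behind "pd_dev" through
+ * the link. Most consumers go through the dev_pm_domain_attach_by_id/name()
+ * wrappers rather than calling this function directly.
+ */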
+
+/**
+ * genpd_dev_pm_attach_by_name - Associate a device with one of its PM domains.
+ * @dev: The device used to lookup the PM domain.
+ * @name: The name of the PM domain.
+ *
+ * Parse device's OF node to find a PM domain specifier using the
+ * power-domain-names DT property. For further description see
+ * genpd_dev_pm_attach_by_id().
+ */
+struct device *genpd_dev_pm_attach_by_name(struct device *dev, const char *name)
+{
+ int index;
+
+ if (!dev->of_node)
+ return NULL;
+
+ index = of_property_match_string(dev->of_node, "power-domain-names",
+ name);
+ if (index < 0)
+ return NULL;
+
+ return genpd_dev_pm_attach_by_id(dev, index);
+}
+
+static const struct of_device_id idle_state_match[] = {
+ { .compatible = "domain-idle-state", },
+ { }
+};
+
+static int genpd_parse_state(struct genpd_power_state *genpd_state,
+ struct device_node *state_node)
+{
+ int err;
+ u32 residency;
+ u32 entry_latency, exit_latency;
+
+ err = of_property_read_u32(state_node, "entry-latency-us",
+ &entry_latency);
+ if (err) {
+ pr_debug(" * %pOF missing entry-latency-us property\n",
+ state_node);
+ return -EINVAL;
+ }
+
+ err = of_property_read_u32(state_node, "exit-latency-us",
+ &exit_latency);
+ if (err) {
+ pr_debug(" * %pOF missing exit-latency-us property\n",
+ state_node);
+ return -EINVAL;
+ }
+
+ err = of_property_read_u32(state_node, "min-residency-us", &residency);
+ if (!err)
+ genpd_state->residency_ns = 1000LL * residency;
+
+ genpd_state->power_on_latency_ns = 1000LL * exit_latency;
+ genpd_state->power_off_latency_ns = 1000LL * entry_latency;
+ genpd_state->fwnode = &state_node->fwnode;
+
+ return 0;
+}
+
+static int genpd_iterate_idle_states(struct device_node *dn,
+ struct genpd_power_state *states)
+{
+ int ret;
+ struct of_phandle_iterator it;
+ struct device_node *np;
+ int i = 0;
+
+ ret = of_count_phandle_with_args(dn, "domain-idle-states", NULL);
+ if (ret <= 0)
+ return ret == -ENOENT ? 0 : ret;
+
+ /* Loop over the phandles until all the requested entries are found */
+ of_for_each_phandle(&it, ret, dn, "domain-idle-states", NULL, 0) {
+ np = it.node;
+ if (!of_match_node(idle_state_match, np))
+ continue;
+
+ if (!of_device_is_available(np))
+ continue;
+
+ if (states) {
+ ret = genpd_parse_state(&states[i], np);
+ if (ret) {
+ pr_err("Parsing idle state node %pOF failed with err %d\n",
+ np, ret);
+ of_node_put(np);
+ return ret;
+ }
+ }
+ i++;
+ }
+
+ return i;
+}
+
+/**
+ * of_genpd_parse_idle_states - Return an array of idle states for the genpd.
+ *
+ * @dn: The genpd device node
+ * @states: The pointer to which the state array will be saved.
+ * @n: The count of elements in the array returned from this function.
+ *
+ * Returns the domain idle states parsed from the OF node. The memory for the
+ * states is allocated by this function and it is the responsibility of the
+ * caller to free it after use. Returns 0 if any number of compatible domain
+ * idle states (including zero) is found; a negative error code is returned in
+ * case of errors.
+ */
+int of_genpd_parse_idle_states(struct device_node *dn,
+ struct genpd_power_state **states, int *n)
+{
+ struct genpd_power_state *st;
+ int ret;
+
+ ret = genpd_iterate_idle_states(dn, NULL);
+ if (ret < 0)
+ return ret;
+
+ if (!ret) {
+ *states = NULL;
+ *n = 0;
+ return 0;
+ }
+
+ st = kcalloc(ret, sizeof(*st), GFP_KERNEL);
+ if (!st)
+ return -ENOMEM;
+
+ ret = genpd_iterate_idle_states(dn, st);
+ if (ret <= 0) {
+ kfree(st);
+ return ret < 0 ? ret : -EINVAL;
+ }
+
+ *states = st;
+ *n = ret;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(of_genpd_parse_idle_states);
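+
+/*
+ * Usage sketch (illustrative only): a provider whose DT node lists
+ * "domain-idle-states" feeds the parsed states into the domain before
+ * pm_genpd_init(). "foo_pd" is hypothetical.
+ *
+ *	struct genpd_power_state *states;
+ *	int nr_states, ret;
+ *
+ *	ret = of_genpd_parse_idle_states(np, &states, &nr_states);
+ *	if (ret)
+ *		return ret;
+ *
+ *	if (nr_states) {
+ *		foo_pd.states = states;
+ *		foo_pd.state_count = nr_states;
+ *	}
+ *
+ *	ret = pm_genpd_init(&foo_pd, NULL, false);
+ *
+ * The caller owns the allocated array; it may point foo_pd.free_states at a
+ * matching cleanup helper so that genpd_remove() frees it on removal.
+ */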
+
+/**
+ * pm_genpd_opp_to_performance_state - Gets performance state of the genpd from its OPP node.
+ *
+ * @genpd_dev: Genpd's device for which the performance-state needs to be found.
+ * @opp: struct dev_pm_opp of the OPP for which we need to find performance
+ * state.
+ *
+ * Returns the performance state encoded in the OPP of the genpd. This calls
+ * the platform-specific genpd->opp_to_performance_state() callback to
+ * translate the power domain OPP to a performance state.
+ *
+ * Returns performance state on success and 0 on failure.
+ */
+unsigned int pm_genpd_opp_to_performance_state(struct device *genpd_dev,
+ struct dev_pm_opp *opp)
+{
+ struct generic_pm_domain *genpd = NULL;
+ int state;
+
+ genpd = container_of(genpd_dev, struct generic_pm_domain, dev);
+
+ if (unlikely(!genpd->opp_to_performance_state))
+ return 0;
+
+ genpd_lock(genpd);
+ state = genpd->opp_to_performance_state(genpd, opp);
+ genpd_unlock(genpd);
+
+ return state;
+}
+EXPORT_SYMBOL_GPL(pm_genpd_opp_to_performance_state);
+
+static int __init genpd_bus_init(void)
+{
+ return bus_register(&genpd_bus_type);
+}
+core_initcall(genpd_bus_init);
+
+#endif /* CONFIG_PM_GENERIC_DOMAINS_OF */
+
+
+/*** debugfs support ***/
+
+#ifdef CONFIG_DEBUG_FS
+#include <linux/pm.h>
+#include <linux/device.h>
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
+#include <linux/init.h>
+#include <linux/kobject.h>
+static struct dentry *genpd_debugfs_dir;
+
+/*
+ * TODO: This function is a slightly modified version of rtpm_status_show
+ * from sysfs.c, so generalize it.
+ */
+static void rtpm_status_str(struct seq_file *s, struct device *dev)
+{
+ static const char * const status_lookup[] = {
+ [RPM_ACTIVE] = "active",
+ [RPM_RESUMING] = "resuming",
+ [RPM_SUSPENDED] = "suspended",
+ [RPM_SUSPENDING] = "suspending"
+ };
+ const char *p = "";
+
+ if (dev->power.runtime_error)
+ p = "error";
+ else if (dev->power.disable_depth)
+ p = "unsupported";
+ else if (dev->power.runtime_status < ARRAY_SIZE(status_lookup))
+ p = status_lookup[dev->power.runtime_status];
+ else
+ WARN_ON(1);
+
+ seq_puts(s, p);
+}
+
+static int genpd_summary_one(struct seq_file *s,
+ struct generic_pm_domain *genpd)
+{
+ static const char * const status_lookup[] = {
+ [GENPD_STATE_ON] = "on",
+ [GENPD_STATE_OFF] = "off"
+ };
+ struct pm_domain_data *pm_data;
+ const char *kobj_path;
+ struct gpd_link *link;
+ char state[16];
+ int ret;
+
+ ret = genpd_lock_interruptible(genpd);
+ if (ret)
+ return -ERESTARTSYS;
+
+ if (WARN_ON(genpd->status >= ARRAY_SIZE(status_lookup)))
+ goto exit;
+ if (!genpd_status_on(genpd))
+ snprintf(state, sizeof(state), "%s-%u",
+ status_lookup[genpd->status], genpd->state_idx);
+ else
+ snprintf(state, sizeof(state), "%s",
+ status_lookup[genpd->status]);
+ seq_printf(s, "%-30s %-15s ", genpd->name, state);
+
+ /*
+ * Modifications on the list require holding locks on both
+ * parent and child, so we are safe.
+ * Also genpd->name is immutable.
+ */
+ list_for_each_entry(link, &genpd->parent_links, parent_node) {
+ seq_printf(s, "%s", link->child->name);
+ if (!list_is_last(&link->parent_node, &genpd->parent_links))
+ seq_puts(s, ", ");
+ }
+
+ list_for_each_entry(pm_data, &genpd->dev_list, list_node) {
+ kobj_path = kobject_get_path(&pm_data->dev->kobj,
+ genpd_is_irq_safe(genpd) ?
+ GFP_ATOMIC : GFP_KERNEL);
+ if (kobj_path == NULL)
+ continue;
+
+ seq_printf(s, "\n %-50s ", kobj_path);
+ rtpm_status_str(s, pm_data->dev);
+ kfree(kobj_path);
+ }
+
+ seq_puts(s, "\n");
+exit:
+ genpd_unlock(genpd);
+
+ return 0;
+}
+
+static int summary_show(struct seq_file *s, void *data)
+{
+ struct generic_pm_domain *genpd;
+ int ret = 0;
+
+ seq_puts(s, "domain status children\n");
+ seq_puts(s, " /device runtime status\n");
+ seq_puts(s, "----------------------------------------------------------------------\n");
+
+ ret = mutex_lock_interruptible(&gpd_list_lock);
+ if (ret)
+ return -ERESTARTSYS;
+
+ list_for_each_entry(genpd, &gpd_list, gpd_list_node) {
+ ret = genpd_summary_one(s, genpd);
+ if (ret)
+ break;
+ }
+ mutex_unlock(&gpd_list_lock);
+
+ return ret;
+}
+
+static int status_show(struct seq_file *s, void *data)
+{
+ static const char * const status_lookup[] = {
+ [GENPD_STATE_ON] = "on",
+ [GENPD_STATE_OFF] = "off"
+ };
+
+ struct generic_pm_domain *genpd = s->private;
+ int ret = 0;
+
+ ret = genpd_lock_interruptible(genpd);
+ if (ret)
+ return -ERESTARTSYS;
+
+ if (WARN_ON_ONCE(genpd->status >= ARRAY_SIZE(status_lookup)))
+ goto exit;
+
+ if (genpd->status == GENPD_STATE_OFF)
+ seq_printf(s, "%s-%u\n", status_lookup[genpd->status],
+ genpd->state_idx);
+ else
+ seq_printf(s, "%s\n", status_lookup[genpd->status]);
+exit:
+ genpd_unlock(genpd);
+ return ret;
+}
+
+static int sub_domains_show(struct seq_file *s, void *data)
+{
+ struct generic_pm_domain *genpd = s->private;
+ struct gpd_link *link;
+ int ret = 0;
+
+ ret = genpd_lock_interruptible(genpd);
+ if (ret)
+ return -ERESTARTSYS;
+
+ list_for_each_entry(link, &genpd->parent_links, parent_node)
+ seq_printf(s, "%s\n", link->child->name);
+
+ genpd_unlock(genpd);
+ return ret;
+}
+
+static int idle_states_show(struct seq_file *s, void *data)
+{
+ struct generic_pm_domain *genpd = s->private;
+ unsigned int i;
+ int ret = 0;
+
+ ret = genpd_lock_interruptible(genpd);
+ if (ret)
+ return -ERESTARTSYS;
+
+ seq_puts(s, "State Time Spent(ms) Usage Rejected\n");
+
+ for (i = 0; i < genpd->state_count; i++) {
+ ktime_t delta = 0;
+ s64 msecs;
+
+ if ((genpd->status == GENPD_STATE_OFF) &&
+ (genpd->state_idx == i))
+ delta = ktime_sub(ktime_get(), genpd->accounting_time);
+
+ msecs = ktime_to_ms(
+ ktime_add(genpd->states[i].idle_time, delta));
+ seq_printf(s, "S%-13i %-14lld %-14llu %llu\n", i, msecs,
+ genpd->states[i].usage, genpd->states[i].rejected);
+ }
+
+ genpd_unlock(genpd);
+ return ret;
+}
+
+static int active_time_show(struct seq_file *s, void *data)
+{
+ struct generic_pm_domain *genpd = s->private;
+ ktime_t delta = 0;
+ int ret = 0;
+
+ ret = genpd_lock_interruptible(genpd);
+ if (ret)
+ return -ERESTARTSYS;
+
+ if (genpd->status == GENPD_STATE_ON)
+ delta = ktime_sub(ktime_get(), genpd->accounting_time);
+
+ seq_printf(s, "%lld ms\n", ktime_to_ms(
+ ktime_add(genpd->on_time, delta)));
+
+ genpd_unlock(genpd);
+ return ret;
+}
+
+static int total_idle_time_show(struct seq_file *s, void *data)
+{
+ struct generic_pm_domain *genpd = s->private;
+ ktime_t delta = 0, total = 0;
+ unsigned int i;
+ int ret = 0;
+
+ ret = genpd_lock_interruptible(genpd);
+ if (ret)
+ return -ERESTARTSYS;
+
+ for (i = 0; i < genpd->state_count; i++) {
+
+ if ((genpd->status == GENPD_STATE_OFF) &&
+ (genpd->state_idx == i))
+ delta = ktime_sub(ktime_get(), genpd->accounting_time);
+
+ total = ktime_add(total, genpd->states[i].idle_time);
+ }
+ total = ktime_add(total, delta);
+
+ seq_printf(s, "%lld ms\n", ktime_to_ms(total));
+
+ genpd_unlock(genpd);
+ return ret;
+}
+
+
+static int devices_show(struct seq_file *s, void *data)
+{
+ struct generic_pm_domain *genpd = s->private;
+ struct pm_domain_data *pm_data;
+ const char *kobj_path;
+ int ret = 0;
+
+ ret = genpd_lock_interruptible(genpd);
+ if (ret)
+ return -ERESTARTSYS;
+
+ list_for_each_entry(pm_data, &genpd->dev_list, list_node) {
+ kobj_path = kobject_get_path(&pm_data->dev->kobj,
+ genpd_is_irq_safe(genpd) ?
+ GFP_ATOMIC : GFP_KERNEL);
+ if (kobj_path == NULL)
+ continue;
+
+ seq_printf(s, "%s\n", kobj_path);
+ kfree(kobj_path);
+ }
+
+ genpd_unlock(genpd);
+ return ret;
+}
+
+static int perf_state_show(struct seq_file *s, void *data)
+{
+ struct generic_pm_domain *genpd = s->private;
+
+ if (genpd_lock_interruptible(genpd))
+ return -ERESTARTSYS;
+
+ seq_printf(s, "%u\n", genpd->performance_state);
+
+ genpd_unlock(genpd);
+ return 0;
+}
+
+DEFINE_SHOW_ATTRIBUTE(summary);
+DEFINE_SHOW_ATTRIBUTE(status);
+DEFINE_SHOW_ATTRIBUTE(sub_domains);
+DEFINE_SHOW_ATTRIBUTE(idle_states);
+DEFINE_SHOW_ATTRIBUTE(active_time);
+DEFINE_SHOW_ATTRIBUTE(total_idle_time);
+DEFINE_SHOW_ATTRIBUTE(devices);
+DEFINE_SHOW_ATTRIBUTE(perf_state);
+
+static int __init genpd_debug_init(void)
+{
+ struct dentry *d;
+ struct generic_pm_domain *genpd;
+
+ genpd_debugfs_dir = debugfs_create_dir("pm_genpd", NULL);
+
+ debugfs_create_file("pm_genpd_summary", S_IRUGO, genpd_debugfs_dir,
+ NULL, &summary_fops);
+
+ list_for_each_entry(genpd, &gpd_list, gpd_list_node) {
+ d = debugfs_create_dir(genpd->name, genpd_debugfs_dir);
+
+ debugfs_create_file("current_state", 0444,
+ d, genpd, &status_fops);
+ debugfs_create_file("sub_domains", 0444,
+ d, genpd, &sub_domains_fops);
+ debugfs_create_file("idle_states", 0444,
+ d, genpd, &idle_states_fops);
+ debugfs_create_file("active_time", 0444,
+ d, genpd, &active_time_fops);
+ debugfs_create_file("total_idle_time", 0444,
+ d, genpd, &total_idle_time_fops);
+ debugfs_create_file("devices", 0444,
+ d, genpd, &devices_fops);
+ if (genpd->set_performance_state)
+ debugfs_create_file("perf_state", 0444,
+ d, genpd, &perf_state_fops);
+ }
+
+ return 0;
+}
+late_initcall(genpd_debug_init);
+
+static void __exit genpd_debug_exit(void)
+{
+ debugfs_remove_recursive(genpd_debugfs_dir);
+}
+__exitcall(genpd_debug_exit);
+#endif /* CONFIG_DEBUG_FS */
diff --git a/drivers/base/power/domain_governor.c b/drivers/base/power/domain_governor.c
new file mode 100644
index 000000000..490ed7deb
--- /dev/null
+++ b/drivers/base/power/domain_governor.c
@@ -0,0 +1,321 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * drivers/base/power/domain_governor.c - Governors for device PM domains.
+ *
+ * Copyright (C) 2011 Rafael J. Wysocki <rjw@sisk.pl>, Renesas Electronics Corp.
+ */
+#include <linux/kernel.h>
+#include <linux/pm_domain.h>
+#include <linux/pm_qos.h>
+#include <linux/hrtimer.h>
+#include <linux/cpuidle.h>
+#include <linux/cpumask.h>
+#include <linux/ktime.h>
+
+static int dev_update_qos_constraint(struct device *dev, void *data)
+{
+ s64 *constraint_ns_p = data;
+ s64 constraint_ns;
+
+ if (dev->power.subsys_data && dev->power.subsys_data->domain_data) {
+ /*
+ * Only take suspend-time QoS constraints of devices into
+ * account, because constraints updated after the device has
+ * been suspended are not guaranteed to be taken into account
+ * anyway. In order for them to take effect, the device has to
+ * be resumed and suspended again.
+ */
+ constraint_ns = dev_gpd_data(dev)->td.effective_constraint_ns;
+ } else {
+ /*
+ * The child is not in a domain and there's no info on its
+ * suspend/resume latencies, so assume them to be negligible and
+ * take its current PM QoS constraint (that's the only thing
+ * known at this point anyway).
+ */
+ constraint_ns = dev_pm_qos_read_value(dev, DEV_PM_QOS_RESUME_LATENCY);
+ constraint_ns *= NSEC_PER_USEC;
+ }
+
+ if (constraint_ns < *constraint_ns_p)
+ *constraint_ns_p = constraint_ns;
+
+ return 0;
+}
+
+/**
+ * default_suspend_ok - Default PM domain governor routine to suspend devices.
+ * @dev: Device to check.
+ */
+static bool default_suspend_ok(struct device *dev)
+{
+ struct gpd_timing_data *td = &dev_gpd_data(dev)->td;
+ unsigned long flags;
+ s64 constraint_ns;
+
+ dev_dbg(dev, "%s()\n", __func__);
+
+ spin_lock_irqsave(&dev->power.lock, flags);
+
+ if (!td->constraint_changed) {
+ bool ret = td->cached_suspend_ok;
+
+ spin_unlock_irqrestore(&dev->power.lock, flags);
+ return ret;
+ }
+ td->constraint_changed = false;
+ td->cached_suspend_ok = false;
+ td->effective_constraint_ns = 0;
+ constraint_ns = __dev_pm_qos_resume_latency(dev);
+
+ spin_unlock_irqrestore(&dev->power.lock, flags);
+
+ if (constraint_ns == 0)
+ return false;
+
+ constraint_ns *= NSEC_PER_USEC;
+ /*
+ * We can walk the children without any additional locking, because
+ * they all have been suspended at this point and their
+ * effective_constraint_ns fields won't be modified in parallel with us.
+ */
+ if (!dev->power.ignore_children)
+ device_for_each_child(dev, &constraint_ns,
+ dev_update_qos_constraint);
+
+ if (constraint_ns == PM_QOS_RESUME_LATENCY_NO_CONSTRAINT_NS) {
+ /* "No restriction", so the device is allowed to suspend. */
+ td->effective_constraint_ns = PM_QOS_RESUME_LATENCY_NO_CONSTRAINT_NS;
+ td->cached_suspend_ok = true;
+ } else if (constraint_ns == 0) {
+ /*
+ * This triggers if one of the children that don't belong to a
+ * domain has a zero PM QoS constraint, so it is better not to
+ * suspend at all. effective_constraint_ns is zero already and
+ * cached_suspend_ok is false, so bail out.
+ */
+ return false;
+ } else {
+ constraint_ns -= td->suspend_latency_ns +
+ td->resume_latency_ns;
+ /*
+ * effective_constraint_ns is zero already and cached_suspend_ok
+ * is false, so if the computed value is not positive, return
+ * right away.
+ */
+ if (constraint_ns <= 0)
+ return false;
+
+ td->effective_constraint_ns = constraint_ns;
+ td->cached_suspend_ok = true;
+ }
+
+ /*
+ * The children have been suspended already, so we don't need to take
+ * their suspend latencies into account here.
+ */
+ return td->cached_suspend_ok;
+}
+
+static bool __default_power_down_ok(struct dev_pm_domain *pd,
+ unsigned int state)
+{
+ struct generic_pm_domain *genpd = pd_to_genpd(pd);
+ struct gpd_link *link;
+ struct pm_domain_data *pdd;
+ s64 min_off_time_ns;
+ s64 off_on_time_ns;
+
+ off_on_time_ns = genpd->states[state].power_off_latency_ns +
+ genpd->states[state].power_on_latency_ns;
+
+ min_off_time_ns = -1;
+ /*
+ * Check if subdomains can be off for enough time.
+ *
+ * All subdomains have been powered off already at this point.
+ */
+ list_for_each_entry(link, &genpd->parent_links, parent_node) {
+ struct generic_pm_domain *sd = link->child;
+ s64 sd_max_off_ns = sd->max_off_time_ns;
+
+ if (sd_max_off_ns < 0)
+ continue;
+
+ /*
+ * Check if the subdomain is allowed to be off long enough for
+ * the current domain to turn off and on (that's how much time
+ * it will have to wait worst case).
+ */
+ if (sd_max_off_ns <= off_on_time_ns)
+ return false;
+
+ if (min_off_time_ns > sd_max_off_ns || min_off_time_ns < 0)
+ min_off_time_ns = sd_max_off_ns;
+ }
+
+ /*
+ * Check if the devices in the domain can be off enough time.
+ */
+ list_for_each_entry(pdd, &genpd->dev_list, list_node) {
+ struct gpd_timing_data *td;
+ s64 constraint_ns;
+
+ /*
+ * Check if the device is allowed to be off long enough for the
+ * domain to turn off and on (that's how much time it will
+ * have to wait worst case).
+ */
+ td = &to_gpd_data(pdd)->td;
+ constraint_ns = td->effective_constraint_ns;
+ /*
+ * A constraint of zero means "no suspend at all", but this code runs
+ * only when all devices in the domain are suspended, so the value
+ * must be positive here.
+ */
+ if (constraint_ns == PM_QOS_RESUME_LATENCY_NO_CONSTRAINT_NS)
+ continue;
+
+ if (constraint_ns <= off_on_time_ns)
+ return false;
+
+ if (min_off_time_ns > constraint_ns || min_off_time_ns < 0)
+ min_off_time_ns = constraint_ns;
+ }
+
+ /*
+ * If the computed minimum device off time is negative, there are no
+ * latency constraints, so the domain can spend arbitrary time in the
+ * "off" state.
+ */
+ if (min_off_time_ns < 0)
+ return true;
+
+ /*
+ * The difference between the computed minimum subdomain or device off
+ * time and the time needed to turn the domain on is the maximum
+ * theoretical time this domain can spend in the "off" state.
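+ * For instance, a 1200000 ns minimum off time and a 200000 ns
+ * power-on latency allow the domain to stay off for up to 1000000 ns.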
+ */
+ genpd->max_off_time_ns = min_off_time_ns -
+ genpd->states[state].power_on_latency_ns;
+ return true;
+}
+
+/**
+ * default_power_down_ok - Default generic PM domain power off governor routine.
+ * @pd: PM domain to check.
+ *
+ * This routine must be executed under the PM domain's lock.
+ */
+static bool default_power_down_ok(struct dev_pm_domain *pd)
+{
+ struct generic_pm_domain *genpd = pd_to_genpd(pd);
+ struct gpd_link *link;
+
+ if (!genpd->max_off_time_changed) {
+ genpd->state_idx = genpd->cached_power_down_state_idx;
+ return genpd->cached_power_down_ok;
+ }
+
+ /*
+ * We have to invalidate the cached results for the parents, so
+ * use the observation that default_power_down_ok() is not
+ * going to be called for any parent until this instance
+ * returns.
+ */
+ list_for_each_entry(link, &genpd->child_links, child_node)
+ link->parent->max_off_time_changed = true;
+
+ genpd->max_off_time_ns = -1;
+ genpd->max_off_time_changed = false;
+ genpd->cached_power_down_ok = true;
+ genpd->state_idx = genpd->state_count - 1;
+
+ /* Find a state to power down to, starting from the deepest. */
+ while (!__default_power_down_ok(pd, genpd->state_idx)) {
+ if (genpd->state_idx == 0) {
+ genpd->cached_power_down_ok = false;
+ break;
+ }
+ genpd->state_idx--;
+ }
+
+ genpd->cached_power_down_state_idx = genpd->state_idx;
+ return genpd->cached_power_down_ok;
+}
+
+static bool always_on_power_down_ok(struct dev_pm_domain *domain)
+{
+ return false;
+}
+
+#ifdef CONFIG_CPU_IDLE
+static bool cpu_power_down_ok(struct dev_pm_domain *pd)
+{
+ struct generic_pm_domain *genpd = pd_to_genpd(pd);
+ struct cpuidle_device *dev;
+ ktime_t domain_wakeup, next_hrtimer;
+ s64 idle_duration_ns;
+ int cpu, i;
+
+ /* Validate dev PM QoS constraints. */
+ if (!default_power_down_ok(pd))
+ return false;
+
+ if (!(genpd->flags & GENPD_FLAG_CPU_DOMAIN))
+ return true;
+
+ /*
+ * Find the next wakeup for any of the online CPUs within the PM domain
+ * and its subdomains.  Note that genpd->cpus is sufficient here, as it
+ * already contains a mask of all CPUs from the subdomains.
+ */
+ domain_wakeup = ktime_set(KTIME_SEC_MAX, 0);
+ for_each_cpu_and(cpu, genpd->cpus, cpu_online_mask) {
+ dev = per_cpu(cpuidle_devices, cpu);
+ if (dev) {
+ next_hrtimer = READ_ONCE(dev->next_hrtimer);
+ if (ktime_before(next_hrtimer, domain_wakeup))
+ domain_wakeup = next_hrtimer;
+ }
+ }
+
+ /* The minimum idle duration is from now until the next wakeup. */
+ idle_duration_ns = ktime_to_ns(ktime_sub(domain_wakeup, ktime_get()));
+ if (idle_duration_ns <= 0)
+ return false;
+
+ /*
+ * Find the deepest idle state whose residency value is satisfied,
+ * also taking the power-off latency of the state into account.
+ * Start at the state picked by the dev PM QoS constraint validation.
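+ * For example, with 1500000 ns until the next wakeup, a state needing
+ * 1000000 ns of residency plus 200000 ns of power-off latency
+ * qualifies, while one needing 2000000 ns in total does not.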
+ */
+ i = genpd->state_idx;
+ do {
+ if (idle_duration_ns >= (genpd->states[i].residency_ns +
+ genpd->states[i].power_off_latency_ns)) {
+ genpd->state_idx = i;
+ return true;
+ }
+ } while (--i >= 0);
+
+ return false;
+}
+
+struct dev_power_governor pm_domain_cpu_gov = {
+ .suspend_ok = default_suspend_ok,
+ .power_down_ok = cpu_power_down_ok,
+};
+#endif
+
+struct dev_power_governor simple_qos_governor = {
+ .suspend_ok = default_suspend_ok,
+ .power_down_ok = default_power_down_ok,
+};
+
+/**
+ * pm_domain_always_on_gov - A governor implementing an always-on policy
+ */
+struct dev_power_governor pm_domain_always_on_gov = {
+ .power_down_ok = always_on_power_down_ok,
+ .suspend_ok = default_suspend_ok,
+};
diff --git a/drivers/base/power/generic_ops.c b/drivers/base/power/generic_ops.c
new file mode 100644
index 000000000..4fa525668
--- /dev/null
+++ b/drivers/base/power/generic_ops.c
@@ -0,0 +1,298 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * drivers/base/power/generic_ops.c - Generic PM callbacks for subsystems
+ *
+ * Copyright (c) 2010 Rafael J. Wysocki <rjw@sisk.pl>, Novell Inc.
+ */
+#include <linux/pm.h>
+#include <linux/pm_runtime.h>
+#include <linux/export.h>
+
+#ifdef CONFIG_PM
+/**
+ * pm_generic_runtime_suspend - Generic runtime suspend callback for subsystems.
+ * @dev: Device to suspend.
+ *
+ * If PM operations are defined for the @dev's driver and they include
+ * ->runtime_suspend(), execute it and return its error code. Otherwise,
+ * return 0.
+ */
+int pm_generic_runtime_suspend(struct device *dev)
+{
+ const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
+ int ret;
+
+ ret = pm && pm->runtime_suspend ? pm->runtime_suspend(dev) : 0;
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(pm_generic_runtime_suspend);
+
+/**
+ * pm_generic_runtime_resume - Generic runtime resume callback for subsystems.
+ * @dev: Device to resume.
+ *
+ * If PM operations are defined for the @dev's driver and they include
+ * ->runtime_resume(), execute it and return its error code. Otherwise,
+ * return 0.
+ */
+int pm_generic_runtime_resume(struct device *dev)
+{
+ const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
+ int ret;
+
+ ret = pm && pm->runtime_resume ? pm->runtime_resume(dev) : 0;
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(pm_generic_runtime_resume);
+#endif /* CONFIG_PM */
+
+#ifdef CONFIG_PM_SLEEP
+/**
+ * pm_generic_prepare - Generic routine preparing a device for power transition.
+ * @dev: Device to prepare.
+ *
+ * Prepare a device for a system-wide power transition.
+ */
+int pm_generic_prepare(struct device *dev)
+{
+ struct device_driver *drv = dev->driver;
+ int ret = 0;
+
+ if (drv && drv->pm && drv->pm->prepare)
+ ret = drv->pm->prepare(dev);
+
+ return ret;
+}
+
+/**
+ * pm_generic_suspend_noirq - Generic suspend_noirq callback for subsystems.
+ * @dev: Device to suspend.
+ */
+int pm_generic_suspend_noirq(struct device *dev)
+{
+ const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
+
+ return pm && pm->suspend_noirq ? pm->suspend_noirq(dev) : 0;
+}
+EXPORT_SYMBOL_GPL(pm_generic_suspend_noirq);
+
+/**
+ * pm_generic_suspend_late - Generic suspend_late callback for subsystems.
+ * @dev: Device to suspend.
+ */
+int pm_generic_suspend_late(struct device *dev)
+{
+ const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
+
+ return pm && pm->suspend_late ? pm->suspend_late(dev) : 0;
+}
+EXPORT_SYMBOL_GPL(pm_generic_suspend_late);
+
+/**
+ * pm_generic_suspend - Generic suspend callback for subsystems.
+ * @dev: Device to suspend.
+ */
+int pm_generic_suspend(struct device *dev)
+{
+ const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
+
+ return pm && pm->suspend ? pm->suspend(dev) : 0;
+}
+EXPORT_SYMBOL_GPL(pm_generic_suspend);
+
+/**
+ * pm_generic_freeze_noirq - Generic freeze_noirq callback for subsystems.
+ * @dev: Device to freeze.
+ */
+int pm_generic_freeze_noirq(struct device *dev)
+{
+ const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
+
+ return pm && pm->freeze_noirq ? pm->freeze_noirq(dev) : 0;
+}
+EXPORT_SYMBOL_GPL(pm_generic_freeze_noirq);
+
+/**
+ * pm_generic_freeze_late - Generic freeze_late callback for subsystems.
+ * @dev: Device to freeze.
+ */
+int pm_generic_freeze_late(struct device *dev)
+{
+ const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
+
+ return pm && pm->freeze_late ? pm->freeze_late(dev) : 0;
+}
+EXPORT_SYMBOL_GPL(pm_generic_freeze_late);
+
+/**
+ * pm_generic_freeze - Generic freeze callback for subsystems.
+ * @dev: Device to freeze.
+ */
+int pm_generic_freeze(struct device *dev)
+{
+ const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
+
+ return pm && pm->freeze ? pm->freeze(dev) : 0;
+}
+EXPORT_SYMBOL_GPL(pm_generic_freeze);
+
+/**
+ * pm_generic_poweroff_noirq - Generic poweroff_noirq callback for subsystems.
+ * @dev: Device to handle.
+ */
+int pm_generic_poweroff_noirq(struct device *dev)
+{
+ const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
+
+ return pm && pm->poweroff_noirq ? pm->poweroff_noirq(dev) : 0;
+}
+EXPORT_SYMBOL_GPL(pm_generic_poweroff_noirq);
+
+/**
+ * pm_generic_poweroff_late - Generic poweroff_late callback for subsystems.
+ * @dev: Device to handle.
+ */
+int pm_generic_poweroff_late(struct device *dev)
+{
+ const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
+
+ return pm && pm->poweroff_late ? pm->poweroff_late(dev) : 0;
+}
+EXPORT_SYMBOL_GPL(pm_generic_poweroff_late);
+
+/**
+ * pm_generic_poweroff - Generic poweroff callback for subsystems.
+ * @dev: Device to handle.
+ */
+int pm_generic_poweroff(struct device *dev)
+{
+ const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
+
+ return pm && pm->poweroff ? pm->poweroff(dev) : 0;
+}
+EXPORT_SYMBOL_GPL(pm_generic_poweroff);
+
+/**
+ * pm_generic_thaw_noirq - Generic thaw_noirq callback for subsystems.
+ * @dev: Device to thaw.
+ */
+int pm_generic_thaw_noirq(struct device *dev)
+{
+ const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
+
+ return pm && pm->thaw_noirq ? pm->thaw_noirq(dev) : 0;
+}
+EXPORT_SYMBOL_GPL(pm_generic_thaw_noirq);
+
+/**
+ * pm_generic_thaw_early - Generic thaw_early callback for subsystems.
+ * @dev: Device to thaw.
+ */
+int pm_generic_thaw_early(struct device *dev)
+{
+ const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
+
+ return pm && pm->thaw_early ? pm->thaw_early(dev) : 0;
+}
+EXPORT_SYMBOL_GPL(pm_generic_thaw_early);
+
+/**
+ * pm_generic_thaw - Generic thaw callback for subsystems.
+ * @dev: Device to thaw.
+ */
+int pm_generic_thaw(struct device *dev)
+{
+ const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
+
+ return pm && pm->thaw ? pm->thaw(dev) : 0;
+}
+EXPORT_SYMBOL_GPL(pm_generic_thaw);
+
+/**
+ * pm_generic_resume_noirq - Generic resume_noirq callback for subsystems.
+ * @dev: Device to resume.
+ */
+int pm_generic_resume_noirq(struct device *dev)
+{
+ const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
+
+ return pm && pm->resume_noirq ? pm->resume_noirq(dev) : 0;
+}
+EXPORT_SYMBOL_GPL(pm_generic_resume_noirq);
+
+/**
+ * pm_generic_resume_early - Generic resume_early callback for subsystems.
+ * @dev: Device to resume.
+ */
+int pm_generic_resume_early(struct device *dev)
+{
+ const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
+
+ return pm && pm->resume_early ? pm->resume_early(dev) : 0;
+}
+EXPORT_SYMBOL_GPL(pm_generic_resume_early);
+
+/**
+ * pm_generic_resume - Generic resume callback for subsystems.
+ * @dev: Device to resume.
+ */
+int pm_generic_resume(struct device *dev)
+{
+ const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
+
+ return pm && pm->resume ? pm->resume(dev) : 0;
+}
+EXPORT_SYMBOL_GPL(pm_generic_resume);
+
+/**
+ * pm_generic_restore_noirq - Generic restore_noirq callback for subsystems.
+ * @dev: Device to restore.
+ */
+int pm_generic_restore_noirq(struct device *dev)
+{
+ const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
+
+ return pm && pm->restore_noirq ? pm->restore_noirq(dev) : 0;
+}
+EXPORT_SYMBOL_GPL(pm_generic_restore_noirq);
+
+/**
+ * pm_generic_restore_early - Generic restore_early callback for subsystems.
+ * @dev: Device to restore.
+ */
+int pm_generic_restore_early(struct device *dev)
+{
+ const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
+
+ return pm && pm->restore_early ? pm->restore_early(dev) : 0;
+}
+EXPORT_SYMBOL_GPL(pm_generic_restore_early);
+
+/**
+ * pm_generic_restore - Generic restore callback for subsystems.
+ * @dev: Device to restore.
+ */
+int pm_generic_restore(struct device *dev)
+{
+ const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
+
+ return pm && pm->restore ? pm->restore(dev) : 0;
+}
+EXPORT_SYMBOL_GPL(pm_generic_restore);
+
+/**
+ * pm_generic_complete - Generic routine completing a device power transition.
+ * @dev: Device to handle.
+ *
+ * Complete a device power transition during a system-wide power transition.
+ */
+void pm_generic_complete(struct device *dev)
+{
+ struct device_driver *drv = dev->driver;
+
+ if (drv && drv->pm && drv->pm->complete)
+ drv->pm->complete(dev);
+}
+#endif /* CONFIG_PM_SLEEP */
diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c
new file mode 100644
index 000000000..1dbaaddf5
--- /dev/null
+++ b/drivers/base/power/main.c
@@ -0,0 +1,2015 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * drivers/base/power/main.c - Where the driver meets power management.
+ *
+ * Copyright (c) 2003 Patrick Mochel
+ * Copyright (c) 2003 Open Source Development Lab
+ *
+ * The driver model core calls device_pm_add() when a device is registered.
+ * This will initialize the embedded device_pm_info object in the device
+ * and add it to the list of power-controlled devices. sysfs entries for
+ * controlling device power management will also be added.
+ *
+ * A separate list is used for keeping track of power info, because the power
+ * domain dependencies may differ from the ancestral dependencies that the
+ * subsystem list maintains.
+ */
+
+#define pr_fmt(fmt) "PM: " fmt
+
+#include <linux/device.h>
+#include <linux/export.h>
+#include <linux/mutex.h>
+#include <linux/pm.h>
+#include <linux/pm_runtime.h>
+#include <linux/pm-trace.h>
+#include <linux/pm_wakeirq.h>
+#include <linux/interrupt.h>
+#include <linux/sched.h>
+#include <linux/sched/debug.h>
+#include <linux/async.h>
+#include <linux/suspend.h>
+#include <trace/events/power.h>
+#include <linux/cpufreq.h>
+#include <linux/cpuidle.h>
+#include <linux/devfreq.h>
+#include <linux/timer.h>
+
+#include "../base.h"
+#include "power.h"
+
+typedef int (*pm_callback_t)(struct device *);
+
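+/*
+ * Iterate a device links list under the SRCU read lock; the extra argument
+ * tells the RCU list walker (and lockdep) that the lock is held.
+ */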
+#define list_for_each_entry_rcu_locked(pos, head, member) \
+ list_for_each_entry_rcu(pos, head, member, \
+ device_links_read_lock_held())
+
+/*
+ * The entries in the dpm_list list are in a depth first order, simply
+ * because children are guaranteed to be discovered after parents, and
+ * are inserted at the back of the list on discovery.
+ *
+ * Since device_pm_add() may be called with a device lock held,
+ * we must never try to acquire a device lock while holding
+ * dpm_list_mutex.
+ */
+
+LIST_HEAD(dpm_list);
+static LIST_HEAD(dpm_prepared_list);
+static LIST_HEAD(dpm_suspended_list);
+static LIST_HEAD(dpm_late_early_list);
+static LIST_HEAD(dpm_noirq_list);
+
+struct suspend_stats suspend_stats;
+static DEFINE_MUTEX(dpm_list_mtx);
+static pm_message_t pm_transition;
+
+static int async_error;
+
+static const char *pm_verb(int event)
+{
+ switch (event) {
+ case PM_EVENT_SUSPEND:
+ return "suspend";
+ case PM_EVENT_RESUME:
+ return "resume";
+ case PM_EVENT_FREEZE:
+ return "freeze";
+ case PM_EVENT_QUIESCE:
+ return "quiesce";
+ case PM_EVENT_HIBERNATE:
+ return "hibernate";
+ case PM_EVENT_THAW:
+ return "thaw";
+ case PM_EVENT_RESTORE:
+ return "restore";
+ case PM_EVENT_RECOVER:
+ return "recover";
+ default:
+ return "(unknown PM event)";
+ }
+}
+
+/**
+ * device_pm_sleep_init - Initialize system suspend-related device fields.
+ * @dev: Device object being initialized.
+ */
+void device_pm_sleep_init(struct device *dev)
+{
+ dev->power.is_prepared = false;
+ dev->power.is_suspended = false;
+ dev->power.is_noirq_suspended = false;
+ dev->power.is_late_suspended = false;
+ init_completion(&dev->power.completion);
+ complete_all(&dev->power.completion);
+ dev->power.wakeup = NULL;
+ INIT_LIST_HEAD(&dev->power.entry);
+}
+
+/**
+ * device_pm_lock - Lock the list of active devices used by the PM core.
+ */
+void device_pm_lock(void)
+{
+ mutex_lock(&dpm_list_mtx);
+}
+
+/**
+ * device_pm_unlock - Unlock the list of active devices used by the PM core.
+ */
+void device_pm_unlock(void)
+{
+ mutex_unlock(&dpm_list_mtx);
+}
+
+/**
+ * device_pm_add - Add a device to the PM core's list of active devices.
+ * @dev: Device to add to the list.
+ */
+void device_pm_add(struct device *dev)
+{
+ /* Skip PM setup/initialization. */
+ if (device_pm_not_required(dev))
+ return;
+
+ pr_debug("Adding info for %s:%s\n",
+ dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
+ device_pm_check_callbacks(dev);
+ mutex_lock(&dpm_list_mtx);
+ if (dev->parent && dev->parent->power.is_prepared)
+ dev_warn(dev, "parent %s should not be sleeping\n",
+ dev_name(dev->parent));
+ list_add_tail(&dev->power.entry, &dpm_list);
+ dev->power.in_dpm_list = true;
+ mutex_unlock(&dpm_list_mtx);
+}
+
+/**
+ * device_pm_remove - Remove a device from the PM core's list of active devices.
+ * @dev: Device to be removed from the list.
+ */
+void device_pm_remove(struct device *dev)
+{
+ if (device_pm_not_required(dev))
+ return;
+
+ pr_debug("Removing info for %s:%s\n",
+ dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
+ complete_all(&dev->power.completion);
+ mutex_lock(&dpm_list_mtx);
+ list_del_init(&dev->power.entry);
+ dev->power.in_dpm_list = false;
+ mutex_unlock(&dpm_list_mtx);
+ device_wakeup_disable(dev);
+ pm_runtime_remove(dev);
+ device_pm_check_callbacks(dev);
+}
+
+/**
+ * device_pm_move_before - Move device in the PM core's list of active devices.
+ * @deva: Device to move in dpm_list.
+ * @devb: Device @deva should come before.
+ */
+void device_pm_move_before(struct device *deva, struct device *devb)
+{
+ pr_debug("Moving %s:%s before %s:%s\n",
+ deva->bus ? deva->bus->name : "No Bus", dev_name(deva),
+ devb->bus ? devb->bus->name : "No Bus", dev_name(devb));
+ /* Delete deva from dpm_list and reinsert before devb. */
+ list_move_tail(&deva->power.entry, &devb->power.entry);
+}
+
+/**
+ * device_pm_move_after - Move device in the PM core's list of active devices.
+ * @deva: Device to move in dpm_list.
+ * @devb: Device @deva should come after.
+ */
+void device_pm_move_after(struct device *deva, struct device *devb)
+{
+ pr_debug("Moving %s:%s after %s:%s\n",
+ deva->bus ? deva->bus->name : "No Bus", dev_name(deva),
+ devb->bus ? devb->bus->name : "No Bus", dev_name(devb));
+ /* Delete deva from dpm_list and reinsert after devb. */
+ list_move(&deva->power.entry, &devb->power.entry);
+}
+
+/**
+ * device_pm_move_last - Move device to end of the PM core's list of devices.
+ * @dev: Device to move in dpm_list.
+ */
+void device_pm_move_last(struct device *dev)
+{
+ pr_debug("Moving %s:%s to end of list\n",
+ dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
+ list_move_tail(&dev->power.entry, &dpm_list);
+}
+
+static ktime_t initcall_debug_start(struct device *dev, void *cb)
+{
+ if (!pm_print_times_enabled)
+ return 0;
+
+ dev_info(dev, "calling %pS @ %i, parent: %s\n", cb,
+ task_pid_nr(current),
+ dev->parent ? dev_name(dev->parent) : "none");
+ return ktime_get();
+}
+
+static void initcall_debug_report(struct device *dev, ktime_t calltime,
+ void *cb, int error)
+{
+ ktime_t rettime;
+ s64 nsecs;
+
+ if (!pm_print_times_enabled)
+ return;
+
+ rettime = ktime_get();
+ nsecs = (s64) ktime_to_ns(ktime_sub(rettime, calltime));
+
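+ /* The shift by 10 below approximates a ns -> us conversion. */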
+ dev_info(dev, "%pS returned %d after %Ld usecs\n", cb, error,
+ (unsigned long long)nsecs >> 10);
+}
+
+/**
+ * dpm_wait - Wait for a PM operation to complete.
+ * @dev: Device to wait for.
+ * @async: If unset, wait only if the device's power.async_suspend flag is set.
+ */
+static void dpm_wait(struct device *dev, bool async)
+{
+ if (!dev)
+ return;
+
+ if (async || (pm_async_enabled && dev->power.async_suspend))
+ wait_for_completion(&dev->power.completion);
+}
+
+static int dpm_wait_fn(struct device *dev, void *async_ptr)
+{
+ dpm_wait(dev, *((bool *)async_ptr));
+ return 0;
+}
+
+static void dpm_wait_for_children(struct device *dev, bool async)
+{
+ device_for_each_child(dev, &async, dpm_wait_fn);
+}
+
+static void dpm_wait_for_suppliers(struct device *dev, bool async)
+{
+ struct device_link *link;
+ int idx;
+
+ idx = device_links_read_lock();
+
+ /*
+ * If the supplier goes away right after we've checked the link to it,
+ * we'll wait for its completion to change the state, but that's fine,
+ * because the only things that will block as a result are the SRCU
+ * callbacks freeing the link objects for the links in the list we're
+ * walking.
+ */
+ list_for_each_entry_rcu_locked(link, &dev->links.suppliers, c_node)
+ if (READ_ONCE(link->status) != DL_STATE_DORMANT)
+ dpm_wait(link->supplier, async);
+
+ device_links_read_unlock(idx);
+}
+
+static bool dpm_wait_for_superior(struct device *dev, bool async)
+{
+ struct device *parent;
+
+ /*
+ * If the device is resumed asynchronously and the parent's callback
+ * deletes both the device and the parent itself, the parent object may
+ * be freed while this function is running, so avoid that by reference
+ * counting the parent once more unless the device has been deleted
+ * already (in which case return right away).
+ */
+ mutex_lock(&dpm_list_mtx);
+
+ if (!device_pm_initialized(dev)) {
+ mutex_unlock(&dpm_list_mtx);
+ return false;
+ }
+
+ parent = get_device(dev->parent);
+
+ mutex_unlock(&dpm_list_mtx);
+
+ dpm_wait(parent, async);
+ put_device(parent);
+
+ dpm_wait_for_suppliers(dev, async);
+
+ /*
+ * If the parent's callback has deleted the device, attempting to resume
+ * it would be invalid, so avoid doing that then.
+ */
+ return device_pm_initialized(dev);
+}
+
+static void dpm_wait_for_consumers(struct device *dev, bool async)
+{
+ struct device_link *link;
+ int idx;
+
+ idx = device_links_read_lock();
+
+ /*
+ * The status of a device link can only be changed from "dormant" by a
+ * probe, but that cannot happen during system suspend/resume. In
+ * theory it can change to "dormant" at that time, but then it is
+ * reasonable to wait for the target device anyway (e.g. if it goes
+ * away, it's better to wait for it to go away completely and then
+ * continue instead of trying to continue in parallel with its
+ * unregistration).
+ */
+ list_for_each_entry_rcu_locked(link, &dev->links.consumers, s_node)
+ if (READ_ONCE(link->status) != DL_STATE_DORMANT)
+ dpm_wait(link->consumer, async);
+
+ device_links_read_unlock(idx);
+}
+
+static void dpm_wait_for_subordinate(struct device *dev, bool async)
+{
+ dpm_wait_for_children(dev, async);
+ dpm_wait_for_consumers(dev, async);
+}
+
+/**
+ * pm_op - Return the PM operation appropriate for given PM event.
+ * @ops: PM operations to choose from.
+ * @state: PM transition of the system being carried out.
+ */
+static pm_callback_t pm_op(const struct dev_pm_ops *ops, pm_message_t state)
+{
+ switch (state.event) {
+#ifdef CONFIG_SUSPEND
+ case PM_EVENT_SUSPEND:
+ return ops->suspend;
+ case PM_EVENT_RESUME:
+ return ops->resume;
+#endif /* CONFIG_SUSPEND */
+#ifdef CONFIG_HIBERNATE_CALLBACKS
+ case PM_EVENT_FREEZE:
+ case PM_EVENT_QUIESCE:
+ return ops->freeze;
+ case PM_EVENT_HIBERNATE:
+ return ops->poweroff;
+ case PM_EVENT_THAW:
+ case PM_EVENT_RECOVER:
+ return ops->thaw;
+ case PM_EVENT_RESTORE:
+ return ops->restore;
+#endif /* CONFIG_HIBERNATE_CALLBACKS */
+ }
+
+ return NULL;
+}
+
+/**
+ * pm_late_early_op - Return the PM operation appropriate for given PM event.
+ * @ops: PM operations to choose from.
+ * @state: PM transition of the system being carried out.
+ *
+ * Runtime PM is disabled for @dev while this function is being executed.
+ */
+static pm_callback_t pm_late_early_op(const struct dev_pm_ops *ops,
+ pm_message_t state)
+{
+ switch (state.event) {
+#ifdef CONFIG_SUSPEND
+ case PM_EVENT_SUSPEND:
+ return ops->suspend_late;
+ case PM_EVENT_RESUME:
+ return ops->resume_early;
+#endif /* CONFIG_SUSPEND */
+#ifdef CONFIG_HIBERNATE_CALLBACKS
+ case PM_EVENT_FREEZE:
+ case PM_EVENT_QUIESCE:
+ return ops->freeze_late;
+ case PM_EVENT_HIBERNATE:
+ return ops->poweroff_late;
+ case PM_EVENT_THAW:
+ case PM_EVENT_RECOVER:
+ return ops->thaw_early;
+ case PM_EVENT_RESTORE:
+ return ops->restore_early;
+#endif /* CONFIG_HIBERNATE_CALLBACKS */
+ }
+
+ return NULL;
+}
+
+/**
+ * pm_noirq_op - Return the PM operation appropriate for given PM event.
+ * @ops: PM operations to choose from.
+ * @state: PM transition of the system being carried out.
+ *
+ * The driver of @dev will not receive interrupts while this function is being
+ * executed.
+ */
+static pm_callback_t pm_noirq_op(const struct dev_pm_ops *ops, pm_message_t state)
+{
+ switch (state.event) {
+#ifdef CONFIG_SUSPEND
+ case PM_EVENT_SUSPEND:
+ return ops->suspend_noirq;
+ case PM_EVENT_RESUME:
+ return ops->resume_noirq;
+#endif /* CONFIG_SUSPEND */
+#ifdef CONFIG_HIBERNATE_CALLBACKS
+ case PM_EVENT_FREEZE:
+ case PM_EVENT_QUIESCE:
+ return ops->freeze_noirq;
+ case PM_EVENT_HIBERNATE:
+ return ops->poweroff_noirq;
+ case PM_EVENT_THAW:
+ case PM_EVENT_RECOVER:
+ return ops->thaw_noirq;
+ case PM_EVENT_RESTORE:
+ return ops->restore_noirq;
+#endif /* CONFIG_HIBERNATE_CALLBACKS */
+ }
+
+ return NULL;
+}
+
+static void pm_dev_dbg(struct device *dev, pm_message_t state, const char *info)
+{
+ dev_dbg(dev, "%s%s%s\n", info, pm_verb(state.event),
+ ((state.event & PM_EVENT_SLEEP) && device_may_wakeup(dev)) ?
+ ", may wakeup" : "");
+}
+
+static void pm_dev_err(struct device *dev, pm_message_t state, const char *info,
+ int error)
+{
+ pr_err("Device %s failed to %s%s: error %d\n",
+ dev_name(dev), pm_verb(state.event), info, error);
+}
+
+static void dpm_show_time(ktime_t starttime, pm_message_t state, int error,
+ const char *info)
+{
+ ktime_t calltime;
+ u64 usecs64;
+ int usecs;
+
+ calltime = ktime_get();
+ usecs64 = ktime_to_ns(ktime_sub(calltime, starttime));
+ do_div(usecs64, NSEC_PER_USEC);
+ usecs = usecs64;
+ if (usecs == 0)
+ usecs = 1;
+
+ pm_pr_dbg("%s%s%s of devices %s after %ld.%03ld msecs\n",
+ info ?: "", info ? " " : "", pm_verb(state.event),
+ error ? "aborted" : "complete",
+ usecs / USEC_PER_MSEC, usecs % USEC_PER_MSEC);
+}
+
+static int dpm_run_callback(pm_callback_t cb, struct device *dev,
+ pm_message_t state, const char *info)
+{
+ ktime_t calltime;
+ int error;
+
+ if (!cb)
+ return 0;
+
+ calltime = initcall_debug_start(dev, cb);
+
+ pm_dev_dbg(dev, state, info);
+ trace_device_pm_callback_start(dev, info, state.event);
+ error = cb(dev);
+ trace_device_pm_callback_end(dev, error);
+ suspend_report_result(cb, error);
+
+ initcall_debug_report(dev, calltime, cb, error);
+
+ return error;
+}
+
+#ifdef CONFIG_DPM_WATCHDOG
+struct dpm_watchdog {
+ struct device *dev;
+ struct task_struct *tsk;
+ struct timer_list timer;
+};
+
+#define DECLARE_DPM_WATCHDOG_ON_STACK(wd) \
+ struct dpm_watchdog wd
+
+/**
+ * dpm_watchdog_handler - Driver suspend / resume watchdog handler.
+ * @t: The timer that PM watchdog depends on.
+ *
+ * Called when a driver has timed out suspending or resuming.
+ * There's not much we can do here to recover so panic() to
+ * capture a crash-dump in pstore.
+ */
+static void dpm_watchdog_handler(struct timer_list *t)
+{
+ struct dpm_watchdog *wd = from_timer(wd, t, timer);
+
+ dev_emerg(wd->dev, "**** DPM device timeout ****\n");
+ show_stack(wd->tsk, NULL, KERN_EMERG);
+ panic("%s %s: unrecoverable failure\n",
+ dev_driver_string(wd->dev), dev_name(wd->dev));
+}
+
+/**
+ * dpm_watchdog_set - Enable pm watchdog for given device.
+ * @wd: Watchdog. Must be allocated on the stack.
+ * @dev: Device to handle.
+ */
+static void dpm_watchdog_set(struct dpm_watchdog *wd, struct device *dev)
+{
+ struct timer_list *timer = &wd->timer;
+
+ wd->dev = dev;
+ wd->tsk = current;
+
+ timer_setup_on_stack(timer, dpm_watchdog_handler, 0);
+ /* use same timeout value for both suspend and resume */
+ timer->expires = jiffies + HZ * CONFIG_DPM_WATCHDOG_TIMEOUT;
+ add_timer(timer);
+}
+
+/**
+ * dpm_watchdog_clear - Disable suspend/resume watchdog.
+ * @wd: Watchdog to disable.
+ */
+static void dpm_watchdog_clear(struct dpm_watchdog *wd)
+{
+ struct timer_list *timer = &wd->timer;
+
+ del_timer_sync(timer);
+ destroy_timer_on_stack(timer);
+}
+#else
+#define DECLARE_DPM_WATCHDOG_ON_STACK(wd)
+#define dpm_watchdog_set(x, y)
+#define dpm_watchdog_clear(x)
+#endif
+
+/*------------------------- Resume routines -------------------------*/
+
+/**
+ * dev_pm_skip_resume - System-wide device resume optimization check.
+ * @dev: Target device.
+ *
+ * Return:
+ * - %false if the transition under way is RESTORE.
+ * - Return value of dev_pm_skip_suspend() if the transition under way is THAW.
+ * - The logical negation of %power.must_resume otherwise (that is, when the
+ * transition under way is RESUME).
+ */
+bool dev_pm_skip_resume(struct device *dev)
+{
+ if (pm_transition.event == PM_EVENT_RESTORE)
+ return false;
+
+ if (pm_transition.event == PM_EVENT_THAW)
+ return dev_pm_skip_suspend(dev);
+
+ return !dev->power.must_resume;
+}
+
+/**
+ * device_resume_noirq - Execute a "noirq resume" callback for given device.
+ * @dev: Device to handle.
+ * @state: PM transition of the system being carried out.
+ * @async: If true, the device is being resumed asynchronously.
+ *
+ * The driver of @dev will not receive interrupts while this function is being
+ * executed.
+ */
+static int device_resume_noirq(struct device *dev, pm_message_t state, bool async)
+{
+ pm_callback_t callback = NULL;
+ const char *info = NULL;
+ bool skip_resume;
+ int error = 0;
+
+ TRACE_DEVICE(dev);
+ TRACE_RESUME(0);
+
+ if (dev->power.syscore || dev->power.direct_complete)
+ goto Out;
+
+ if (!dev->power.is_noirq_suspended)
+ goto Out;
+
+ if (!dpm_wait_for_superior(dev, async))
+ goto Out;
+
+ skip_resume = dev_pm_skip_resume(dev);
+ /*
+ * If the driver callback is skipped below or by the middle layer
+ * callback and device_resume_early() also skips the driver callback for
+ * this device later, it needs to appear as "suspended" to PM-runtime,
+ * so change its status accordingly.
+ *
+ * Otherwise, the device is going to be resumed, so set its PM-runtime
+ * status to "active", but do that only if DPM_FLAG_SMART_SUSPEND is set
+ * to avoid confusing drivers that don't use it.
+ */
+ if (skip_resume)
+ pm_runtime_set_suspended(dev);
+ else if (dev_pm_skip_suspend(dev))
+ pm_runtime_set_active(dev);
+
+ if (dev->pm_domain) {
+ info = "noirq power domain ";
+ callback = pm_noirq_op(&dev->pm_domain->ops, state);
+ } else if (dev->type && dev->type->pm) {
+ info = "noirq type ";
+ callback = pm_noirq_op(dev->type->pm, state);
+ } else if (dev->class && dev->class->pm) {
+ info = "noirq class ";
+ callback = pm_noirq_op(dev->class->pm, state);
+ } else if (dev->bus && dev->bus->pm) {
+ info = "noirq bus ";
+ callback = pm_noirq_op(dev->bus->pm, state);
+ }
+ if (callback)
+ goto Run;
+
+ if (skip_resume)
+ goto Skip;
+
+ if (dev->driver && dev->driver->pm) {
+ info = "noirq driver ";
+ callback = pm_noirq_op(dev->driver->pm, state);
+ }
+
+Run:
+ error = dpm_run_callback(callback, dev, state, info);
+
+Skip:
+ dev->power.is_noirq_suspended = false;
+
+Out:
+ complete_all(&dev->power.completion);
+ TRACE_RESUME(error);
+ return error;
+}
+
+static bool is_async(struct device *dev)
+{
+ return dev->power.async_suspend && pm_async_enabled
+ && !pm_trace_is_enabled();
+}
+
+static bool dpm_async_fn(struct device *dev, async_func_t func)
+{
+ reinit_completion(&dev->power.completion);
+
+ if (is_async(dev)) {
+ get_device(dev);
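+ /* The reference is dropped by the async function via put_device(). */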
+ async_schedule_dev(func, dev);
+ return true;
+ }
+
+ return false;
+}
+
+static void async_resume_noirq(void *data, async_cookie_t cookie)
+{
+ struct device *dev = (struct device *)data;
+ int error;
+
+ error = device_resume_noirq(dev, pm_transition, true);
+ if (error)
+ pm_dev_err(dev, pm_transition, " async", error);
+
+ put_device(dev);
+}
+
+static void dpm_noirq_resume_devices(pm_message_t state)
+{
+ struct device *dev;
+ ktime_t starttime = ktime_get();
+
+ trace_suspend_resume(TPS("dpm_resume_noirq"), state.event, true);
+ mutex_lock(&dpm_list_mtx);
+ pm_transition = state;
+
+ /*
+ * Start the async threads upfront, in case their startup would
+ * otherwise be delayed by devices that resume synchronously.
+ */
+ list_for_each_entry(dev, &dpm_noirq_list, power.entry)
+ dpm_async_fn(dev, async_resume_noirq);
+
+ while (!list_empty(&dpm_noirq_list)) {
+ dev = to_device(dpm_noirq_list.next);
+ get_device(dev);
+ list_move_tail(&dev->power.entry, &dpm_late_early_list);
+ mutex_unlock(&dpm_list_mtx);
+
+ if (!is_async(dev)) {
+ int error;
+
+ error = device_resume_noirq(dev, state, false);
+ if (error) {
+ suspend_stats.failed_resume_noirq++;
+ dpm_save_failed_step(SUSPEND_RESUME_NOIRQ);
+ dpm_save_failed_dev(dev_name(dev));
+ pm_dev_err(dev, state, " noirq", error);
+ }
+ }
+
+ mutex_lock(&dpm_list_mtx);
+ put_device(dev);
+ }
+ mutex_unlock(&dpm_list_mtx);
+ async_synchronize_full();
+ dpm_show_time(starttime, state, 0, "noirq");
+ trace_suspend_resume(TPS("dpm_resume_noirq"), state.event, false);
+}
+
+/**
+ * dpm_resume_noirq - Execute "noirq resume" callbacks for all devices.
+ * @state: PM transition of the system being carried out.
+ *
+ * Invoke the "noirq" resume callbacks for all devices in dpm_noirq_list and
+ * allow device drivers' interrupt handlers to be called.
+ */
+void dpm_resume_noirq(pm_message_t state)
+{
+ dpm_noirq_resume_devices(state);
+
+ resume_device_irqs();
+ device_wakeup_disarm_wake_irqs();
+
+ cpuidle_resume();
+}
+
+/**
+ * device_resume_early - Execute an "early resume" callback for given device.
+ * @dev: Device to handle.
+ * @state: PM transition of the system being carried out.
+ * @async: If true, the device is being resumed asynchronously.
+ *
+ * Runtime PM is disabled for @dev while this function is being executed.
+ */
+static int device_resume_early(struct device *dev, pm_message_t state, bool async)
+{
+ pm_callback_t callback = NULL;
+ const char *info = NULL;
+ int error = 0;
+
+ TRACE_DEVICE(dev);
+ TRACE_RESUME(0);
+
+ if (dev->power.syscore || dev->power.direct_complete)
+ goto Out;
+
+ if (!dev->power.is_late_suspended)
+ goto Out;
+
+ if (!dpm_wait_for_superior(dev, async))
+ goto Out;
+
+ if (dev->pm_domain) {
+ info = "early power domain ";
+ callback = pm_late_early_op(&dev->pm_domain->ops, state);
+ } else if (dev->type && dev->type->pm) {
+ info = "early type ";
+ callback = pm_late_early_op(dev->type->pm, state);
+ } else if (dev->class && dev->class->pm) {
+ info = "early class ";
+ callback = pm_late_early_op(dev->class->pm, state);
+ } else if (dev->bus && dev->bus->pm) {
+ info = "early bus ";
+ callback = pm_late_early_op(dev->bus->pm, state);
+ }
+ if (callback)
+ goto Run;
+
+ if (dev_pm_skip_resume(dev))
+ goto Skip;
+
+ if (dev->driver && dev->driver->pm) {
+ info = "early driver ";
+ callback = pm_late_early_op(dev->driver->pm, state);
+ }
+
+Run:
+ error = dpm_run_callback(callback, dev, state, info);
+
+Skip:
+ dev->power.is_late_suspended = false;
+
+Out:
+ TRACE_RESUME(error);
+
+ pm_runtime_enable(dev);
+ complete_all(&dev->power.completion);
+ return error;
+}
+
+static void async_resume_early(void *data, async_cookie_t cookie)
+{
+ struct device *dev = (struct device *)data;
+ int error;
+
+ error = device_resume_early(dev, pm_transition, true);
+ if (error)
+ pm_dev_err(dev, pm_transition, " async", error);
+
+ put_device(dev);
+}
+
+/**
+ * dpm_resume_early - Execute "early resume" callbacks for all devices.
+ * @state: PM transition of the system being carried out.
+ */
+void dpm_resume_early(pm_message_t state)
+{
+ struct device *dev;
+ ktime_t starttime = ktime_get();
+
+ trace_suspend_resume(TPS("dpm_resume_early"), state.event, true);
+ mutex_lock(&dpm_list_mtx);
+ pm_transition = state;
+
+ /*
+ * Start the async threads upfront, in case their startup would
+ * otherwise be delayed by devices that resume synchronously.
+ */
+ list_for_each_entry(dev, &dpm_late_early_list, power.entry)
+ dpm_async_fn(dev, async_resume_early);
+
+ while (!list_empty(&dpm_late_early_list)) {
+ dev = to_device(dpm_late_early_list.next);
+ get_device(dev);
+ list_move_tail(&dev->power.entry, &dpm_suspended_list);
+ mutex_unlock(&dpm_list_mtx);
+
+ if (!is_async(dev)) {
+ int error;
+
+ error = device_resume_early(dev, state, false);
+ if (error) {
+ suspend_stats.failed_resume_early++;
+ dpm_save_failed_step(SUSPEND_RESUME_EARLY);
+ dpm_save_failed_dev(dev_name(dev));
+ pm_dev_err(dev, state, " early", error);
+ }
+ }
+ mutex_lock(&dpm_list_mtx);
+ put_device(dev);
+ }
+ mutex_unlock(&dpm_list_mtx);
+ async_synchronize_full();
+ dpm_show_time(starttime, state, 0, "early");
+ trace_suspend_resume(TPS("dpm_resume_early"), state.event, false);
+}
+
+/**
+ * dpm_resume_start - Execute "noirq" and "early" device callbacks.
+ * @state: PM transition of the system being carried out.
+ */
+void dpm_resume_start(pm_message_t state)
+{
+ dpm_resume_noirq(state);
+ dpm_resume_early(state);
+}
+EXPORT_SYMBOL_GPL(dpm_resume_start);
+
+/**
+ * device_resume - Execute "resume" callbacks for given device.
+ * @dev: Device to handle.
+ * @state: PM transition of the system being carried out.
+ * @async: If true, the device is being resumed asynchronously.
+ */
+static int device_resume(struct device *dev, pm_message_t state, bool async)
+{
+ pm_callback_t callback = NULL;
+ const char *info = NULL;
+ int error = 0;
+ DECLARE_DPM_WATCHDOG_ON_STACK(wd);
+
+ TRACE_DEVICE(dev);
+ TRACE_RESUME(0);
+
+ if (dev->power.syscore)
+ goto Complete;
+
+ if (dev->power.direct_complete) {
+ /* Match the pm_runtime_disable() in __device_suspend(). */
+ pm_runtime_enable(dev);
+ goto Complete;
+ }
+
+ if (!dpm_wait_for_superior(dev, async))
+ goto Complete;
+
+ dpm_watchdog_set(&wd, dev);
+ device_lock(dev);
+
+ /*
+ * This is a fib. But we'll allow new children to be added below
+ * a resumed device, even if the device hasn't been completed yet.
+ */
+ dev->power.is_prepared = false;
+
+ if (!dev->power.is_suspended)
+ goto Unlock;
+
+ if (dev->pm_domain) {
+ info = "power domain ";
+ callback = pm_op(&dev->pm_domain->ops, state);
+ goto Driver;
+ }
+
+ if (dev->type && dev->type->pm) {
+ info = "type ";
+ callback = pm_op(dev->type->pm, state);
+ goto Driver;
+ }
+
+ if (dev->class && dev->class->pm) {
+ info = "class ";
+ callback = pm_op(dev->class->pm, state);
+ goto Driver;
+ }
+
+ if (dev->bus) {
+ if (dev->bus->pm) {
+ info = "bus ";
+ callback = pm_op(dev->bus->pm, state);
+ } else if (dev->bus->resume) {
+ info = "legacy bus ";
+ callback = dev->bus->resume;
+ goto End;
+ }
+ }
+
+ Driver:
+ if (!callback && dev->driver && dev->driver->pm) {
+ info = "driver ";
+ callback = pm_op(dev->driver->pm, state);
+ }
+
+ End:
+ error = dpm_run_callback(callback, dev, state, info);
+ dev->power.is_suspended = false;
+
+ Unlock:
+ device_unlock(dev);
+ dpm_watchdog_clear(&wd);
+
+ Complete:
+ complete_all(&dev->power.completion);
+
+ TRACE_RESUME(error);
+
+ return error;
+}
+
+static void async_resume(void *data, async_cookie_t cookie)
+{
+ struct device *dev = (struct device *)data;
+ int error;
+
+ error = device_resume(dev, pm_transition, true);
+ if (error)
+ pm_dev_err(dev, pm_transition, " async", error);
+ put_device(dev);
+}
+
+/**
+ * dpm_resume - Execute "resume" callbacks for non-sysdev devices.
+ * @state: PM transition of the system being carried out.
+ *
+ * Execute the appropriate "resume" callback for all devices whose status
+ * indicates that they are suspended.
+ */
+void dpm_resume(pm_message_t state)
+{
+ struct device *dev;
+ ktime_t starttime = ktime_get();
+
+ trace_suspend_resume(TPS("dpm_resume"), state.event, true);
+ might_sleep();
+
+ mutex_lock(&dpm_list_mtx);
+ pm_transition = state;
+ async_error = 0;
+
+ list_for_each_entry(dev, &dpm_suspended_list, power.entry)
+ dpm_async_fn(dev, async_resume);
+
+ while (!list_empty(&dpm_suspended_list)) {
+ dev = to_device(dpm_suspended_list.next);
+ get_device(dev);
+ if (!is_async(dev)) {
+ int error;
+
+ mutex_unlock(&dpm_list_mtx);
+
+ error = device_resume(dev, state, false);
+ if (error) {
+ suspend_stats.failed_resume++;
+ dpm_save_failed_step(SUSPEND_RESUME);
+ dpm_save_failed_dev(dev_name(dev));
+ pm_dev_err(dev, state, "", error);
+ }
+
+ mutex_lock(&dpm_list_mtx);
+ }
+ if (!list_empty(&dev->power.entry))
+ list_move_tail(&dev->power.entry, &dpm_prepared_list);
+ put_device(dev);
+ }
+ mutex_unlock(&dpm_list_mtx);
+ async_synchronize_full();
+ dpm_show_time(starttime, state, 0, NULL);
+
+ cpufreq_resume();
+ devfreq_resume();
+ trace_suspend_resume(TPS("dpm_resume"), state.event, false);
+}
+
+/**
+ * device_complete - Complete a PM transition for given device.
+ * @dev: Device to handle.
+ * @state: PM transition of the system being carried out.
+ */
+static void device_complete(struct device *dev, pm_message_t state)
+{
+ void (*callback)(struct device *) = NULL;
+ const char *info = NULL;
+
+ if (dev->power.syscore)
+ goto out;
+
+ device_lock(dev);
+
+ if (dev->pm_domain) {
+ info = "completing power domain ";
+ callback = dev->pm_domain->ops.complete;
+ } else if (dev->type && dev->type->pm) {
+ info = "completing type ";
+ callback = dev->type->pm->complete;
+ } else if (dev->class && dev->class->pm) {
+ info = "completing class ";
+ callback = dev->class->pm->complete;
+ } else if (dev->bus && dev->bus->pm) {
+ info = "completing bus ";
+ callback = dev->bus->pm->complete;
+ }
+
+ if (!callback && dev->driver && dev->driver->pm) {
+ info = "completing driver ";
+ callback = dev->driver->pm->complete;
+ }
+
+ if (callback) {
+ pm_dev_dbg(dev, state, info);
+ callback(dev);
+ }
+
+ device_unlock(dev);
+
+out:
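+ /* Balance the pm_runtime_get_noresume() done in device_prepare(). */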
+ pm_runtime_put(dev);
+}
+
+/**
+ * dpm_complete - Complete a PM transition for all non-sysdev devices.
+ * @state: PM transition of the system being carried out.
+ *
+ * Execute the ->complete() callbacks for all devices whose PM status is not
+ * DPM_ON (this allows new devices to be registered).
+ */
+void dpm_complete(pm_message_t state)
+{
+ struct list_head list;
+
+ trace_suspend_resume(TPS("dpm_complete"), state.event, true);
+ might_sleep();
+
+ INIT_LIST_HEAD(&list);
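+ /*
+ * Move devices to a temporary list so that dpm_list_mtx can be dropped
+ * while their ->complete() callbacks run; the list is spliced back
+ * into dpm_list at the end.
+ */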
+ mutex_lock(&dpm_list_mtx);
+ while (!list_empty(&dpm_prepared_list)) {
+ struct device *dev = to_device(dpm_prepared_list.prev);
+
+ get_device(dev);
+ dev->power.is_prepared = false;
+ list_move(&dev->power.entry, &list);
+ mutex_unlock(&dpm_list_mtx);
+
+ trace_device_pm_callback_start(dev, "", state.event);
+ device_complete(dev, state);
+ trace_device_pm_callback_end(dev, 0);
+
+ mutex_lock(&dpm_list_mtx);
+ put_device(dev);
+ }
+ list_splice(&list, &dpm_list);
+ mutex_unlock(&dpm_list_mtx);
+
+ /* Allow device probing and trigger re-probing of deferred devices */
+ device_unblock_probing();
+ trace_suspend_resume(TPS("dpm_complete"), state.event, false);
+}
+
+/**
+ * dpm_resume_end - Execute "resume" callbacks and complete system transition.
+ * @state: PM transition of the system being carried out.
+ *
+ * Execute "resume" callbacks for all devices and complete the PM transition of
+ * the system.
+ */
+void dpm_resume_end(pm_message_t state)
+{
+ dpm_resume(state);
+ dpm_complete(state);
+}
+EXPORT_SYMBOL_GPL(dpm_resume_end);
+
+
+/*------------------------- Suspend routines -------------------------*/
+
+/**
+ * resume_event - Return a "resume" message for given "suspend" sleep state.
+ * @sleep_state: PM message representing a sleep state.
+ *
+ * Return a PM message representing the resume event corresponding to given
+ * sleep state.
+ */
+static pm_message_t resume_event(pm_message_t sleep_state)
+{
+ switch (sleep_state.event) {
+ case PM_EVENT_SUSPEND:
+ return PMSG_RESUME;
+ case PM_EVENT_FREEZE:
+ case PM_EVENT_QUIESCE:
+ return PMSG_RECOVER;
+ case PM_EVENT_HIBERNATE:
+ return PMSG_RESTORE;
+ }
+ return PMSG_ON;
+}
+
+static void dpm_superior_set_must_resume(struct device *dev)
+{
+ struct device_link *link;
+ int idx;
+
+ if (dev->parent)
+ dev->parent->power.must_resume = true;
+
+ idx = device_links_read_lock();
+
+ list_for_each_entry_rcu_locked(link, &dev->links.suppliers, c_node)
+ link->supplier->power.must_resume = true;
+
+ device_links_read_unlock(idx);
+}
+
+/**
+ * __device_suspend_noirq - Execute a "noirq suspend" callback for given device.
+ * @dev: Device to handle.
+ * @state: PM transition of the system being carried out.
+ * @async: If true, the device is being suspended asynchronously.
+ *
+ * The driver of @dev will not receive interrupts while this function is being
+ * executed.
+ */
+static int __device_suspend_noirq(struct device *dev, pm_message_t state, bool async)
+{
+ pm_callback_t callback = NULL;
+ const char *info = NULL;
+ int error = 0;
+
+ TRACE_DEVICE(dev);
+ TRACE_SUSPEND(0);
+
+ dpm_wait_for_subordinate(dev, async);
+
+ if (async_error)
+ goto Complete;
+
+ if (dev->power.syscore || dev->power.direct_complete)
+ goto Complete;
+
+ if (dev->pm_domain) {
+ info = "noirq power domain ";
+ callback = pm_noirq_op(&dev->pm_domain->ops, state);
+ } else if (dev->type && dev->type->pm) {
+ info = "noirq type ";
+ callback = pm_noirq_op(dev->type->pm, state);
+ } else if (dev->class && dev->class->pm) {
+ info = "noirq class ";
+ callback = pm_noirq_op(dev->class->pm, state);
+ } else if (dev->bus && dev->bus->pm) {
+ info = "noirq bus ";
+ callback = pm_noirq_op(dev->bus->pm, state);
+ }
+ if (callback)
+ goto Run;
+
+ if (dev_pm_skip_suspend(dev))
+ goto Skip;
+
+ if (dev->driver && dev->driver->pm) {
+ info = "noirq driver ";
+ callback = pm_noirq_op(dev->driver->pm, state);
+ }
+
+Run:
+ error = dpm_run_callback(callback, dev, state, info);
+ if (error) {
+ async_error = error;
+ goto Complete;
+ }
+
+Skip:
+ dev->power.is_noirq_suspended = true;
+
+ /*
+ * Skipping the resume of devices that were in use right before the
+ * system suspend (as indicated by their PM-runtime usage counters)
+ * would be suboptimal.  Also resume them if skipping their resume is
+ * not allowed.
+ */
+ if (atomic_read(&dev->power.usage_count) > 1 ||
+ !(dev_pm_test_driver_flags(dev, DPM_FLAG_MAY_SKIP_RESUME) &&
+ dev->power.may_skip_resume))
+ dev->power.must_resume = true;
+
+ if (dev->power.must_resume)
+ dpm_superior_set_must_resume(dev);
+
+Complete:
+ complete_all(&dev->power.completion);
+ TRACE_SUSPEND(error);
+ return error;
+}
+
+static void async_suspend_noirq(void *data, async_cookie_t cookie)
+{
+ struct device *dev = (struct device *)data;
+ int error;
+
+ error = __device_suspend_noirq(dev, pm_transition, true);
+ if (error) {
+ dpm_save_failed_dev(dev_name(dev));
+ pm_dev_err(dev, pm_transition, " async", error);
+ }
+
+ put_device(dev);
+}
+
+static int device_suspend_noirq(struct device *dev)
+{
+ if (dpm_async_fn(dev, async_suspend_noirq))
+ return 0;
+
+ return __device_suspend_noirq(dev, pm_transition, false);
+}
+
+static int dpm_noirq_suspend_devices(pm_message_t state)
+{
+ ktime_t starttime = ktime_get();
+ int error = 0;
+
+ trace_suspend_resume(TPS("dpm_suspend_noirq"), state.event, true);
+ mutex_lock(&dpm_list_mtx);
+ pm_transition = state;
+ async_error = 0;
+
+ while (!list_empty(&dpm_late_early_list)) {
+ struct device *dev = to_device(dpm_late_early_list.prev);
+
+ get_device(dev);
+ mutex_unlock(&dpm_list_mtx);
+
+ error = device_suspend_noirq(dev);
+
+ mutex_lock(&dpm_list_mtx);
+ if (error) {
+ pm_dev_err(dev, state, " noirq", error);
+ dpm_save_failed_dev(dev_name(dev));
+ put_device(dev);
+ break;
+ }
+ if (!list_empty(&dev->power.entry))
+ list_move(&dev->power.entry, &dpm_noirq_list);
+ put_device(dev);
+
+ if (async_error)
+ break;
+ }
+ mutex_unlock(&dpm_list_mtx);
+ async_synchronize_full();
+ if (!error)
+ error = async_error;
+
+ if (error) {
+ suspend_stats.failed_suspend_noirq++;
+ dpm_save_failed_step(SUSPEND_SUSPEND_NOIRQ);
+ }
+ dpm_show_time(starttime, state, error, "noirq");
+ trace_suspend_resume(TPS("dpm_suspend_noirq"), state.event, false);
+ return error;
+}
+
+/**
+ * dpm_suspend_noirq - Execute "noirq suspend" callbacks for all devices.
+ * @state: PM transition of the system being carried out.
+ *
+ * Prevent device drivers' interrupt handlers from being called and invoke
+ * "noirq" suspend callbacks for all non-sysdev devices.
+ */
+int dpm_suspend_noirq(pm_message_t state)
+{
+ int ret;
+
+ cpuidle_pause();
+
+ device_wakeup_arm_wake_irqs();
+ suspend_device_irqs();
+
+ ret = dpm_noirq_suspend_devices(state);
+ if (ret)
+ dpm_resume_noirq(resume_event(state));
+
+ return ret;
+}
+
+static void dpm_propagate_wakeup_to_parent(struct device *dev)
+{
+ struct device *parent = dev->parent;
+
+ if (!parent)
+ return;
+
+ spin_lock_irq(&parent->power.lock);
+
+ if (dev->power.wakeup_path && !parent->power.ignore_children)
+ parent->power.wakeup_path = true;
+
+ spin_unlock_irq(&parent->power.lock);
+}
+
+/**
+ * __device_suspend_late - Execute a "late suspend" callback for given device.
+ * @dev: Device to handle.
+ * @state: PM transition of the system being carried out.
+ * @async: If true, the device is being suspended asynchronously.
+ *
+ * Runtime PM is disabled for @dev while this function is being executed.
+ */
+static int __device_suspend_late(struct device *dev, pm_message_t state, bool async)
+{
+ pm_callback_t callback = NULL;
+ const char *info = NULL;
+ int error = 0;
+
+ TRACE_DEVICE(dev);
+ TRACE_SUSPEND(0);
+
+ __pm_runtime_disable(dev, false);
+
+ dpm_wait_for_subordinate(dev, async);
+
+ if (async_error)
+ goto Complete;
+
+ if (pm_wakeup_pending()) {
+ async_error = -EBUSY;
+ goto Complete;
+ }
+
+ if (dev->power.syscore || dev->power.direct_complete)
+ goto Complete;
+
+ if (dev->pm_domain) {
+ info = "late power domain ";
+ callback = pm_late_early_op(&dev->pm_domain->ops, state);
+ } else if (dev->type && dev->type->pm) {
+ info = "late type ";
+ callback = pm_late_early_op(dev->type->pm, state);
+ } else if (dev->class && dev->class->pm) {
+ info = "late class ";
+ callback = pm_late_early_op(dev->class->pm, state);
+ } else if (dev->bus && dev->bus->pm) {
+ info = "late bus ";
+ callback = pm_late_early_op(dev->bus->pm, state);
+ }
+ if (callback)
+ goto Run;
+
+ if (dev_pm_skip_suspend(dev))
+ goto Skip;
+
+ if (dev->driver && dev->driver->pm) {
+ info = "late driver ";
+ callback = pm_late_early_op(dev->driver->pm, state);
+ }
+
+Run:
+ error = dpm_run_callback(callback, dev, state, info);
+ if (error) {
+ async_error = error;
+ goto Complete;
+ }
+ dpm_propagate_wakeup_to_parent(dev);
+
+Skip:
+ dev->power.is_late_suspended = true;
+
+Complete:
+ TRACE_SUSPEND(error);
+ complete_all(&dev->power.completion);
+ return error;
+}
+
+static void async_suspend_late(void *data, async_cookie_t cookie)
+{
+ struct device *dev = (struct device *)data;
+ int error;
+
+ error = __device_suspend_late(dev, pm_transition, true);
+ if (error) {
+ dpm_save_failed_dev(dev_name(dev));
+ pm_dev_err(dev, pm_transition, " async", error);
+ }
+ put_device(dev);
+}
+
+static int device_suspend_late(struct device *dev)
+{
+ if (dpm_async_fn(dev, async_suspend_late))
+ return 0;
+
+ return __device_suspend_late(dev, pm_transition, false);
+}
+
+/**
+ * dpm_suspend_late - Execute "late suspend" callbacks for all devices.
+ * @state: PM transition of the system being carried out.
+ */
+int dpm_suspend_late(pm_message_t state)
+{
+ ktime_t starttime = ktime_get();
+ int error = 0;
+
+ trace_suspend_resume(TPS("dpm_suspend_late"), state.event, true);
+ mutex_lock(&dpm_list_mtx);
+ pm_transition = state;
+ async_error = 0;
+
+ while (!list_empty(&dpm_suspended_list)) {
+ struct device *dev = to_device(dpm_suspended_list.prev);
+
+ get_device(dev);
+ mutex_unlock(&dpm_list_mtx);
+
+ error = device_suspend_late(dev);
+
+ mutex_lock(&dpm_list_mtx);
+ if (!list_empty(&dev->power.entry))
+ list_move(&dev->power.entry, &dpm_late_early_list);
+
+ if (error) {
+ pm_dev_err(dev, state, " late", error);
+ dpm_save_failed_dev(dev_name(dev));
+ put_device(dev);
+ break;
+ }
+ put_device(dev);
+
+ if (async_error)
+ break;
+ }
+ mutex_unlock(&dpm_list_mtx);
+ async_synchronize_full();
+ if (!error)
+ error = async_error;
+ if (error) {
+ suspend_stats.failed_suspend_late++;
+ dpm_save_failed_step(SUSPEND_SUSPEND_LATE);
+ dpm_resume_early(resume_event(state));
+ }
+ dpm_show_time(starttime, state, error, "late");
+ trace_suspend_resume(TPS("dpm_suspend_late"), state.event, false);
+ return error;
+}
+
+/**
+ * dpm_suspend_end - Execute "late" and "noirq" device suspend callbacks.
+ * @state: PM transition of the system being carried out.
+ */
+int dpm_suspend_end(pm_message_t state)
+{
+ ktime_t starttime = ktime_get();
+ int error;
+
+ error = dpm_suspend_late(state);
+ if (error)
+ goto out;
+
+ error = dpm_suspend_noirq(state);
+ if (error)
+ dpm_resume_early(resume_event(state));
+
+out:
+ dpm_show_time(starttime, state, error, "end");
+ return error;
+}
+EXPORT_SYMBOL_GPL(dpm_suspend_end);
+
+/**
+ * legacy_suspend - Execute a legacy (bus or class) suspend callback for device.
+ * @dev: Device to suspend.
+ * @state: PM transition of the system being carried out.
+ * @cb: Suspend callback to execute.
+ * @info: string description of caller.
+ */
+static int legacy_suspend(struct device *dev, pm_message_t state,
+ int (*cb)(struct device *dev, pm_message_t state),
+ const char *info)
+{
+ int error;
+ ktime_t calltime;
+
+ calltime = initcall_debug_start(dev, cb);
+
+ trace_device_pm_callback_start(dev, info, state.event);
+ error = cb(dev, state);
+ trace_device_pm_callback_end(dev, error);
+ suspend_report_result(cb, error);
+
+ initcall_debug_report(dev, calltime, cb, error);
+
+ return error;
+}
+
+static void dpm_clear_superiors_direct_complete(struct device *dev)
+{
+ struct device_link *link;
+ int idx;
+
+ if (dev->parent) {
+ spin_lock_irq(&dev->parent->power.lock);
+ dev->parent->power.direct_complete = false;
+ spin_unlock_irq(&dev->parent->power.lock);
+ }
+
+ idx = device_links_read_lock();
+
+ list_for_each_entry_rcu_locked(link, &dev->links.suppliers, c_node) {
+ spin_lock_irq(&link->supplier->power.lock);
+ link->supplier->power.direct_complete = false;
+ spin_unlock_irq(&link->supplier->power.lock);
+ }
+
+ device_links_read_unlock(idx);
+}
+
+/**
+ * __device_suspend - Execute "suspend" callbacks for given device.
+ * @dev: Device to handle.
+ * @state: PM transition of the system being carried out.
+ * @async: If true, the device is being suspended asynchronously.
+ */
+static int __device_suspend(struct device *dev, pm_message_t state, bool async)
+{
+ pm_callback_t callback = NULL;
+ const char *info = NULL;
+ int error = 0;
+ DECLARE_DPM_WATCHDOG_ON_STACK(wd);
+
+ TRACE_DEVICE(dev);
+ TRACE_SUSPEND(0);
+
+ dpm_wait_for_subordinate(dev, async);
+
+ if (async_error) {
+ dev->power.direct_complete = false;
+ goto Complete;
+ }
+
+ /*
+ * Wait for possible runtime PM transitions of the device in progress
+ * to complete and if there's a runtime resume request pending for it,
+ * resume it before proceeding with invoking the system-wide suspend
+ * callbacks for it.
+ *
+ * If the system-wide suspend callbacks below change the configuration
+ * of the device, they must disable runtime PM for it or otherwise
+ * ensure that its runtime-resume callbacks will not be confused by that
+ * change in case they are invoked going forward.
+ */
+ pm_runtime_barrier(dev);
+
+ if (pm_wakeup_pending()) {
+ dev->power.direct_complete = false;
+ async_error = -EBUSY;
+ goto Complete;
+ }
+
+ if (dev->power.syscore)
+ goto Complete;
+
+ /* Avoid direct_complete to let wakeup_path propagate. */
+ if (device_may_wakeup(dev) || dev->power.wakeup_path)
+ dev->power.direct_complete = false;
+
+ if (dev->power.direct_complete) {
+ if (pm_runtime_status_suspended(dev)) {
+ pm_runtime_disable(dev);
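+ /*
+ * Re-check after disabling runtime PM: a runtime resume may have
+ * run between the check above and the disable taking effect.
+ */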
+ if (pm_runtime_status_suspended(dev)) {
+ pm_dev_dbg(dev, state, "direct-complete ");
+ goto Complete;
+ }
+
+ pm_runtime_enable(dev);
+ }
+ dev->power.direct_complete = false;
+ }
+
+ dev->power.may_skip_resume = true;
+ dev->power.must_resume = !dev_pm_test_driver_flags(dev, DPM_FLAG_MAY_SKIP_RESUME);
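+ /*
+ * Note that must_resume may still be set later, in
+ * __device_suspend_noirq().
+ */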
+
+ dpm_watchdog_set(&wd, dev);
+ device_lock(dev);
+
+ if (dev->pm_domain) {
+ info = "power domain ";
+ callback = pm_op(&dev->pm_domain->ops, state);
+ goto Run;
+ }
+
+ if (dev->type && dev->type->pm) {
+ info = "type ";
+ callback = pm_op(dev->type->pm, state);
+ goto Run;
+ }
+
+ if (dev->class && dev->class->pm) {
+ info = "class ";
+ callback = pm_op(dev->class->pm, state);
+ goto Run;
+ }
+
+ if (dev->bus) {
+ if (dev->bus->pm) {
+ info = "bus ";
+ callback = pm_op(dev->bus->pm, state);
+ } else if (dev->bus->suspend) {
+ pm_dev_dbg(dev, state, "legacy bus ");
+ error = legacy_suspend(dev, state, dev->bus->suspend,
+ "legacy bus ");
+ goto End;
+ }
+ }
+
+ Run:
+ if (!callback && dev->driver && dev->driver->pm) {
+ info = "driver ";
+ callback = pm_op(dev->driver->pm, state);
+ }
+
+ error = dpm_run_callback(callback, dev, state, info);
+
+ End:
+ if (!error) {
+ dev->power.is_suspended = true;
+ if (device_may_wakeup(dev))
+ dev->power.wakeup_path = true;
+
+ dpm_propagate_wakeup_to_parent(dev);
+ dpm_clear_superiors_direct_complete(dev);
+ }
+
+ device_unlock(dev);
+ dpm_watchdog_clear(&wd);
+
+ Complete:
+ if (error)
+ async_error = error;
+
+ complete_all(&dev->power.completion);
+ TRACE_SUSPEND(error);
+ return error;
+}
+
+static void async_suspend(void *data, async_cookie_t cookie)
+{
+ struct device *dev = (struct device *)data;
+ int error;
+
+ error = __device_suspend(dev, pm_transition, true);
+ if (error) {
+ dpm_save_failed_dev(dev_name(dev));
+ pm_dev_err(dev, pm_transition, " async", error);
+ }
+
+ put_device(dev);
+}
+
+static int device_suspend(struct device *dev)
+{
+ if (dpm_async_fn(dev, async_suspend))
+ return 0;
+
+ return __device_suspend(dev, pm_transition, false);
+}
+
+/**
+ * dpm_suspend - Execute "suspend" callbacks for all non-sysdev devices.
+ * @state: PM transition of the system being carried out.
+ */
+int dpm_suspend(pm_message_t state)
+{
+ ktime_t starttime = ktime_get();
+ int error = 0;
+
+ trace_suspend_resume(TPS("dpm_suspend"), state.event, true);
+ might_sleep();
+
+ devfreq_suspend();
+ cpufreq_suspend();
+
+ mutex_lock(&dpm_list_mtx);
+ pm_transition = state;
+ async_error = 0;
+ while (!list_empty(&dpm_prepared_list)) {
+ struct device *dev = to_device(dpm_prepared_list.prev);
+
+ get_device(dev);
+ mutex_unlock(&dpm_list_mtx);
+
+ error = device_suspend(dev);
+
+ mutex_lock(&dpm_list_mtx);
+ if (error) {
+ pm_dev_err(dev, state, "", error);
+ dpm_save_failed_dev(dev_name(dev));
+ put_device(dev);
+ break;
+ }
+ if (!list_empty(&dev->power.entry))
+ list_move(&dev->power.entry, &dpm_suspended_list);
+ put_device(dev);
+ if (async_error)
+ break;
+ }
+ mutex_unlock(&dpm_list_mtx);
+ async_synchronize_full();
+ if (!error)
+ error = async_error;
+ if (error) {
+ suspend_stats.failed_suspend++;
+ dpm_save_failed_step(SUSPEND_SUSPEND);
+ }
+ dpm_show_time(starttime, state, error, NULL);
+ trace_suspend_resume(TPS("dpm_suspend"), state.event, false);
+ return error;
+}
+
+/**
+ * device_prepare - Prepare a device for system power transition.
+ * @dev: Device to handle.
+ * @state: PM transition of the system being carried out.
+ *
+ * Execute the ->prepare() callback(s) for given device. No new children of the
+ * device may be registered after this function has returned.
+ */
+static int device_prepare(struct device *dev, pm_message_t state)
+{
+ int (*callback)(struct device *) = NULL;
+ int ret = 0;
+
+ /*
+ * If a device's parent goes into runtime suspend at the wrong time,
+ * it won't be possible to resume the device. To prevent this we
+ * block runtime suspend here, during the prepare phase, and allow
+ * it again during the complete phase.
+ */
+ pm_runtime_get_noresume(dev);
+
+ if (dev->power.syscore)
+ return 0;
+
+ device_lock(dev);
+
+ dev->power.wakeup_path = false;
+
+ if (dev->power.no_pm_callbacks)
+ goto unlock;
+
+ if (dev->pm_domain)
+ callback = dev->pm_domain->ops.prepare;
+ else if (dev->type && dev->type->pm)
+ callback = dev->type->pm->prepare;
+ else if (dev->class && dev->class->pm)
+ callback = dev->class->pm->prepare;
+ else if (dev->bus && dev->bus->pm)
+ callback = dev->bus->pm->prepare;
+
+ if (!callback && dev->driver && dev->driver->pm)
+ callback = dev->driver->pm->prepare;
+
+ if (callback)
+ ret = callback(dev);
+
+unlock:
+ device_unlock(dev);
+
+ if (ret < 0) {
+ suspend_report_result(callback, ret);
+ pm_runtime_put(dev);
+ return ret;
+ }
+ /*
+ * A positive return value from ->prepare() means "this device appears
+ * to be runtime-suspended and its state is fine, so if it really is
+ * runtime-suspended, you can leave it in that state provided that you
+ * will do the same thing with all of its descendants". This only
+ * applies to suspend transitions, however.
+ */
+ spin_lock_irq(&dev->power.lock);
+ dev->power.direct_complete = state.event == PM_EVENT_SUSPEND &&
+ (ret > 0 || dev->power.no_pm_callbacks) &&
+ !dev_pm_test_driver_flags(dev, DPM_FLAG_NO_DIRECT_COMPLETE);
+ spin_unlock_irq(&dev->power.lock);
+ return 0;
+}
+
+/**
+ * dpm_prepare - Prepare all non-sysdev devices for a system PM transition.
+ * @state: PM transition of the system being carried out.
+ *
+ * Execute the ->prepare() callback(s) for all devices.
+ */
+int dpm_prepare(pm_message_t state)
+{
+ int error = 0;
+
+ trace_suspend_resume(TPS("dpm_prepare"), state.event, true);
+ might_sleep();
+
+ /*
+	 * Give the known devices a chance to complete their probes before
+	 * device probing is disabled. This sync point is important at least
+	 * at boot time and during hibernation restore.
+ */
+ wait_for_device_probe();
+ /*
+	 * Probing devices during suspend or hibernation is unsafe and would
+	 * make system behavior unpredictable, so prohibit device probing here
+	 * and defer the probes instead. The normal behavior will be restored
+	 * in dpm_complete().
+ */
+ device_block_probing();
+
+ mutex_lock(&dpm_list_mtx);
+ while (!list_empty(&dpm_list)) {
+ struct device *dev = to_device(dpm_list.next);
+
+ get_device(dev);
+ mutex_unlock(&dpm_list_mtx);
+
+ trace_device_pm_callback_start(dev, "", state.event);
+ error = device_prepare(dev, state);
+ trace_device_pm_callback_end(dev, error);
+
+ mutex_lock(&dpm_list_mtx);
+ if (error) {
+ if (error == -EAGAIN) {
+ put_device(dev);
+ error = 0;
+ continue;
+ }
+ pr_info("Device %s not prepared for power transition: code %d\n",
+ dev_name(dev), error);
+ put_device(dev);
+ break;
+ }
+ dev->power.is_prepared = true;
+ if (!list_empty(&dev->power.entry))
+ list_move_tail(&dev->power.entry, &dpm_prepared_list);
+ put_device(dev);
+ }
+ mutex_unlock(&dpm_list_mtx);
+ trace_suspend_resume(TPS("dpm_prepare"), state.event, false);
+ return error;
+}
+
+/**
+ * dpm_suspend_start - Prepare devices for PM transition and suspend them.
+ * @state: PM transition of the system being carried out.
+ *
+ * Prepare all non-sysdev devices for system PM transition and execute "suspend"
+ * callbacks for them.
+ */
+int dpm_suspend_start(pm_message_t state)
+{
+ ktime_t starttime = ktime_get();
+ int error;
+
+ error = dpm_prepare(state);
+ if (error) {
+ suspend_stats.failed_prepare++;
+ dpm_save_failed_step(SUSPEND_PREPARE);
+ } else
+ error = dpm_suspend(state);
+ dpm_show_time(starttime, state, error, "start");
+ return error;
+}
+EXPORT_SYMBOL_GPL(dpm_suspend_start);
+
+void __suspend_report_result(const char *function, void *fn, int ret)
+{
+ if (ret)
+ pr_err("%s(): %pS returns %d\n", function, fn, ret);
+}
+EXPORT_SYMBOL_GPL(__suspend_report_result);
+
+/**
+ * device_pm_wait_for_dev - Wait for suspend/resume of a device to complete.
+ * @subordinate: Device that needs to wait for @dev.
+ * @dev: Device to wait for.
+ */
+int device_pm_wait_for_dev(struct device *subordinate, struct device *dev)
+{
+ dpm_wait(dev, subordinate->power.async_suspend);
+ return async_error;
+}
+EXPORT_SYMBOL_GPL(device_pm_wait_for_dev);
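+
+/*
+ * Illustrative sketch (not part of this file): with asynchronous suspend
+ * and resume enabled, a driver whose device shares hardware with another
+ * device "other" (neither its parent nor a supplier) might wait for that
+ * device from its own resume callback before touching the shared hardware.
+ * Both "other" and foo_reinit_shared_hw() are hypothetical names used only
+ * for this sketch.
+ *
+ *  static int foo_resume(struct device *dev)
+ *  {
+ *      int error = device_pm_wait_for_dev(dev, other);
+ *
+ *      if (error)
+ *          return error;
+ *
+ *      return foo_reinit_shared_hw(dev);
+ *  }
+ */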
+
+/**
+ * dpm_for_each_dev - device iterator.
+ * @data: data for the callback.
+ * @fn: function to be called for each device.
+ *
+ * Iterate over devices in dpm_list, and call @fn for each device,
+ * passing it @data.
+ */
+void dpm_for_each_dev(void *data, void (*fn)(struct device *, void *))
+{
+ struct device *dev;
+
+ if (!fn)
+ return;
+
+ device_pm_lock();
+ list_for_each_entry(dev, &dpm_list, power.entry)
+ fn(dev, data);
+ device_pm_unlock();
+}
+EXPORT_SYMBOL_GPL(dpm_for_each_dev);
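+
+/*
+ * Illustrative sketch (not part of this file): dpm_for_each_dev() walks
+ * dpm_list with a caller-supplied callback. The "dpm_log_one" helper below
+ * is hypothetical and only shows the expected callback signature.
+ *
+ *  static void dpm_log_one(struct device *dev, void *data)
+ *  {
+ *      dev_info(dev, "present on dpm_list\n");
+ *  }
+ *
+ *  dpm_for_each_dev(NULL, dpm_log_one);
+ */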
+
+static bool pm_ops_is_empty(const struct dev_pm_ops *ops)
+{
+ if (!ops)
+ return true;
+
+ return !ops->prepare &&
+ !ops->suspend &&
+ !ops->suspend_late &&
+ !ops->suspend_noirq &&
+ !ops->resume_noirq &&
+ !ops->resume_early &&
+ !ops->resume &&
+ !ops->complete;
+}
+
+void device_pm_check_callbacks(struct device *dev)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&dev->power.lock, flags);
+ dev->power.no_pm_callbacks =
+ (!dev->bus || (pm_ops_is_empty(dev->bus->pm) &&
+ !dev->bus->suspend && !dev->bus->resume)) &&
+ (!dev->class || pm_ops_is_empty(dev->class->pm)) &&
+ (!dev->type || pm_ops_is_empty(dev->type->pm)) &&
+ (!dev->pm_domain || pm_ops_is_empty(&dev->pm_domain->ops)) &&
+ (!dev->driver || (pm_ops_is_empty(dev->driver->pm) &&
+ !dev->driver->suspend && !dev->driver->resume));
+ spin_unlock_irqrestore(&dev->power.lock, flags);
+}
+
+bool dev_pm_skip_suspend(struct device *dev)
+{
+ return dev_pm_test_driver_flags(dev, DPM_FLAG_SMART_SUSPEND) &&
+ pm_runtime_status_suspended(dev);
+}
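+
+/*
+ * Illustrative sketch (not part of this file): dev_pm_skip_suspend() only
+ * returns true for devices whose drivers opt in with DPM_FLAG_SMART_SUSPEND
+ * and that are already runtime-suspended. A driver would typically opt in
+ * from its probe path, for example:
+ *
+ *  dev_pm_set_driver_flags(dev, DPM_FLAG_SMART_SUSPEND);
+ */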
diff --git a/drivers/base/power/power.h b/drivers/base/power/power.h
new file mode 100644
index 000000000..922ed457d
--- /dev/null
+++ b/drivers/base/power/power.h
@@ -0,0 +1,170 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#include <linux/pm_qos.h>
+
+static inline void device_pm_init_common(struct device *dev)
+{
+ if (!dev->power.early_init) {
+ spin_lock_init(&dev->power.lock);
+ dev->power.qos = NULL;
+ dev->power.early_init = true;
+ }
+}
+
+#ifdef CONFIG_PM
+
+static inline void pm_runtime_early_init(struct device *dev)
+{
+ dev->power.disable_depth = 1;
+ device_pm_init_common(dev);
+}
+
+extern void pm_runtime_init(struct device *dev);
+extern void pm_runtime_reinit(struct device *dev);
+extern void pm_runtime_remove(struct device *dev);
+extern u64 pm_runtime_active_time(struct device *dev);
+
+#define WAKE_IRQ_DEDICATED_ALLOCATED BIT(0)
+#define WAKE_IRQ_DEDICATED_MANAGED BIT(1)
+#define WAKE_IRQ_DEDICATED_REVERSE BIT(2)
+#define WAKE_IRQ_DEDICATED_MASK (WAKE_IRQ_DEDICATED_ALLOCATED | \
+ WAKE_IRQ_DEDICATED_MANAGED | \
+ WAKE_IRQ_DEDICATED_REVERSE)
+#define WAKE_IRQ_DEDICATED_ENABLED BIT(3)
+
+struct wake_irq {
+ struct device *dev;
+ unsigned int status;
+ int irq;
+ const char *name;
+};
+
+extern void dev_pm_arm_wake_irq(struct wake_irq *wirq);
+extern void dev_pm_disarm_wake_irq(struct wake_irq *wirq);
+extern void dev_pm_enable_wake_irq_check(struct device *dev,
+ bool can_change_status);
+extern void dev_pm_disable_wake_irq_check(struct device *dev, bool cond_disable);
+extern void dev_pm_enable_wake_irq_complete(struct device *dev);
+
+#ifdef CONFIG_PM_SLEEP
+
+extern void device_wakeup_attach_irq(struct device *dev, struct wake_irq *wakeirq);
+extern void device_wakeup_detach_irq(struct device *dev);
+extern void device_wakeup_arm_wake_irqs(void);
+extern void device_wakeup_disarm_wake_irqs(void);
+
+#else
+
+static inline void device_wakeup_attach_irq(struct device *dev,
+ struct wake_irq *wakeirq) {}
+
+static inline void device_wakeup_detach_irq(struct device *dev)
+{
+}
+
+#endif /* CONFIG_PM_SLEEP */
+
+/*
+ * sysfs.c
+ */
+
+extern int dpm_sysfs_add(struct device *dev);
+extern void dpm_sysfs_remove(struct device *dev);
+extern void rpm_sysfs_remove(struct device *dev);
+extern int wakeup_sysfs_add(struct device *dev);
+extern void wakeup_sysfs_remove(struct device *dev);
+extern int pm_qos_sysfs_add_resume_latency(struct device *dev);
+extern void pm_qos_sysfs_remove_resume_latency(struct device *dev);
+extern int pm_qos_sysfs_add_flags(struct device *dev);
+extern void pm_qos_sysfs_remove_flags(struct device *dev);
+extern int pm_qos_sysfs_add_latency_tolerance(struct device *dev);
+extern void pm_qos_sysfs_remove_latency_tolerance(struct device *dev);
+extern int dpm_sysfs_change_owner(struct device *dev, kuid_t kuid, kgid_t kgid);
+
+#else /* CONFIG_PM */
+
+static inline void pm_runtime_early_init(struct device *dev)
+{
+ device_pm_init_common(dev);
+}
+
+static inline void pm_runtime_init(struct device *dev) {}
+static inline void pm_runtime_reinit(struct device *dev) {}
+static inline void pm_runtime_remove(struct device *dev) {}
+
+static inline int dpm_sysfs_add(struct device *dev) { return 0; }
+static inline void dpm_sysfs_remove(struct device *dev) {}
+static inline int dpm_sysfs_change_owner(struct device *dev, kuid_t kuid,
+ kgid_t kgid) { return 0; }
+
+#endif
+
+#ifdef CONFIG_PM_SLEEP
+
+/* kernel/power/main.c */
+extern int pm_async_enabled;
+
+/* drivers/base/power/main.c */
+extern struct list_head dpm_list; /* The active device list */
+
+static inline struct device *to_device(struct list_head *entry)
+{
+ return container_of(entry, struct device, power.entry);
+}
+
+extern void device_pm_sleep_init(struct device *dev);
+extern void device_pm_add(struct device *);
+extern void device_pm_remove(struct device *);
+extern void device_pm_move_before(struct device *, struct device *);
+extern void device_pm_move_after(struct device *, struct device *);
+extern void device_pm_move_last(struct device *);
+extern void device_pm_check_callbacks(struct device *dev);
+
+static inline bool device_pm_initialized(struct device *dev)
+{
+ return dev->power.in_dpm_list;
+}
+
+/* drivers/base/power/wakeup_stats.c */
+extern int wakeup_source_sysfs_add(struct device *parent,
+ struct wakeup_source *ws);
+extern void wakeup_source_sysfs_remove(struct wakeup_source *ws);
+
+extern int pm_wakeup_source_sysfs_add(struct device *parent);
+
+#else /* !CONFIG_PM_SLEEP */
+
+static inline void device_pm_sleep_init(struct device *dev) {}
+
+static inline void device_pm_add(struct device *dev) {}
+
+static inline void device_pm_remove(struct device *dev)
+{
+ pm_runtime_remove(dev);
+}
+
+static inline void device_pm_move_before(struct device *deva,
+ struct device *devb) {}
+static inline void device_pm_move_after(struct device *deva,
+ struct device *devb) {}
+static inline void device_pm_move_last(struct device *dev) {}
+
+static inline void device_pm_check_callbacks(struct device *dev) {}
+
+static inline bool device_pm_initialized(struct device *dev)
+{
+ return device_is_registered(dev);
+}
+
+static inline int pm_wakeup_source_sysfs_add(struct device *parent)
+{
+ return 0;
+}
+
+#endif /* !CONFIG_PM_SLEEP */
+
+static inline void device_pm_init(struct device *dev)
+{
+ device_pm_init_common(dev);
+ device_pm_sleep_init(dev);
+ pm_runtime_init(dev);
+}
diff --git a/drivers/base/power/qos-test.c b/drivers/base/power/qos-test.c
new file mode 100644
index 000000000..79fc6c441
--- /dev/null
+++ b/drivers/base/power/qos-test.c
@@ -0,0 +1,117 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright 2019 NXP
+ */
+#include <kunit/test.h>
+#include <linux/pm_qos.h>
+
+/* Basic test for aggregating two "min" requests */
+static void freq_qos_test_min(struct kunit *test)
+{
+ struct freq_constraints qos;
+ struct freq_qos_request req1, req2;
+ int ret;
+
+ freq_constraints_init(&qos);
+ memset(&req1, 0, sizeof(req1));
+ memset(&req2, 0, sizeof(req2));
+
+ ret = freq_qos_add_request(&qos, &req1, FREQ_QOS_MIN, 1000);
+ KUNIT_EXPECT_EQ(test, ret, 1);
+ ret = freq_qos_add_request(&qos, &req2, FREQ_QOS_MIN, 2000);
+ KUNIT_EXPECT_EQ(test, ret, 1);
+
+ KUNIT_EXPECT_EQ(test, freq_qos_read_value(&qos, FREQ_QOS_MIN), 2000);
+
+ ret = freq_qos_remove_request(&req2);
+ KUNIT_EXPECT_EQ(test, ret, 1);
+ KUNIT_EXPECT_EQ(test, freq_qos_read_value(&qos, FREQ_QOS_MIN), 1000);
+
+ ret = freq_qos_remove_request(&req1);
+ KUNIT_EXPECT_EQ(test, ret, 1);
+ KUNIT_EXPECT_EQ(test, freq_qos_read_value(&qos, FREQ_QOS_MIN),
+ FREQ_QOS_MIN_DEFAULT_VALUE);
+}
+
+/* Test that requests for MAX_DEFAULT_VALUE have no effect */
+static void freq_qos_test_maxdef(struct kunit *test)
+{
+ struct freq_constraints qos;
+ struct freq_qos_request req1, req2;
+ int ret;
+
+ freq_constraints_init(&qos);
+ memset(&req1, 0, sizeof(req1));
+ memset(&req2, 0, sizeof(req2));
+ KUNIT_EXPECT_EQ(test, freq_qos_read_value(&qos, FREQ_QOS_MAX),
+ FREQ_QOS_MAX_DEFAULT_VALUE);
+
+ ret = freq_qos_add_request(&qos, &req1, FREQ_QOS_MAX,
+ FREQ_QOS_MAX_DEFAULT_VALUE);
+ KUNIT_EXPECT_EQ(test, ret, 0);
+ ret = freq_qos_add_request(&qos, &req2, FREQ_QOS_MAX,
+ FREQ_QOS_MAX_DEFAULT_VALUE);
+ KUNIT_EXPECT_EQ(test, ret, 0);
+
+ /* Add max 1000 */
+ ret = freq_qos_update_request(&req1, 1000);
+ KUNIT_EXPECT_EQ(test, ret, 1);
+ KUNIT_EXPECT_EQ(test, freq_qos_read_value(&qos, FREQ_QOS_MAX), 1000);
+
+ /* Add max 2000, no impact */
+ ret = freq_qos_update_request(&req2, 2000);
+ KUNIT_EXPECT_EQ(test, ret, 0);
+ KUNIT_EXPECT_EQ(test, freq_qos_read_value(&qos, FREQ_QOS_MAX), 1000);
+
+ /* Remove max 1000, new max 2000 */
+ ret = freq_qos_remove_request(&req1);
+ KUNIT_EXPECT_EQ(test, ret, 1);
+ KUNIT_EXPECT_EQ(test, freq_qos_read_value(&qos, FREQ_QOS_MAX), 2000);
+}
+
+/*
+ * Test that a freq_qos_request can be added again after removal
+ *
+ * This issue was solved by commit 05ff1ba412fd ("PM: QoS: Invalidate frequency
+ * QoS requests after removal")
+ */
+static void freq_qos_test_readd(struct kunit *test)
+{
+ struct freq_constraints qos;
+ struct freq_qos_request req;
+ int ret;
+
+ freq_constraints_init(&qos);
+ memset(&req, 0, sizeof(req));
+ KUNIT_EXPECT_EQ(test, freq_qos_read_value(&qos, FREQ_QOS_MIN),
+ FREQ_QOS_MIN_DEFAULT_VALUE);
+
+ /* Add */
+ ret = freq_qos_add_request(&qos, &req, FREQ_QOS_MIN, 1000);
+ KUNIT_EXPECT_EQ(test, ret, 1);
+ KUNIT_EXPECT_EQ(test, freq_qos_read_value(&qos, FREQ_QOS_MIN), 1000);
+
+ /* Remove */
+ ret = freq_qos_remove_request(&req);
+ KUNIT_EXPECT_EQ(test, ret, 1);
+ KUNIT_EXPECT_EQ(test, freq_qos_read_value(&qos, FREQ_QOS_MIN),
+ FREQ_QOS_MIN_DEFAULT_VALUE);
+
+ /* Add again */
+ ret = freq_qos_add_request(&qos, &req, FREQ_QOS_MIN, 2000);
+ KUNIT_EXPECT_EQ(test, ret, 1);
+ KUNIT_EXPECT_EQ(test, freq_qos_read_value(&qos, FREQ_QOS_MIN), 2000);
+}
+
+static struct kunit_case pm_qos_test_cases[] = {
+ KUNIT_CASE(freq_qos_test_min),
+ KUNIT_CASE(freq_qos_test_maxdef),
+ KUNIT_CASE(freq_qos_test_readd),
+ {},
+};
+
+static struct kunit_suite pm_qos_test_module = {
+ .name = "qos-kunit-test",
+ .test_cases = pm_qos_test_cases,
+};
+kunit_test_suites(&pm_qos_test_module);
diff --git a/drivers/base/power/qos.c b/drivers/base/power/qos.c
new file mode 100644
index 000000000..8e93167f1
--- /dev/null
+++ b/drivers/base/power/qos.c
@@ -0,0 +1,982 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Devices PM QoS constraints management
+ *
+ * Copyright (C) 2011 Texas Instruments, Inc.
+ *
+ * This module exposes the interface to kernel space for specifying
+ * per-device PM QoS dependencies. It provides infrastructure for registration
+ * of:
+ *
+ * Dependents on a QoS value : register requests
+ * Watchers of QoS value : get notified when target QoS value changes
+ *
+ * This QoS design is best effort based. Dependents register their QoS needs.
+ * Watchers register to keep track of the current QoS needs of the system.
+ * Watchers can register a per-device notification callback using the
+ * dev_pm_qos_*_notifier API. The notification chain data is stored in the
+ * per-device constraint data struct.
+ *
+ * Note about the per-device constraint data struct allocation:
+ * . The per-device constraints data struct pointer is stored in the device's
+ * dev_pm_info.
+ * . To minimize the data usage by the per-device constraints, the data struct
+ * is only allocated at the first call to dev_pm_qos_add_request.
+ * . The data is later freed when the device is removed from the system.
+ * . A global mutex protects the constraints users from the data being
+ * allocated and freed.
+ */
+
+#include <linux/pm_qos.h>
+#include <linux/spinlock.h>
+#include <linux/slab.h>
+#include <linux/device.h>
+#include <linux/mutex.h>
+#include <linux/export.h>
+#include <linux/pm_runtime.h>
+#include <linux/err.h>
+#include <trace/events/power.h>
+
+#include "power.h"
+
+static DEFINE_MUTEX(dev_pm_qos_mtx);
+static DEFINE_MUTEX(dev_pm_qos_sysfs_mtx);
+
+/**
+ * __dev_pm_qos_flags - Check PM QoS flags for a given device.
+ * @dev: Device to check the PM QoS flags for.
+ * @mask: Flags to check against.
+ *
+ * This routine must be called with dev->power.lock held.
+ */
+enum pm_qos_flags_status __dev_pm_qos_flags(struct device *dev, s32 mask)
+{
+ struct dev_pm_qos *qos = dev->power.qos;
+ struct pm_qos_flags *pqf;
+ s32 val;
+
+ lockdep_assert_held(&dev->power.lock);
+
+ if (IS_ERR_OR_NULL(qos))
+ return PM_QOS_FLAGS_UNDEFINED;
+
+ pqf = &qos->flags;
+ if (list_empty(&pqf->list))
+ return PM_QOS_FLAGS_UNDEFINED;
+
+ val = pqf->effective_flags & mask;
+ if (val)
+ return (val == mask) ? PM_QOS_FLAGS_ALL : PM_QOS_FLAGS_SOME;
+
+ return PM_QOS_FLAGS_NONE;
+}
+
+/**
+ * dev_pm_qos_flags - Check PM QoS flags for a given device (locked).
+ * @dev: Device to check the PM QoS flags for.
+ * @mask: Flags to check against.
+ */
+enum pm_qos_flags_status dev_pm_qos_flags(struct device *dev, s32 mask)
+{
+ unsigned long irqflags;
+ enum pm_qos_flags_status ret;
+
+ spin_lock_irqsave(&dev->power.lock, irqflags);
+ ret = __dev_pm_qos_flags(dev, mask);
+ spin_unlock_irqrestore(&dev->power.lock, irqflags);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(dev_pm_qos_flags);
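+
+/*
+ * Illustrative sketch (not part of this file): platform code could use
+ * dev_pm_qos_flags() to check whether any active request has set
+ * PM_QOS_FLAG_NO_POWER_OFF before removing power from a device:
+ *
+ *  if (dev_pm_qos_flags(dev, PM_QOS_FLAG_NO_POWER_OFF) >
+ *          PM_QOS_FLAGS_NONE)
+ *      return -EBUSY;
+ */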
+
+/**
+ * __dev_pm_qos_resume_latency - Get resume latency constraint for a given device.
+ * @dev: Device to get the PM QoS constraint value for.
+ *
+ * This routine must be called with dev->power.lock held.
+ */
+s32 __dev_pm_qos_resume_latency(struct device *dev)
+{
+ lockdep_assert_held(&dev->power.lock);
+
+ return dev_pm_qos_raw_resume_latency(dev);
+}
+
+/**
+ * dev_pm_qos_read_value - Get PM QoS constraint for a given device (locked).
+ * @dev: Device to get the PM QoS constraint value for.
+ * @type: QoS request type.
+ */
+s32 dev_pm_qos_read_value(struct device *dev, enum dev_pm_qos_req_type type)
+{
+ struct dev_pm_qos *qos = dev->power.qos;
+ unsigned long flags;
+ s32 ret;
+
+ spin_lock_irqsave(&dev->power.lock, flags);
+
+ switch (type) {
+ case DEV_PM_QOS_RESUME_LATENCY:
+ ret = IS_ERR_OR_NULL(qos) ? PM_QOS_RESUME_LATENCY_NO_CONSTRAINT
+ : pm_qos_read_value(&qos->resume_latency);
+ break;
+ case DEV_PM_QOS_MIN_FREQUENCY:
+ ret = IS_ERR_OR_NULL(qos) ? PM_QOS_MIN_FREQUENCY_DEFAULT_VALUE
+ : freq_qos_read_value(&qos->freq, FREQ_QOS_MIN);
+ break;
+ case DEV_PM_QOS_MAX_FREQUENCY:
+ ret = IS_ERR_OR_NULL(qos) ? PM_QOS_MAX_FREQUENCY_DEFAULT_VALUE
+ : freq_qos_read_value(&qos->freq, FREQ_QOS_MAX);
+ break;
+ default:
+ WARN_ON(1);
+ ret = 0;
+ }
+
+ spin_unlock_irqrestore(&dev->power.lock, flags);
+
+ return ret;
+}
+
+/**
+ * apply_constraint - Add/modify/remove device PM QoS request.
+ * @req: Constraint request to apply
+ * @action: Action to perform (add/update/remove).
+ * @value: Value to assign to the QoS request.
+ *
+ * Internal function to update the constraints list using the PM QoS core
+ * code and if needed call the per-device callbacks.
+ */
+static int apply_constraint(struct dev_pm_qos_request *req,
+ enum pm_qos_req_action action, s32 value)
+{
+ struct dev_pm_qos *qos = req->dev->power.qos;
+ int ret;
+
+	switch (req->type) {
+ case DEV_PM_QOS_RESUME_LATENCY:
+ if (WARN_ON(action != PM_QOS_REMOVE_REQ && value < 0))
+ value = 0;
+
+ ret = pm_qos_update_target(&qos->resume_latency,
+ &req->data.pnode, action, value);
+ break;
+ case DEV_PM_QOS_LATENCY_TOLERANCE:
+ ret = pm_qos_update_target(&qos->latency_tolerance,
+ &req->data.pnode, action, value);
+ if (ret) {
+ value = pm_qos_read_value(&qos->latency_tolerance);
+ req->dev->power.set_latency_tolerance(req->dev, value);
+ }
+ break;
+ case DEV_PM_QOS_MIN_FREQUENCY:
+ case DEV_PM_QOS_MAX_FREQUENCY:
+ ret = freq_qos_apply(&req->data.freq, action, value);
+ break;
+ case DEV_PM_QOS_FLAGS:
+ ret = pm_qos_update_flags(&qos->flags, &req->data.flr,
+ action, value);
+ break;
+ default:
+ ret = -EINVAL;
+ }
+
+ return ret;
+}
+
+/*
+ * dev_pm_qos_constraints_allocate
+ * @dev: device to allocate data for
+ *
+ * Called at the first call to add_request, for constraint data allocation
+ * Must be called with the dev_pm_qos_mtx mutex held
+ */
+static int dev_pm_qos_constraints_allocate(struct device *dev)
+{
+ struct dev_pm_qos *qos;
+ struct pm_qos_constraints *c;
+ struct blocking_notifier_head *n;
+
+ qos = kzalloc(sizeof(*qos), GFP_KERNEL);
+ if (!qos)
+ return -ENOMEM;
+
+ n = kzalloc(3 * sizeof(*n), GFP_KERNEL);
+ if (!n) {
+ kfree(qos);
+ return -ENOMEM;
+ }
+
+ c = &qos->resume_latency;
+ plist_head_init(&c->list);
+ c->target_value = PM_QOS_RESUME_LATENCY_DEFAULT_VALUE;
+ c->default_value = PM_QOS_RESUME_LATENCY_DEFAULT_VALUE;
+ c->no_constraint_value = PM_QOS_RESUME_LATENCY_NO_CONSTRAINT;
+ c->type = PM_QOS_MIN;
+ c->notifiers = n;
+ BLOCKING_INIT_NOTIFIER_HEAD(n);
+
+ c = &qos->latency_tolerance;
+ plist_head_init(&c->list);
+ c->target_value = PM_QOS_LATENCY_TOLERANCE_DEFAULT_VALUE;
+ c->default_value = PM_QOS_LATENCY_TOLERANCE_DEFAULT_VALUE;
+ c->no_constraint_value = PM_QOS_LATENCY_TOLERANCE_NO_CONSTRAINT;
+ c->type = PM_QOS_MIN;
+
+ freq_constraints_init(&qos->freq);
+
+ INIT_LIST_HEAD(&qos->flags.list);
+
+ spin_lock_irq(&dev->power.lock);
+ dev->power.qos = qos;
+ spin_unlock_irq(&dev->power.lock);
+
+ return 0;
+}
+
+static void __dev_pm_qos_hide_latency_limit(struct device *dev);
+static void __dev_pm_qos_hide_flags(struct device *dev);
+
+/**
+ * dev_pm_qos_constraints_destroy
+ * @dev: target device
+ *
+ * Called from the device PM subsystem on device removal under device_pm_lock().
+ */
+void dev_pm_qos_constraints_destroy(struct device *dev)
+{
+ struct dev_pm_qos *qos;
+ struct dev_pm_qos_request *req, *tmp;
+ struct pm_qos_constraints *c;
+ struct pm_qos_flags *f;
+
+ mutex_lock(&dev_pm_qos_sysfs_mtx);
+
+ /*
+ * If the device's PM QoS resume latency limit or PM QoS flags have been
+ * exposed to user space, they have to be hidden at this point.
+ */
+ pm_qos_sysfs_remove_resume_latency(dev);
+ pm_qos_sysfs_remove_flags(dev);
+
+ mutex_lock(&dev_pm_qos_mtx);
+
+ __dev_pm_qos_hide_latency_limit(dev);
+ __dev_pm_qos_hide_flags(dev);
+
+ qos = dev->power.qos;
+ if (!qos)
+ goto out;
+
+ /* Flush the constraints lists for the device. */
+ c = &qos->resume_latency;
+ plist_for_each_entry_safe(req, tmp, &c->list, data.pnode) {
+ /*
+ * Update constraints list and call the notification
+ * callbacks if needed
+ */
+ apply_constraint(req, PM_QOS_REMOVE_REQ, PM_QOS_DEFAULT_VALUE);
+ memset(req, 0, sizeof(*req));
+ }
+
+ c = &qos->latency_tolerance;
+ plist_for_each_entry_safe(req, tmp, &c->list, data.pnode) {
+ apply_constraint(req, PM_QOS_REMOVE_REQ, PM_QOS_DEFAULT_VALUE);
+ memset(req, 0, sizeof(*req));
+ }
+
+ c = &qos->freq.min_freq;
+ plist_for_each_entry_safe(req, tmp, &c->list, data.freq.pnode) {
+ apply_constraint(req, PM_QOS_REMOVE_REQ,
+ PM_QOS_MIN_FREQUENCY_DEFAULT_VALUE);
+ memset(req, 0, sizeof(*req));
+ }
+
+ c = &qos->freq.max_freq;
+ plist_for_each_entry_safe(req, tmp, &c->list, data.freq.pnode) {
+ apply_constraint(req, PM_QOS_REMOVE_REQ,
+ PM_QOS_MAX_FREQUENCY_DEFAULT_VALUE);
+ memset(req, 0, sizeof(*req));
+ }
+
+ f = &qos->flags;
+ list_for_each_entry_safe(req, tmp, &f->list, data.flr.node) {
+ apply_constraint(req, PM_QOS_REMOVE_REQ, PM_QOS_DEFAULT_VALUE);
+ memset(req, 0, sizeof(*req));
+ }
+
+ spin_lock_irq(&dev->power.lock);
+ dev->power.qos = ERR_PTR(-ENODEV);
+ spin_unlock_irq(&dev->power.lock);
+
+ kfree(qos->resume_latency.notifiers);
+ kfree(qos);
+
+ out:
+ mutex_unlock(&dev_pm_qos_mtx);
+
+ mutex_unlock(&dev_pm_qos_sysfs_mtx);
+}
+
+static bool dev_pm_qos_invalid_req_type(struct device *dev,
+ enum dev_pm_qos_req_type type)
+{
+ return type == DEV_PM_QOS_LATENCY_TOLERANCE &&
+ !dev->power.set_latency_tolerance;
+}
+
+static int __dev_pm_qos_add_request(struct device *dev,
+ struct dev_pm_qos_request *req,
+ enum dev_pm_qos_req_type type, s32 value)
+{
+ int ret = 0;
+
+ if (!dev || !req || dev_pm_qos_invalid_req_type(dev, type))
+ return -EINVAL;
+
+ if (WARN(dev_pm_qos_request_active(req),
+ "%s() called for already added request\n", __func__))
+ return -EINVAL;
+
+ if (IS_ERR(dev->power.qos))
+ ret = -ENODEV;
+ else if (!dev->power.qos)
+ ret = dev_pm_qos_constraints_allocate(dev);
+
+ trace_dev_pm_qos_add_request(dev_name(dev), type, value);
+ if (ret)
+ return ret;
+
+ req->dev = dev;
+ req->type = type;
+ if (req->type == DEV_PM_QOS_MIN_FREQUENCY)
+ ret = freq_qos_add_request(&dev->power.qos->freq,
+ &req->data.freq,
+ FREQ_QOS_MIN, value);
+ else if (req->type == DEV_PM_QOS_MAX_FREQUENCY)
+ ret = freq_qos_add_request(&dev->power.qos->freq,
+ &req->data.freq,
+ FREQ_QOS_MAX, value);
+ else
+ ret = apply_constraint(req, PM_QOS_ADD_REQ, value);
+
+ return ret;
+}
+
+/**
+ * dev_pm_qos_add_request - inserts new qos request into the list
+ * @dev: target device for the constraint
+ * @req: pointer to a preallocated handle
+ * @type: type of the request
+ * @value: defines the qos request
+ *
+ * This function inserts a new entry in the device constraints list of
+ * requested qos performance characteristics. It recomputes the aggregate
+ * QoS expectations of parameters and initializes the dev_pm_qos_request
+ * handle. Caller needs to save this handle for later use in updates and
+ * removal.
+ *
+ * Returns 1 if the aggregated constraint value has changed,
+ * 0 if the aggregated constraint value has not changed,
+ * -EINVAL in case of wrong parameters, -ENOMEM if there's not enough memory
+ * to allocate for data structures, -ENODEV if the device has just been removed
+ * from the system.
+ *
+ * Callers should ensure that the target device is not RPM_SUSPENDED before
+ * using this function for requests of type DEV_PM_QOS_FLAGS.
+ */
+int dev_pm_qos_add_request(struct device *dev, struct dev_pm_qos_request *req,
+ enum dev_pm_qos_req_type type, s32 value)
+{
+ int ret;
+
+ mutex_lock(&dev_pm_qos_mtx);
+ ret = __dev_pm_qos_add_request(dev, req, type, value);
+ mutex_unlock(&dev_pm_qos_mtx);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(dev_pm_qos_add_request);
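+
+/*
+ * Illustrative sketch (not part of this file): typical lifecycle of a
+ * resume-latency request from a driver. "foo_req" and the 100/200 usec
+ * values are made-up example values.
+ *
+ *  static struct dev_pm_qos_request foo_req;
+ *
+ *  ret = dev_pm_qos_add_request(dev, &foo_req,
+ *                               DEV_PM_QOS_RESUME_LATENCY, 100);
+ *  ...
+ *  ret = dev_pm_qos_update_request(&foo_req, 200);
+ *  ...
+ *  ret = dev_pm_qos_remove_request(&foo_req);
+ */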
+
+/**
+ * __dev_pm_qos_update_request - Modify an existing device PM QoS request.
+ * @req : PM QoS request to modify.
+ * @new_value: New value to request.
+ */
+static int __dev_pm_qos_update_request(struct dev_pm_qos_request *req,
+ s32 new_value)
+{
+ s32 curr_value;
+ int ret = 0;
+
+	if (!req) /* guard against callers passing in NULL */
+ return -EINVAL;
+
+ if (WARN(!dev_pm_qos_request_active(req),
+ "%s() called for unknown object\n", __func__))
+ return -EINVAL;
+
+ if (IS_ERR_OR_NULL(req->dev->power.qos))
+ return -ENODEV;
+
+	switch (req->type) {
+ case DEV_PM_QOS_RESUME_LATENCY:
+ case DEV_PM_QOS_LATENCY_TOLERANCE:
+ curr_value = req->data.pnode.prio;
+ break;
+ case DEV_PM_QOS_MIN_FREQUENCY:
+ case DEV_PM_QOS_MAX_FREQUENCY:
+ curr_value = req->data.freq.pnode.prio;
+ break;
+ case DEV_PM_QOS_FLAGS:
+ curr_value = req->data.flr.flags;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ trace_dev_pm_qos_update_request(dev_name(req->dev), req->type,
+ new_value);
+ if (curr_value != new_value)
+ ret = apply_constraint(req, PM_QOS_UPDATE_REQ, new_value);
+
+ return ret;
+}
+
+/**
+ * dev_pm_qos_update_request - modifies an existing qos request
+ * @req : handle to list element holding a dev_pm_qos request to use
+ * @new_value: defines the qos request
+ *
+ * Updates an existing dev PM qos request along with updating the
+ * target value.
+ *
+ * Attempts are made to make this code callable on hot code paths.
+ *
+ * Returns 1 if the aggregated constraint value has changed,
+ * 0 if the aggregated constraint value has not changed,
+ * -EINVAL in case of wrong parameters, -ENODEV if the device has been
+ * removed from the system
+ *
+ * Callers should ensure that the target device is not RPM_SUSPENDED before
+ * using this function for requests of type DEV_PM_QOS_FLAGS.
+ */
+int dev_pm_qos_update_request(struct dev_pm_qos_request *req, s32 new_value)
+{
+ int ret;
+
+ mutex_lock(&dev_pm_qos_mtx);
+ ret = __dev_pm_qos_update_request(req, new_value);
+ mutex_unlock(&dev_pm_qos_mtx);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(dev_pm_qos_update_request);
+
+static int __dev_pm_qos_remove_request(struct dev_pm_qos_request *req)
+{
+ int ret;
+
+	if (!req) /* guard against callers passing in NULL */
+ return -EINVAL;
+
+ if (WARN(!dev_pm_qos_request_active(req),
+ "%s() called for unknown object\n", __func__))
+ return -EINVAL;
+
+ if (IS_ERR_OR_NULL(req->dev->power.qos))
+ return -ENODEV;
+
+ trace_dev_pm_qos_remove_request(dev_name(req->dev), req->type,
+ PM_QOS_DEFAULT_VALUE);
+ ret = apply_constraint(req, PM_QOS_REMOVE_REQ, PM_QOS_DEFAULT_VALUE);
+ memset(req, 0, sizeof(*req));
+ return ret;
+}
+
+/**
+ * dev_pm_qos_remove_request - removes an existing qos request
+ * @req: handle to request list element
+ *
+ * Will remove pm qos request from the list of constraints and
+ * recompute the current target value. Call this on slow code paths.
+ *
+ * Returns 1 if the aggregated constraint value has changed,
+ * 0 if the aggregated constraint value has not changed,
+ * -EINVAL in case of wrong parameters, -ENODEV if the device has been
+ * removed from the system
+ *
+ * Callers should ensure that the target device is not RPM_SUSPENDED before
+ * using this function for requests of type DEV_PM_QOS_FLAGS.
+ */
+int dev_pm_qos_remove_request(struct dev_pm_qos_request *req)
+{
+ int ret;
+
+ mutex_lock(&dev_pm_qos_mtx);
+ ret = __dev_pm_qos_remove_request(req);
+ mutex_unlock(&dev_pm_qos_mtx);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(dev_pm_qos_remove_request);
+
+/**
+ * dev_pm_qos_add_notifier - sets notification entry for changes to target value
+ * of per-device PM QoS constraints
+ *
+ * @dev: target device for the constraint
+ * @notifier: notifier block managed by caller.
+ * @type: request type.
+ *
+ * Will register the notifier into a notification chain that gets called
+ * upon changes to the target value for the device.
+ *
+ * If the device's constraints object doesn't exist when this routine is
+ * called, it will be created (or an error code will be returned if that
+ * fails).
+ */
+int dev_pm_qos_add_notifier(struct device *dev, struct notifier_block *notifier,
+ enum dev_pm_qos_req_type type)
+{
+ int ret = 0;
+
+ mutex_lock(&dev_pm_qos_mtx);
+
+ if (IS_ERR(dev->power.qos))
+ ret = -ENODEV;
+ else if (!dev->power.qos)
+ ret = dev_pm_qos_constraints_allocate(dev);
+
+ if (ret)
+ goto unlock;
+
+ switch (type) {
+ case DEV_PM_QOS_RESUME_LATENCY:
+ ret = blocking_notifier_chain_register(dev->power.qos->resume_latency.notifiers,
+ notifier);
+ break;
+ case DEV_PM_QOS_MIN_FREQUENCY:
+ ret = freq_qos_add_notifier(&dev->power.qos->freq,
+ FREQ_QOS_MIN, notifier);
+ break;
+ case DEV_PM_QOS_MAX_FREQUENCY:
+ ret = freq_qos_add_notifier(&dev->power.qos->freq,
+ FREQ_QOS_MAX, notifier);
+ break;
+ default:
+ WARN_ON(1);
+ ret = -EINVAL;
+ }
+
+unlock:
+ mutex_unlock(&dev_pm_qos_mtx);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(dev_pm_qos_add_notifier);
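+
+/*
+ * Illustrative sketch (not part of this file): registering for notification
+ * of resume-latency changes. "foo_latency_notify" and "foo_nb" are
+ * hypothetical; the callback uses the standard notifier_block signature and
+ * the new aggregate constraint value is passed as the unsigned long argument.
+ *
+ *  static int foo_latency_notify(struct notifier_block *nb,
+ *                                unsigned long value, void *data)
+ *  {
+ *      return NOTIFY_OK;
+ *  }
+ *
+ *  static struct notifier_block foo_nb = {
+ *      .notifier_call = foo_latency_notify,
+ *  };
+ *
+ *  ret = dev_pm_qos_add_notifier(dev, &foo_nb, DEV_PM_QOS_RESUME_LATENCY);
+ */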
+
+/**
+ * dev_pm_qos_remove_notifier - deletes notification for changes to target value
+ * of per-device PM QoS constraints
+ *
+ * @dev: target device for the constraint
+ * @notifier: notifier block to be removed.
+ * @type: request type.
+ *
+ * Will remove the notifier from the notification chain that gets called
+ * upon changes to the target value.
+ */
+int dev_pm_qos_remove_notifier(struct device *dev,
+ struct notifier_block *notifier,
+ enum dev_pm_qos_req_type type)
+{
+ int ret = 0;
+
+ mutex_lock(&dev_pm_qos_mtx);
+
+ /* Silently return if the constraints object is not present. */
+ if (IS_ERR_OR_NULL(dev->power.qos))
+ goto unlock;
+
+ switch (type) {
+ case DEV_PM_QOS_RESUME_LATENCY:
+ ret = blocking_notifier_chain_unregister(dev->power.qos->resume_latency.notifiers,
+ notifier);
+ break;
+ case DEV_PM_QOS_MIN_FREQUENCY:
+ ret = freq_qos_remove_notifier(&dev->power.qos->freq,
+ FREQ_QOS_MIN, notifier);
+ break;
+ case DEV_PM_QOS_MAX_FREQUENCY:
+ ret = freq_qos_remove_notifier(&dev->power.qos->freq,
+ FREQ_QOS_MAX, notifier);
+ break;
+ default:
+ WARN_ON(1);
+ ret = -EINVAL;
+ }
+
+unlock:
+ mutex_unlock(&dev_pm_qos_mtx);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(dev_pm_qos_remove_notifier);
+
+/**
+ * dev_pm_qos_add_ancestor_request - Add PM QoS request for device's ancestor.
+ * @dev: Device whose ancestor to add the request for.
+ * @req: Pointer to the preallocated handle.
+ * @type: Type of the request.
+ * @value: Constraint latency value.
+ */
+int dev_pm_qos_add_ancestor_request(struct device *dev,
+ struct dev_pm_qos_request *req,
+ enum dev_pm_qos_req_type type, s32 value)
+{
+ struct device *ancestor = dev->parent;
+ int ret = -ENODEV;
+
+ switch (type) {
+ case DEV_PM_QOS_RESUME_LATENCY:
+ while (ancestor && !ancestor->power.ignore_children)
+ ancestor = ancestor->parent;
+
+ break;
+ case DEV_PM_QOS_LATENCY_TOLERANCE:
+ while (ancestor && !ancestor->power.set_latency_tolerance)
+ ancestor = ancestor->parent;
+
+ break;
+ default:
+ ancestor = NULL;
+ }
+ if (ancestor)
+ ret = dev_pm_qos_add_request(ancestor, req, type, value);
+
+ if (ret < 0)
+ req->dev = NULL;
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(dev_pm_qos_add_ancestor_request);
+
+static void __dev_pm_qos_drop_user_request(struct device *dev,
+ enum dev_pm_qos_req_type type)
+{
+ struct dev_pm_qos_request *req = NULL;
+
+	switch (type) {
+ case DEV_PM_QOS_RESUME_LATENCY:
+ req = dev->power.qos->resume_latency_req;
+ dev->power.qos->resume_latency_req = NULL;
+ break;
+ case DEV_PM_QOS_LATENCY_TOLERANCE:
+ req = dev->power.qos->latency_tolerance_req;
+ dev->power.qos->latency_tolerance_req = NULL;
+ break;
+ case DEV_PM_QOS_FLAGS:
+ req = dev->power.qos->flags_req;
+ dev->power.qos->flags_req = NULL;
+ break;
+ default:
+ WARN_ON(1);
+ return;
+ }
+ __dev_pm_qos_remove_request(req);
+ kfree(req);
+}
+
+static void dev_pm_qos_drop_user_request(struct device *dev,
+ enum dev_pm_qos_req_type type)
+{
+ mutex_lock(&dev_pm_qos_mtx);
+ __dev_pm_qos_drop_user_request(dev, type);
+ mutex_unlock(&dev_pm_qos_mtx);
+}
+
+/**
+ * dev_pm_qos_expose_latency_limit - Expose PM QoS latency limit to user space.
+ * @dev: Device whose PM QoS latency limit is to be exposed to user space.
+ * @value: Initial value of the latency limit.
+ */
+int dev_pm_qos_expose_latency_limit(struct device *dev, s32 value)
+{
+ struct dev_pm_qos_request *req;
+ int ret;
+
+ if (!device_is_registered(dev) || value < 0)
+ return -EINVAL;
+
+ req = kzalloc(sizeof(*req), GFP_KERNEL);
+ if (!req)
+ return -ENOMEM;
+
+ ret = dev_pm_qos_add_request(dev, req, DEV_PM_QOS_RESUME_LATENCY, value);
+ if (ret < 0) {
+ kfree(req);
+ return ret;
+ }
+
+ mutex_lock(&dev_pm_qos_sysfs_mtx);
+
+ mutex_lock(&dev_pm_qos_mtx);
+
+ if (IS_ERR_OR_NULL(dev->power.qos))
+ ret = -ENODEV;
+ else if (dev->power.qos->resume_latency_req)
+ ret = -EEXIST;
+
+ if (ret < 0) {
+ __dev_pm_qos_remove_request(req);
+ kfree(req);
+ mutex_unlock(&dev_pm_qos_mtx);
+ goto out;
+ }
+ dev->power.qos->resume_latency_req = req;
+
+ mutex_unlock(&dev_pm_qos_mtx);
+
+ ret = pm_qos_sysfs_add_resume_latency(dev);
+ if (ret)
+ dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_RESUME_LATENCY);
+
+ out:
+ mutex_unlock(&dev_pm_qos_sysfs_mtx);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(dev_pm_qos_expose_latency_limit);
+
+static void __dev_pm_qos_hide_latency_limit(struct device *dev)
+{
+ if (!IS_ERR_OR_NULL(dev->power.qos) && dev->power.qos->resume_latency_req)
+ __dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_RESUME_LATENCY);
+}
+
+/**
+ * dev_pm_qos_hide_latency_limit - Hide PM QoS latency limit from user space.
+ * @dev: Device whose PM QoS latency limit is to be hidden from user space.
+ */
+void dev_pm_qos_hide_latency_limit(struct device *dev)
+{
+ mutex_lock(&dev_pm_qos_sysfs_mtx);
+
+ pm_qos_sysfs_remove_resume_latency(dev);
+
+ mutex_lock(&dev_pm_qos_mtx);
+ __dev_pm_qos_hide_latency_limit(dev);
+ mutex_unlock(&dev_pm_qos_mtx);
+
+ mutex_unlock(&dev_pm_qos_sysfs_mtx);
+}
+EXPORT_SYMBOL_GPL(dev_pm_qos_hide_latency_limit);
+
+/**
+ * dev_pm_qos_expose_flags - Expose PM QoS flags of a device to user space.
+ * @dev: Device whose PM QoS flags are to be exposed to user space.
+ * @val: Initial values of the flags.
+ */
+int dev_pm_qos_expose_flags(struct device *dev, s32 val)
+{
+ struct dev_pm_qos_request *req;
+ int ret;
+
+ if (!device_is_registered(dev))
+ return -EINVAL;
+
+ req = kzalloc(sizeof(*req), GFP_KERNEL);
+ if (!req)
+ return -ENOMEM;
+
+ ret = dev_pm_qos_add_request(dev, req, DEV_PM_QOS_FLAGS, val);
+ if (ret < 0) {
+ kfree(req);
+ return ret;
+ }
+
+ pm_runtime_get_sync(dev);
+ mutex_lock(&dev_pm_qos_sysfs_mtx);
+
+ mutex_lock(&dev_pm_qos_mtx);
+
+ if (IS_ERR_OR_NULL(dev->power.qos))
+ ret = -ENODEV;
+ else if (dev->power.qos->flags_req)
+ ret = -EEXIST;
+
+ if (ret < 0) {
+ __dev_pm_qos_remove_request(req);
+ kfree(req);
+ mutex_unlock(&dev_pm_qos_mtx);
+ goto out;
+ }
+ dev->power.qos->flags_req = req;
+
+ mutex_unlock(&dev_pm_qos_mtx);
+
+ ret = pm_qos_sysfs_add_flags(dev);
+ if (ret)
+ dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_FLAGS);
+
+ out:
+ mutex_unlock(&dev_pm_qos_sysfs_mtx);
+ pm_runtime_put(dev);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(dev_pm_qos_expose_flags);
+
+static void __dev_pm_qos_hide_flags(struct device *dev)
+{
+ if (!IS_ERR_OR_NULL(dev->power.qos) && dev->power.qos->flags_req)
+ __dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_FLAGS);
+}
+
+/**
+ * dev_pm_qos_hide_flags - Hide PM QoS flags of a device from user space.
+ * @dev: Device whose PM QoS flags are to be hidden from user space.
+ */
+void dev_pm_qos_hide_flags(struct device *dev)
+{
+ pm_runtime_get_sync(dev);
+ mutex_lock(&dev_pm_qos_sysfs_mtx);
+
+ pm_qos_sysfs_remove_flags(dev);
+
+ mutex_lock(&dev_pm_qos_mtx);
+ __dev_pm_qos_hide_flags(dev);
+ mutex_unlock(&dev_pm_qos_mtx);
+
+ mutex_unlock(&dev_pm_qos_sysfs_mtx);
+ pm_runtime_put(dev);
+}
+EXPORT_SYMBOL_GPL(dev_pm_qos_hide_flags);
+
+/**
+ * dev_pm_qos_update_flags - Update PM QoS flags request owned by user space.
+ * @dev: Device to update the PM QoS flags request for.
+ * @mask: Flags to set/clear.
+ * @set: Whether to set or clear the flags (true means set).
+ */
+int dev_pm_qos_update_flags(struct device *dev, s32 mask, bool set)
+{
+ s32 value;
+ int ret;
+
+ pm_runtime_get_sync(dev);
+ mutex_lock(&dev_pm_qos_mtx);
+
+ if (IS_ERR_OR_NULL(dev->power.qos) || !dev->power.qos->flags_req) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ value = dev_pm_qos_requested_flags(dev);
+ if (set)
+ value |= mask;
+ else
+ value &= ~mask;
+
+ ret = __dev_pm_qos_update_request(dev->power.qos->flags_req, value);
+
+ out:
+ mutex_unlock(&dev_pm_qos_mtx);
+ pm_runtime_put(dev);
+ return ret;
+}
+
+/**
+ * dev_pm_qos_get_user_latency_tolerance - Get user space latency tolerance.
+ * @dev: Device to obtain the user space latency tolerance for.
+ */
+s32 dev_pm_qos_get_user_latency_tolerance(struct device *dev)
+{
+ s32 ret;
+
+ mutex_lock(&dev_pm_qos_mtx);
+ ret = IS_ERR_OR_NULL(dev->power.qos)
+ || !dev->power.qos->latency_tolerance_req ?
+ PM_QOS_LATENCY_TOLERANCE_NO_CONSTRAINT :
+ dev->power.qos->latency_tolerance_req->data.pnode.prio;
+ mutex_unlock(&dev_pm_qos_mtx);
+ return ret;
+}
+
+/**
+ * dev_pm_qos_update_user_latency_tolerance - Update user space latency tolerance.
+ * @dev: Device to update the user space latency tolerance for.
+ * @val: New user space latency tolerance for @dev (negative values disable).
+ */
+int dev_pm_qos_update_user_latency_tolerance(struct device *dev, s32 val)
+{
+ int ret;
+
+ mutex_lock(&dev_pm_qos_mtx);
+
+ if (IS_ERR_OR_NULL(dev->power.qos)
+ || !dev->power.qos->latency_tolerance_req) {
+ struct dev_pm_qos_request *req;
+
+ if (val < 0) {
+ if (val == PM_QOS_LATENCY_TOLERANCE_NO_CONSTRAINT)
+ ret = 0;
+ else
+ ret = -EINVAL;
+ goto out;
+ }
+ req = kzalloc(sizeof(*req), GFP_KERNEL);
+ if (!req) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ ret = __dev_pm_qos_add_request(dev, req, DEV_PM_QOS_LATENCY_TOLERANCE, val);
+ if (ret < 0) {
+ kfree(req);
+ goto out;
+ }
+ dev->power.qos->latency_tolerance_req = req;
+ } else {
+ if (val < 0) {
+ __dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_LATENCY_TOLERANCE);
+ ret = 0;
+ } else {
+ ret = __dev_pm_qos_update_request(dev->power.qos->latency_tolerance_req, val);
+ }
+ }
+
+ out:
+ mutex_unlock(&dev_pm_qos_mtx);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(dev_pm_qos_update_user_latency_tolerance);
+
+/**
+ * dev_pm_qos_expose_latency_tolerance - Expose latency tolerance to userspace
+ * @dev: Device whose latency tolerance to expose
+ */
+int dev_pm_qos_expose_latency_tolerance(struct device *dev)
+{
+ int ret;
+
+ if (!dev->power.set_latency_tolerance)
+ return -EINVAL;
+
+ mutex_lock(&dev_pm_qos_sysfs_mtx);
+ ret = pm_qos_sysfs_add_latency_tolerance(dev);
+ mutex_unlock(&dev_pm_qos_sysfs_mtx);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(dev_pm_qos_expose_latency_tolerance);
+
+/**
+ * dev_pm_qos_hide_latency_tolerance - Hide latency tolerance from userspace
+ * @dev: Device whose latency tolerance to hide
+ */
+void dev_pm_qos_hide_latency_tolerance(struct device *dev)
+{
+ mutex_lock(&dev_pm_qos_sysfs_mtx);
+ pm_qos_sysfs_remove_latency_tolerance(dev);
+ mutex_unlock(&dev_pm_qos_sysfs_mtx);
+
+ /* Remove the request from user space now */
+ pm_runtime_get_sync(dev);
+ dev_pm_qos_update_user_latency_tolerance(dev,
+ PM_QOS_LATENCY_TOLERANCE_NO_CONSTRAINT);
+ pm_runtime_put(dev);
+}
+EXPORT_SYMBOL_GPL(dev_pm_qos_hide_latency_tolerance);
diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c
new file mode 100644
index 000000000..fbbc3ed14
--- /dev/null
+++ b/drivers/base/power/runtime.c
@@ -0,0 +1,1892 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * drivers/base/power/runtime.c - Helper functions for device runtime PM
+ *
+ * Copyright (c) 2009 Rafael J. Wysocki <rjw@sisk.pl>, Novell Inc.
+ * Copyright (C) 2010 Alan Stern <stern@rowland.harvard.edu>
+ */
+#include <linux/sched/mm.h>
+#include <linux/ktime.h>
+#include <linux/hrtimer.h>
+#include <linux/export.h>
+#include <linux/pm_runtime.h>
+#include <linux/pm_wakeirq.h>
+#include <trace/events/rpm.h>
+
+#include "../base.h"
+#include "power.h"
+
+typedef int (*pm_callback_t)(struct device *);
+
+static pm_callback_t __rpm_get_callback(struct device *dev, size_t cb_offset)
+{
+ pm_callback_t cb;
+ const struct dev_pm_ops *ops;
+
+ if (dev->pm_domain)
+ ops = &dev->pm_domain->ops;
+ else if (dev->type && dev->type->pm)
+ ops = dev->type->pm;
+ else if (dev->class && dev->class->pm)
+ ops = dev->class->pm;
+ else if (dev->bus && dev->bus->pm)
+ ops = dev->bus->pm;
+ else
+ ops = NULL;
+
+ if (ops)
+ cb = *(pm_callback_t *)((void *)ops + cb_offset);
+ else
+ cb = NULL;
+
+ if (!cb && dev->driver && dev->driver->pm)
+ cb = *(pm_callback_t *)((void *)dev->driver->pm + cb_offset);
+
+ return cb;
+}
+
+#define RPM_GET_CALLBACK(dev, callback) \
+ __rpm_get_callback(dev, offsetof(struct dev_pm_ops, callback))
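+
+/*
+ * For instance, RPM_GET_CALLBACK(dev, runtime_suspend) resolves to the
+ * ->runtime_suspend() callback that takes precedence for the device (PM
+ * domain, then type, class and bus, with the driver as a fallback), or
+ * NULL if none of them provides one.
+ */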
+
+static int rpm_resume(struct device *dev, int rpmflags);
+static int rpm_suspend(struct device *dev, int rpmflags);
+
+/**
+ * update_pm_runtime_accounting - Update the time accounting of power states
+ * @dev: Device to update the accounting for
+ *
+ * In order to be able to have time accounting of the various power states
+ * (as used by programs such as PowerTOP to show the effectiveness of runtime
+ * PM), we need to track the time spent in each state.
+ * update_pm_runtime_accounting must be called each time before the
+ * runtime_status field is updated, to account the time in the old state
+ * correctly.
+ */
+static void update_pm_runtime_accounting(struct device *dev)
+{
+ u64 now, last, delta;
+
+ if (dev->power.disable_depth > 0)
+ return;
+
+ last = dev->power.accounting_timestamp;
+
+ now = ktime_get_mono_fast_ns();
+ dev->power.accounting_timestamp = now;
+
+ /*
+ * Because ktime_get_mono_fast_ns() is not monotonic during
+ * timekeeping updates, ensure that 'now' is after the last saved
+	 * timestamp.
+ */
+ if (now < last)
+ return;
+
+ delta = now - last;
+
+ if (dev->power.runtime_status == RPM_SUSPENDED)
+ dev->power.suspended_time += delta;
+ else
+ dev->power.active_time += delta;
+}
+
+static void __update_runtime_status(struct device *dev, enum rpm_status status)
+{
+ update_pm_runtime_accounting(dev);
+ dev->power.runtime_status = status;
+}
+
+static u64 rpm_get_accounted_time(struct device *dev, bool suspended)
+{
+ u64 time;
+ unsigned long flags;
+
+ spin_lock_irqsave(&dev->power.lock, flags);
+
+ update_pm_runtime_accounting(dev);
+ time = suspended ? dev->power.suspended_time : dev->power.active_time;
+
+ spin_unlock_irqrestore(&dev->power.lock, flags);
+
+ return time;
+}
+
+u64 pm_runtime_active_time(struct device *dev)
+{
+ return rpm_get_accounted_time(dev, false);
+}
+
+u64 pm_runtime_suspended_time(struct device *dev)
+{
+ return rpm_get_accounted_time(dev, true);
+}
+EXPORT_SYMBOL_GPL(pm_runtime_suspended_time);
+
+/**
+ * pm_runtime_deactivate_timer - Deactivate given device's suspend timer.
+ * @dev: Device to handle.
+ */
+static void pm_runtime_deactivate_timer(struct device *dev)
+{
+ if (dev->power.timer_expires > 0) {
+ hrtimer_try_to_cancel(&dev->power.suspend_timer);
+ dev->power.timer_expires = 0;
+ }
+}
+
+/**
+ * pm_runtime_cancel_pending - Deactivate suspend timer and cancel requests.
+ * @dev: Device to handle.
+ */
+static void pm_runtime_cancel_pending(struct device *dev)
+{
+ pm_runtime_deactivate_timer(dev);
+ /*
+ * In case there's a request pending, make sure its work function will
+ * return without doing anything.
+ */
+ dev->power.request = RPM_REQ_NONE;
+}
+
+/*
+ * pm_runtime_autosuspend_expiration - Get a device's autosuspend-delay expiration time.
+ * @dev: Device to handle.
+ *
+ * Compute the autosuspend-delay expiration time based on the device's
+ * power.last_busy time. If the delay has already expired or is disabled
+ * (negative) or the power.use_autosuspend flag isn't set, return 0.
+ * Otherwise return the expiration time in nanoseconds (adjusted to be nonzero).
+ *
+ * This function may be called either with or without dev->power.lock held.
+ * Either way it can be racy, since power.last_busy may be updated at any time.
+ */
+u64 pm_runtime_autosuspend_expiration(struct device *dev)
+{
+ int autosuspend_delay;
+ u64 expires;
+
+ if (!dev->power.use_autosuspend)
+ return 0;
+
+ autosuspend_delay = READ_ONCE(dev->power.autosuspend_delay);
+ if (autosuspend_delay < 0)
+ return 0;
+
+ expires = READ_ONCE(dev->power.last_busy);
+ expires += (u64)autosuspend_delay * NSEC_PER_MSEC;
+ if (expires > ktime_get_mono_fast_ns())
+ return expires; /* Expires in the future */
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(pm_runtime_autosuspend_expiration);
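+
+/*
+ * Illustrative sketch (not part of this file): the expiration time above is
+ * only meaningful once a driver has opted in to autosuspend, typically from
+ * its probe path (the 2000 ms delay is an arbitrary example value):
+ *
+ *  pm_runtime_set_autosuspend_delay(dev, 2000);
+ *  pm_runtime_use_autosuspend(dev);
+ *  ...
+ *  pm_runtime_mark_last_busy(dev);
+ *  pm_runtime_put_autosuspend(dev);
+ */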
+
+static int dev_memalloc_noio(struct device *dev, void *data)
+{
+ return dev->power.memalloc_noio;
+}
+
+/*
+ * pm_runtime_set_memalloc_noio - Set a device's memalloc_noio flag.
+ * @dev: Device to handle.
+ * @enable: True for setting the flag and False for clearing the flag.
+ *
+ * Set the flag for all devices in the path from the device to the
+ * root device in the device tree if @enable is true, otherwise clear
+ * the flag for devices in the path whose siblings don't set the flag.
+ *
+ * The function should only be called by a block device or network
+ * device driver to solve deadlock problems during runtime
+ * resume/suspend:
+ *
+ * If a memory allocation with GFP_KERNEL is made inside the runtime
+ * resume/suspend callback of any of the device's ancestors (or of the
+ * block device itself), a deadlock may be triggered inside the
+ * memory allocation, since it might not complete until the block
+ * device becomes active and the involved page I/O finishes. This
+ * situation was first pointed out by Alan Stern. Network devices
+ * can be involved in iSCSI-type situations as well.
+ *
+ * dev_hotplug_mutex is held in this function to handle hotplug
+ * races, because pm_runtime_set_memalloc_noio() may be called from
+ * an async probe().
+ *
+ * The function should be called between device_add() and device_del()
+ * on the affected device (block/network device).
+ */
+void pm_runtime_set_memalloc_noio(struct device *dev, bool enable)
+{
+ static DEFINE_MUTEX(dev_hotplug_mutex);
+
+ mutex_lock(&dev_hotplug_mutex);
+ for (;;) {
+ bool enabled;
+
+ /* hold power lock since bitfield is not SMP-safe. */
+ spin_lock_irq(&dev->power.lock);
+ enabled = dev->power.memalloc_noio;
+ dev->power.memalloc_noio = enable;
+ spin_unlock_irq(&dev->power.lock);
+
+ /*
+		 * No need to enable the ancestors any more if the device
+		 * has already been enabled.
+ */
+ if (enabled && enable)
+ break;
+
+ dev = dev->parent;
+
+ /*
+		 * Clear the parent device's flag only if none of its
+		 * children have the flag set, because an ancestor's
+		 * flag may have been set by any one of its descendants.
+ */
+ if (!dev || (!enable &&
+ device_for_each_child(dev, NULL,
+ dev_memalloc_noio)))
+ break;
+ }
+ mutex_unlock(&dev_hotplug_mutex);
+}
+EXPORT_SYMBOL_GPL(pm_runtime_set_memalloc_noio);
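+
+/*
+ * Illustrative sketch (not part of this file): a block or network device
+ * driver would bracket the device's registered lifetime, for example:
+ *
+ *  device_add(dev);
+ *  pm_runtime_set_memalloc_noio(dev, true);
+ *  ...
+ *  pm_runtime_set_memalloc_noio(dev, false);
+ *  device_del(dev);
+ */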
+
+/**
+ * rpm_check_suspend_allowed - Test whether a device may be suspended.
+ * @dev: Device to test.
+ */
+static int rpm_check_suspend_allowed(struct device *dev)
+{
+ int retval = 0;
+
+ if (dev->power.runtime_error)
+ retval = -EINVAL;
+ else if (dev->power.disable_depth > 0)
+ retval = -EACCES;
+ else if (atomic_read(&dev->power.usage_count) > 0)
+ retval = -EAGAIN;
+ else if (!dev->power.ignore_children &&
+ atomic_read(&dev->power.child_count))
+ retval = -EBUSY;
+
+ /* Pending resume requests take precedence over suspends. */
+ else if ((dev->power.deferred_resume
+ && dev->power.runtime_status == RPM_SUSPENDING)
+ || (dev->power.request_pending
+ && dev->power.request == RPM_REQ_RESUME))
+ retval = -EAGAIN;
+ else if (__dev_pm_qos_resume_latency(dev) == 0)
+ retval = -EPERM;
+ else if (dev->power.runtime_status == RPM_SUSPENDED)
+ retval = 1;
+
+ return retval;
+}
+
+static int rpm_get_suppliers(struct device *dev)
+{
+ struct device_link *link;
+
+ list_for_each_entry_rcu(link, &dev->links.suppliers, c_node,
+ device_links_read_lock_held()) {
+ int retval;
+
+ if (!(link->flags & DL_FLAG_PM_RUNTIME))
+ continue;
+
+ retval = pm_runtime_get_sync(link->supplier);
+ /* Ignore suppliers with disabled runtime PM. */
+ if (retval < 0 && retval != -EACCES) {
+ pm_runtime_put_noidle(link->supplier);
+ return retval;
+ }
+ refcount_inc(&link->rpm_active);
+ }
+ return 0;
+}
+
+/**
+ * pm_runtime_release_supplier - Drop references to device link's supplier.
+ * @link: Target device link.
+ *
+ * Drop all runtime PM references associated with @link to its supplier device.
+ */
+void pm_runtime_release_supplier(struct device_link *link)
+{
+ struct device *supplier = link->supplier;
+
+ /*
+ * The additional power.usage_count check is a safety net in case
+ * the rpm_active refcount becomes saturated, in which case
+ * refcount_dec_not_one() would return true forever, but it is not
+ * strictly necessary.
+ */
+ while (refcount_dec_not_one(&link->rpm_active) &&
+ atomic_read(&supplier->power.usage_count) > 0)
+ pm_runtime_put_noidle(supplier);
+}
+
+static void __rpm_put_suppliers(struct device *dev, bool try_to_suspend)
+{
+ struct device_link *link;
+
+ list_for_each_entry_rcu(link, &dev->links.suppliers, c_node,
+ device_links_read_lock_held()) {
+ pm_runtime_release_supplier(link);
+ if (try_to_suspend)
+ pm_request_idle(link->supplier);
+ }
+}
+
+static void rpm_put_suppliers(struct device *dev)
+{
+ __rpm_put_suppliers(dev, true);
+}
+
+static void rpm_suspend_suppliers(struct device *dev)
+{
+ struct device_link *link;
+ int idx = device_links_read_lock();
+
+ list_for_each_entry_rcu(link, &dev->links.suppliers, c_node,
+ device_links_read_lock_held())
+ pm_request_idle(link->supplier);
+
+ device_links_read_unlock(idx);
+}
+
+/**
+ * __rpm_callback - Run a given runtime PM callback for a given device.
+ * @cb: Runtime PM callback to run.
+ * @dev: Device to run the callback for.
+ */
+static int __rpm_callback(int (*cb)(struct device *), struct device *dev)
+ __releases(&dev->power.lock) __acquires(&dev->power.lock)
+{
+ int retval, idx;
+ bool use_links = dev->power.links_count > 0;
+
+ if (dev->power.irq_safe) {
+ spin_unlock(&dev->power.lock);
+ } else {
+ spin_unlock_irq(&dev->power.lock);
+
+ /*
+ * Resume suppliers if necessary.
+ *
+ * The device's runtime PM status cannot change until this
+ * routine returns, so it is safe to read the status outside of
+ * the lock.
+ */
+ if (use_links && dev->power.runtime_status == RPM_RESUMING) {
+ idx = device_links_read_lock();
+
+ retval = rpm_get_suppliers(dev);
+ if (retval) {
+ rpm_put_suppliers(dev);
+ goto fail;
+ }
+
+ device_links_read_unlock(idx);
+ }
+ }
+
+ retval = cb(dev);
+
+ if (dev->power.irq_safe) {
+ spin_lock(&dev->power.lock);
+ } else {
+ /*
+ * If the device is suspending and the callback has returned
+ * success, drop the usage counters of the suppliers that have
+ * been reference counted on its resume.
+ *
+ * Do that if resume fails too.
+ */
+ if (use_links
+ && ((dev->power.runtime_status == RPM_SUSPENDING && !retval)
+ || (dev->power.runtime_status == RPM_RESUMING && retval))) {
+ idx = device_links_read_lock();
+
+ __rpm_put_suppliers(dev, false);
+
+fail:
+ device_links_read_unlock(idx);
+ }
+
+ spin_lock_irq(&dev->power.lock);
+ }
+
+ return retval;
+}
+
+/**
+ * rpm_idle - Notify device bus type if the device can be suspended.
+ * @dev: Device to notify the bus type about.
+ * @rpmflags: Flag bits.
+ *
+ * Check if the device's runtime PM status allows it to be suspended. If
+ * another idle notification has been started earlier, return immediately. If
+ * the RPM_ASYNC flag is set then queue an idle-notification request; otherwise
+ * run the ->runtime_idle() callback directly. If the ->runtime_idle callback
+ * doesn't exist or if it returns 0, call rpm_suspend with the RPM_AUTO flag.
+ *
+ * This function must be called under dev->power.lock with interrupts disabled.
+ */
+static int rpm_idle(struct device *dev, int rpmflags)
+{
+ int (*callback)(struct device *);
+ int retval;
+
+ trace_rpm_idle_rcuidle(dev, rpmflags);
+ retval = rpm_check_suspend_allowed(dev);
+ if (retval < 0)
+ ; /* Conditions are wrong. */
+
+ /* Idle notifications are allowed only in the RPM_ACTIVE state. */
+ else if (dev->power.runtime_status != RPM_ACTIVE)
+ retval = -EAGAIN;
+
+ /*
+ * Any pending request other than an idle notification takes
+ * precedence over us, except that the timer may be running.
+ */
+ else if (dev->power.request_pending &&
+ dev->power.request > RPM_REQ_IDLE)
+ retval = -EAGAIN;
+
+ /* Act as though RPM_NOWAIT is always set. */
+ else if (dev->power.idle_notification)
+ retval = -EINPROGRESS;
+ if (retval)
+ goto out;
+
+ /* Pending requests need to be canceled. */
+ dev->power.request = RPM_REQ_NONE;
+
+ callback = RPM_GET_CALLBACK(dev, runtime_idle);
+
+ /* If no callback assume success. */
+ if (!callback || dev->power.no_callbacks)
+ goto out;
+
+ /* Carry out an asynchronous or a synchronous idle notification. */
+ if (rpmflags & RPM_ASYNC) {
+ dev->power.request = RPM_REQ_IDLE;
+ if (!dev->power.request_pending) {
+ dev->power.request_pending = true;
+ queue_work(pm_wq, &dev->power.work);
+ }
+ trace_rpm_return_int_rcuidle(dev, _THIS_IP_, 0);
+ return 0;
+ }
+
+ dev->power.idle_notification = true;
+
+ if (dev->power.irq_safe)
+ spin_unlock(&dev->power.lock);
+ else
+ spin_unlock_irq(&dev->power.lock);
+
+ retval = callback(dev);
+
+ if (dev->power.irq_safe)
+ spin_lock(&dev->power.lock);
+ else
+ spin_lock_irq(&dev->power.lock);
+
+ dev->power.idle_notification = false;
+ wake_up_all(&dev->power.wait_queue);
+
+ out:
+ trace_rpm_return_int_rcuidle(dev, _THIS_IP_, retval);
+ return retval ? retval : rpm_suspend(dev, rpmflags | RPM_AUTO);
+}
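+
+/*
+ * Illustrative sketch (not part of this file): a hypothetical driver's
+ * ->runtime_idle() callback as reached from rpm_idle(). Returning 0 lets the
+ * core go on to call rpm_suspend() with the RPM_AUTO flag, while a negative
+ * error code keeps the device active; foo_priv and dma_in_flight are made up.
+ *
+ *	static int foo_runtime_idle(struct device *dev)
+ *	{
+ *		struct foo_priv *priv = dev_get_drvdata(dev);
+ *
+ *		return priv->dma_in_flight ? -EBUSY : 0;
+ *	}
+ */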
+
+/**
+ * rpm_callback - Run a given runtime PM callback for a given device.
+ * @cb: Runtime PM callback to run.
+ * @dev: Device to run the callback for.
+ */
+static int rpm_callback(int (*cb)(struct device *), struct device *dev)
+{
+ int retval;
+
+ if (!cb)
+ return -ENOSYS;
+
+ if (dev->power.memalloc_noio) {
+ unsigned int noio_flag;
+
+ /*
+ * Deadlock might be caused if memory allocation with
+ * GFP_KERNEL happens inside runtime_suspend and
+ * runtime_resume callbacks of one block device's
+ * ancestor or the block device itself. Network
+ * device might be thought as part of iSCSI block
+ * device, so network device and its ancestor should
+ * be marked as memalloc_noio too.
+ */
+ noio_flag = memalloc_noio_save();
+ retval = __rpm_callback(cb, dev);
+ memalloc_noio_restore(noio_flag);
+ } else {
+ retval = __rpm_callback(cb, dev);
+ }
+
+ dev->power.runtime_error = retval;
+ return retval != -EACCES ? retval : -EIO;
+}
+
+/**
+ * rpm_suspend - Carry out runtime suspend of given device.
+ * @dev: Device to suspend.
+ * @rpmflags: Flag bits.
+ *
+ * Check if the device's runtime PM status allows it to be suspended.
+ * Cancel a pending idle notification, autosuspend or suspend. If
+ * another suspend has been started earlier, either return immediately
+ * or wait for it to finish, depending on the RPM_NOWAIT and RPM_ASYNC
+ * flags. If the RPM_ASYNC flag is set then queue a suspend request;
+ * otherwise run the ->runtime_suspend() callback directly. If
+ * ->runtime_suspend() succeeds and a deferred resume was requested while the
+ * callback was running, carry out that resume; otherwise send an idle
+ * notification for the device's parent (provided that the suspend succeeded
+ * and neither parent->power.ignore_children nor dev->power.irq_safe is set).
+ * If ->runtime_suspend() fails with -EAGAIN or -EBUSY, and the RPM_AUTO flag
+ * is set and the next autosuspend-delay expiration time is in the future,
+ * schedule another autosuspend attempt.
+ *
+ * This function must be called under dev->power.lock with interrupts disabled.
+ */
+static int rpm_suspend(struct device *dev, int rpmflags)
+ __releases(&dev->power.lock) __acquires(&dev->power.lock)
+{
+ int (*callback)(struct device *);
+ struct device *parent = NULL;
+ int retval;
+
+ trace_rpm_suspend_rcuidle(dev, rpmflags);
+
+ repeat:
+ retval = rpm_check_suspend_allowed(dev);
+ if (retval < 0)
+ goto out; /* Conditions are wrong. */
+
+ /* Synchronous suspends are not allowed in the RPM_RESUMING state. */
+ if (dev->power.runtime_status == RPM_RESUMING && !(rpmflags & RPM_ASYNC))
+ retval = -EAGAIN;
+ if (retval)
+ goto out;
+
+ /* If the autosuspend_delay time hasn't expired yet, reschedule. */
+ if ((rpmflags & RPM_AUTO)
+ && dev->power.runtime_status != RPM_SUSPENDING) {
+ u64 expires = pm_runtime_autosuspend_expiration(dev);
+
+ if (expires != 0) {
+ /* Pending requests need to be canceled. */
+ dev->power.request = RPM_REQ_NONE;
+
+ /*
+ * Optimization: If the timer is already running and is
+ * set to expire at or before the autosuspend delay,
+ * avoid the overhead of resetting it. Just let it
+ * expire; pm_suspend_timer_fn() will take care of the
+ * rest.
+ */
+ if (!(dev->power.timer_expires &&
+ dev->power.timer_expires <= expires)) {
+ /*
+ * We add a slack of 25% to gather wakeups
+ * without sacrificing the granularity.
+ */
+ u64 slack = (u64)READ_ONCE(dev->power.autosuspend_delay) *
+ (NSEC_PER_MSEC >> 2);
+
+ dev->power.timer_expires = expires;
+ hrtimer_start_range_ns(&dev->power.suspend_timer,
+ ns_to_ktime(expires),
+ slack,
+ HRTIMER_MODE_ABS);
+ }
+ dev->power.timer_autosuspends = 1;
+ goto out;
+ }
+ }
+
+ /* Other scheduled or pending requests need to be canceled. */
+ pm_runtime_cancel_pending(dev);
+
+ if (dev->power.runtime_status == RPM_SUSPENDING) {
+ DEFINE_WAIT(wait);
+
+ if (rpmflags & (RPM_ASYNC | RPM_NOWAIT)) {
+ retval = -EINPROGRESS;
+ goto out;
+ }
+
+ if (dev->power.irq_safe) {
+ spin_unlock(&dev->power.lock);
+
+ cpu_relax();
+
+ spin_lock(&dev->power.lock);
+ goto repeat;
+ }
+
+ /* Wait for the other suspend running in parallel with us. */
+ for (;;) {
+ prepare_to_wait(&dev->power.wait_queue, &wait,
+ TASK_UNINTERRUPTIBLE);
+ if (dev->power.runtime_status != RPM_SUSPENDING)
+ break;
+
+ spin_unlock_irq(&dev->power.lock);
+
+ schedule();
+
+ spin_lock_irq(&dev->power.lock);
+ }
+ finish_wait(&dev->power.wait_queue, &wait);
+ goto repeat;
+ }
+
+ if (dev->power.no_callbacks)
+ goto no_callback; /* Assume success. */
+
+ /* Carry out an asynchronous or a synchronous suspend. */
+ if (rpmflags & RPM_ASYNC) {
+ dev->power.request = (rpmflags & RPM_AUTO) ?
+ RPM_REQ_AUTOSUSPEND : RPM_REQ_SUSPEND;
+ if (!dev->power.request_pending) {
+ dev->power.request_pending = true;
+ queue_work(pm_wq, &dev->power.work);
+ }
+ goto out;
+ }
+
+ __update_runtime_status(dev, RPM_SUSPENDING);
+
+ callback = RPM_GET_CALLBACK(dev, runtime_suspend);
+
+ dev_pm_enable_wake_irq_check(dev, true);
+ retval = rpm_callback(callback, dev);
+ if (retval)
+ goto fail;
+
+ dev_pm_enable_wake_irq_complete(dev);
+
+ no_callback:
+ __update_runtime_status(dev, RPM_SUSPENDED);
+ pm_runtime_deactivate_timer(dev);
+
+ if (dev->parent) {
+ parent = dev->parent;
+ atomic_add_unless(&parent->power.child_count, -1, 0);
+ }
+ wake_up_all(&dev->power.wait_queue);
+
+ if (dev->power.deferred_resume) {
+ dev->power.deferred_resume = false;
+ rpm_resume(dev, 0);
+ retval = -EAGAIN;
+ goto out;
+ }
+
+ if (dev->power.irq_safe)
+ goto out;
+
+ /* Maybe the parent is now able to suspend. */
+ if (parent && !parent->power.ignore_children) {
+ spin_unlock(&dev->power.lock);
+
+ spin_lock(&parent->power.lock);
+ rpm_idle(parent, RPM_ASYNC);
+ spin_unlock(&parent->power.lock);
+
+ spin_lock(&dev->power.lock);
+ }
+ /* Maybe the suppliers are now able to suspend. */
+ if (dev->power.links_count > 0) {
+ spin_unlock_irq(&dev->power.lock);
+
+ rpm_suspend_suppliers(dev);
+
+ spin_lock_irq(&dev->power.lock);
+ }
+
+ out:
+ trace_rpm_return_int_rcuidle(dev, _THIS_IP_, retval);
+
+ return retval;
+
+ fail:
+ dev_pm_disable_wake_irq_check(dev, true);
+ __update_runtime_status(dev, RPM_ACTIVE);
+ dev->power.deferred_resume = false;
+ wake_up_all(&dev->power.wait_queue);
+
+ if (retval == -EAGAIN || retval == -EBUSY) {
+ dev->power.runtime_error = 0;
+
+ /*
+ * If the callback routine failed an autosuspend, and
+ * if the last_busy time has been updated so that there
+ * is a new autosuspend expiration time, automatically
+ * reschedule another autosuspend.
+ */
+ if ((rpmflags & RPM_AUTO) &&
+ pm_runtime_autosuspend_expiration(dev) != 0)
+ goto repeat;
+ } else {
+ pm_runtime_cancel_pending(dev);
+ }
+ goto out;
+}
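+
+/*
+ * Illustrative sketch (not part of this file): a hypothetical driver's
+ * ->runtime_suspend() callback as invoked by rpm_suspend() above. Returning
+ * -EBUSY after refreshing last_busy makes the failure path above reschedule
+ * another autosuspend attempt when RPM_AUTO is set; the foo_hw_*() helpers
+ * and foo_priv are assumptions.
+ *
+ *	static int foo_runtime_suspend(struct device *dev)
+ *	{
+ *		struct foo_priv *priv = dev_get_drvdata(dev);
+ *
+ *		if (foo_hw_busy(priv)) {
+ *			pm_runtime_mark_last_busy(dev);
+ *			return -EBUSY;
+ *		}
+ *
+ *		foo_hw_power_down(priv);
+ *		return 0;
+ *	}
+ */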
+
+/**
+ * rpm_resume - Carry out runtime resume of given device.
+ * @dev: Device to resume.
+ * @rpmflags: Flag bits.
+ *
+ * Check if the device's runtime PM status allows it to be resumed. Cancel
+ * any scheduled or pending requests. If another resume has been started
+ * earlier, either return immediately or wait for it to finish, depending on the
+ * RPM_NOWAIT and RPM_ASYNC flags. Similarly, if there's a suspend running in
+ * parallel with this function, either tell the other process to resume after
+ * suspending (deferred_resume) or wait for it to finish. If the RPM_ASYNC
+ * flag is set then queue a resume request; otherwise run the
+ * ->runtime_resume() callback directly. Queue an idle notification for the
+ * device if the resume succeeded.
+ *
+ * This function must be called under dev->power.lock with interrupts disabled.
+ */
+static int rpm_resume(struct device *dev, int rpmflags)
+ __releases(&dev->power.lock) __acquires(&dev->power.lock)
+{
+ int (*callback)(struct device *);
+ struct device *parent = NULL;
+ int retval = 0;
+
+ trace_rpm_resume_rcuidle(dev, rpmflags);
+
+ repeat:
+ if (dev->power.runtime_error)
+ retval = -EINVAL;
+ else if (dev->power.disable_depth == 1 && dev->power.is_suspended
+ && dev->power.runtime_status == RPM_ACTIVE)
+ retval = 1;
+ else if (dev->power.disable_depth > 0)
+ retval = -EACCES;
+ if (retval)
+ goto out;
+
+ /*
+ * Other scheduled or pending requests need to be canceled. Small
+ * optimization: If an autosuspend timer is running, leave it running
+ * rather than cancelling it now only to restart it again in the near
+ * future.
+ */
+ dev->power.request = RPM_REQ_NONE;
+ if (!dev->power.timer_autosuspends)
+ pm_runtime_deactivate_timer(dev);
+
+ if (dev->power.runtime_status == RPM_ACTIVE) {
+ retval = 1;
+ goto out;
+ }
+
+ if (dev->power.runtime_status == RPM_RESUMING
+ || dev->power.runtime_status == RPM_SUSPENDING) {
+ DEFINE_WAIT(wait);
+
+ if (rpmflags & (RPM_ASYNC | RPM_NOWAIT)) {
+ if (dev->power.runtime_status == RPM_SUSPENDING)
+ dev->power.deferred_resume = true;
+ else
+ retval = -EINPROGRESS;
+ goto out;
+ }
+
+ if (dev->power.irq_safe) {
+ spin_unlock(&dev->power.lock);
+
+ cpu_relax();
+
+ spin_lock(&dev->power.lock);
+ goto repeat;
+ }
+
+ /* Wait for the operation carried out in parallel with us. */
+ for (;;) {
+ prepare_to_wait(&dev->power.wait_queue, &wait,
+ TASK_UNINTERRUPTIBLE);
+ if (dev->power.runtime_status != RPM_RESUMING
+ && dev->power.runtime_status != RPM_SUSPENDING)
+ break;
+
+ spin_unlock_irq(&dev->power.lock);
+
+ schedule();
+
+ spin_lock_irq(&dev->power.lock);
+ }
+ finish_wait(&dev->power.wait_queue, &wait);
+ goto repeat;
+ }
+
+ /*
+ * See if we can skip waking up the parent. This is safe only if
+ * power.no_callbacks is set, because otherwise we don't know whether
+ * the resume will actually succeed.
+ */
+ if (dev->power.no_callbacks && !parent && dev->parent) {
+ spin_lock_nested(&dev->parent->power.lock, SINGLE_DEPTH_NESTING);
+ if (dev->parent->power.disable_depth > 0
+ || dev->parent->power.ignore_children
+ || dev->parent->power.runtime_status == RPM_ACTIVE) {
+ atomic_inc(&dev->parent->power.child_count);
+ spin_unlock(&dev->parent->power.lock);
+ retval = 1;
+ goto no_callback; /* Assume success. */
+ }
+ spin_unlock(&dev->parent->power.lock);
+ }
+
+ /* Carry out an asynchronous or a synchronous resume. */
+ if (rpmflags & RPM_ASYNC) {
+ dev->power.request = RPM_REQ_RESUME;
+ if (!dev->power.request_pending) {
+ dev->power.request_pending = true;
+ queue_work(pm_wq, &dev->power.work);
+ }
+ retval = 0;
+ goto out;
+ }
+
+ if (!parent && dev->parent) {
+ /*
+ * Increment the parent's usage counter and resume it if
+ * necessary. Not needed if dev is irq-safe; then the
+ * parent is permanently resumed.
+ */
+ parent = dev->parent;
+ if (dev->power.irq_safe)
+ goto skip_parent;
+ spin_unlock(&dev->power.lock);
+
+ pm_runtime_get_noresume(parent);
+
+ spin_lock(&parent->power.lock);
+ /*
+ * Resume the parent if it has runtime PM enabled and not been
+ * set to ignore its children.
+ */
+ if (!parent->power.disable_depth
+ && !parent->power.ignore_children) {
+ rpm_resume(parent, 0);
+ if (parent->power.runtime_status != RPM_ACTIVE)
+ retval = -EBUSY;
+ }
+ spin_unlock(&parent->power.lock);
+
+ spin_lock(&dev->power.lock);
+ if (retval)
+ goto out;
+ goto repeat;
+ }
+ skip_parent:
+
+ if (dev->power.no_callbacks)
+ goto no_callback; /* Assume success. */
+
+ __update_runtime_status(dev, RPM_RESUMING);
+
+ callback = RPM_GET_CALLBACK(dev, runtime_resume);
+
+ dev_pm_disable_wake_irq_check(dev, false);
+ retval = rpm_callback(callback, dev);
+ if (retval) {
+ __update_runtime_status(dev, RPM_SUSPENDED);
+ pm_runtime_cancel_pending(dev);
+ dev_pm_enable_wake_irq_check(dev, false);
+ } else {
+ no_callback:
+ __update_runtime_status(dev, RPM_ACTIVE);
+ pm_runtime_mark_last_busy(dev);
+ if (parent)
+ atomic_inc(&parent->power.child_count);
+ }
+ wake_up_all(&dev->power.wait_queue);
+
+ if (retval >= 0)
+ rpm_idle(dev, RPM_ASYNC);
+
+ out:
+ if (parent && !dev->power.irq_safe) {
+ spin_unlock_irq(&dev->power.lock);
+
+ pm_runtime_put(parent);
+
+ spin_lock_irq(&dev->power.lock);
+ }
+
+ trace_rpm_return_int_rcuidle(dev, _THIS_IP_, retval);
+
+ return retval;
+}
+
+/**
+ * pm_runtime_work - Universal runtime PM work function.
+ * @work: Work structure used for scheduling the execution of this function.
+ *
+ * Use @work to get the device object the work is to be done for, determine what
+ * is to be done and execute the appropriate runtime PM function.
+ */
+static void pm_runtime_work(struct work_struct *work)
+{
+ struct device *dev = container_of(work, struct device, power.work);
+ enum rpm_request req;
+
+ spin_lock_irq(&dev->power.lock);
+
+ if (!dev->power.request_pending)
+ goto out;
+
+ req = dev->power.request;
+ dev->power.request = RPM_REQ_NONE;
+ dev->power.request_pending = false;
+
+ switch (req) {
+ case RPM_REQ_NONE:
+ break;
+ case RPM_REQ_IDLE:
+ rpm_idle(dev, RPM_NOWAIT);
+ break;
+ case RPM_REQ_SUSPEND:
+ rpm_suspend(dev, RPM_NOWAIT);
+ break;
+ case RPM_REQ_AUTOSUSPEND:
+ rpm_suspend(dev, RPM_NOWAIT | RPM_AUTO);
+ break;
+ case RPM_REQ_RESUME:
+ rpm_resume(dev, RPM_NOWAIT);
+ break;
+ }
+
+ out:
+ spin_unlock_irq(&dev->power.lock);
+}
+
+/**
+ * pm_suspend_timer_fn - Timer function for pm_schedule_suspend().
+ * @timer: hrtimer used by pm_schedule_suspend().
+ *
+ * Check if the time is right and queue a suspend request.
+ */
+static enum hrtimer_restart pm_suspend_timer_fn(struct hrtimer *timer)
+{
+ struct device *dev = container_of(timer, struct device, power.suspend_timer);
+ unsigned long flags;
+ u64 expires;
+
+ spin_lock_irqsave(&dev->power.lock, flags);
+
+ expires = dev->power.timer_expires;
+	/*
+	 * If 'expires' is after the current time, the timer has fired too
+	 * early, so do nothing; otherwise queue an (auto)suspend request.
+	 */
+ if (expires > 0 && expires < ktime_get_mono_fast_ns()) {
+ dev->power.timer_expires = 0;
+ rpm_suspend(dev, dev->power.timer_autosuspends ?
+ (RPM_ASYNC | RPM_AUTO) : RPM_ASYNC);
+ }
+
+ spin_unlock_irqrestore(&dev->power.lock, flags);
+
+ return HRTIMER_NORESTART;
+}
+
+/**
+ * pm_schedule_suspend - Set up a timer to submit a suspend request in future.
+ * @dev: Device to suspend.
+ * @delay: Time to wait before submitting a suspend request, in milliseconds.
+ */
+int pm_schedule_suspend(struct device *dev, unsigned int delay)
+{
+ unsigned long flags;
+ u64 expires;
+ int retval;
+
+ spin_lock_irqsave(&dev->power.lock, flags);
+
+ if (!delay) {
+ retval = rpm_suspend(dev, RPM_ASYNC);
+ goto out;
+ }
+
+ retval = rpm_check_suspend_allowed(dev);
+ if (retval)
+ goto out;
+
+ /* Other scheduled or pending requests need to be canceled. */
+ pm_runtime_cancel_pending(dev);
+
+ expires = ktime_get_mono_fast_ns() + (u64)delay * NSEC_PER_MSEC;
+ dev->power.timer_expires = expires;
+ dev->power.timer_autosuspends = 0;
+ hrtimer_start(&dev->power.suspend_timer, expires, HRTIMER_MODE_ABS);
+
+ out:
+ spin_unlock_irqrestore(&dev->power.lock, flags);
+
+ return retval;
+}
+EXPORT_SYMBOL_GPL(pm_schedule_suspend);
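+
+/*
+ * Illustrative sketch (not part of this file): a driver might ask for a
+ * suspend attempt roughly two seconds from now instead of suspending
+ * synchronously; a negative return value means no request was queued (for
+ * example, runtime PM is disabled or the device is still in use).
+ *
+ *	ret = pm_schedule_suspend(dev, 2000);
+ *	if (ret < 0)
+ *		dev_dbg(dev, "suspend not scheduled: %d\n", ret);
+ */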
+
+/**
+ * __pm_runtime_idle - Entry point for runtime idle operations.
+ * @dev: Device to send idle notification for.
+ * @rpmflags: Flag bits.
+ *
+ * If the RPM_GET_PUT flag is set, decrement the device's usage count and
+ * return immediately if it is larger than zero. Then carry out an idle
+ * notification, either synchronous or asynchronous.
+ *
+ * This routine may be called in atomic context if the RPM_ASYNC flag is set,
+ * or if pm_runtime_irq_safe() has been called.
+ */
+int __pm_runtime_idle(struct device *dev, int rpmflags)
+{
+ unsigned long flags;
+ int retval;
+
+ if (rpmflags & RPM_GET_PUT) {
+ if (!atomic_dec_and_test(&dev->power.usage_count)) {
+ trace_rpm_usage_rcuidle(dev, rpmflags);
+ return 0;
+ }
+ }
+
+ might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe);
+
+ spin_lock_irqsave(&dev->power.lock, flags);
+ retval = rpm_idle(dev, rpmflags);
+ spin_unlock_irqrestore(&dev->power.lock, flags);
+
+ return retval;
+}
+EXPORT_SYMBOL_GPL(__pm_runtime_idle);
+
+/**
+ * __pm_runtime_suspend - Entry point for runtime put/suspend operations.
+ * @dev: Device to suspend.
+ * @rpmflags: Flag bits.
+ *
+ * If the RPM_GET_PUT flag is set, decrement the device's usage count and
+ * return immediately if it is larger than zero. Then carry out a suspend,
+ * either synchronous or asynchronous.
+ *
+ * This routine may be called in atomic context if the RPM_ASYNC flag is set,
+ * or if pm_runtime_irq_safe() has been called.
+ */
+int __pm_runtime_suspend(struct device *dev, int rpmflags)
+{
+ unsigned long flags;
+ int retval;
+
+ if (rpmflags & RPM_GET_PUT) {
+ if (!atomic_dec_and_test(&dev->power.usage_count)) {
+ trace_rpm_usage_rcuidle(dev, rpmflags);
+ return 0;
+ }
+ }
+
+ might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe);
+
+ spin_lock_irqsave(&dev->power.lock, flags);
+ retval = rpm_suspend(dev, rpmflags);
+ spin_unlock_irqrestore(&dev->power.lock, flags);
+
+ return retval;
+}
+EXPORT_SYMBOL_GPL(__pm_runtime_suspend);
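+
+/*
+ * Illustrative sketch (not part of this file): the pm_runtime_put_autosuspend()
+ * helper in <linux/pm_runtime.h> wraps this entry point with
+ * RPM_GET_PUT | RPM_ASYNC | RPM_AUTO, so a driver typically writes:
+ *
+ *	pm_runtime_mark_last_busy(dev);
+ *	pm_runtime_put_autosuspend(dev);
+ *
+ * which drops the usage count and, once it reaches zero, lets rpm_suspend()
+ * honour the autosuspend delay configured by the driver.
+ */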
+
+/**
+ * __pm_runtime_resume - Entry point for runtime resume operations.
+ * @dev: Device to resume.
+ * @rpmflags: Flag bits.
+ *
+ * If the RPM_GET_PUT flag is set, increment the device's usage count. Then
+ * carry out a resume, either synchronous or asynchronous.
+ *
+ * This routine may be called in atomic context if the RPM_ASYNC flag is set,
+ * or if pm_runtime_irq_safe() has been called.
+ */
+int __pm_runtime_resume(struct device *dev, int rpmflags)
+{
+ unsigned long flags;
+ int retval;
+
+ might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe &&
+ dev->power.runtime_status != RPM_ACTIVE);
+
+ if (rpmflags & RPM_GET_PUT)
+ atomic_inc(&dev->power.usage_count);
+
+ spin_lock_irqsave(&dev->power.lock, flags);
+ retval = rpm_resume(dev, rpmflags);
+ spin_unlock_irqrestore(&dev->power.lock, flags);
+
+ return retval;
+}
+EXPORT_SYMBOL_GPL(__pm_runtime_resume);
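+
+/*
+ * Illustrative sketch (not part of this file): pm_runtime_get_sync() in
+ * <linux/pm_runtime.h> maps to __pm_runtime_resume(dev, RPM_GET_PUT). Note
+ * that it bumps the usage count even when the resume fails, so the error
+ * path still needs a put:
+ *
+ *	ret = pm_runtime_get_sync(dev);
+ *	if (ret < 0) {
+ *		pm_runtime_put_noidle(dev);
+ *		return ret;
+ *	}
+ */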
+
+/**
+ * pm_runtime_get_if_active - Conditionally bump up device usage counter.
+ * @dev: Device to handle.
+ * @ign_usage_count: Whether or not to look at the current usage counter value.
+ *
+ * Return -EINVAL if runtime PM is disabled for @dev.
+ *
+ * Otherwise, if the runtime PM status of @dev is %RPM_ACTIVE and either
+ * @ign_usage_count is %true or the runtime PM usage counter of @dev is not
+ * zero, increment the usage counter of @dev and return 1. Otherwise, return 0
+ * without changing the usage counter.
+ *
+ * If @ign_usage_count is %true, this function can be used to prevent suspending
+ * the device when its runtime PM status is %RPM_ACTIVE.
+ *
+ * If @ign_usage_count is %false, this function can be used to prevent
+ * suspending the device when both its runtime PM status is %RPM_ACTIVE and its
+ * runtime PM usage counter is not zero.
+ *
+ * The caller is responsible for decrementing the runtime PM usage counter of
+ * @dev after this function has returned a positive value for it.
+ */
+int pm_runtime_get_if_active(struct device *dev, bool ign_usage_count)
+{
+ unsigned long flags;
+ int retval;
+
+ spin_lock_irqsave(&dev->power.lock, flags);
+ if (dev->power.disable_depth > 0) {
+ retval = -EINVAL;
+ } else if (dev->power.runtime_status != RPM_ACTIVE) {
+ retval = 0;
+ } else if (ign_usage_count) {
+ retval = 1;
+ atomic_inc(&dev->power.usage_count);
+ } else {
+ retval = atomic_inc_not_zero(&dev->power.usage_count);
+ }
+ trace_rpm_usage_rcuidle(dev, 0);
+ spin_unlock_irqrestore(&dev->power.lock, flags);
+
+ return retval;
+}
+EXPORT_SYMBOL_GPL(pm_runtime_get_if_active);
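+
+/*
+ * Illustrative sketch (not part of this file): a driver could use this to
+ * touch the hardware only when it is already powered up, for instance when
+ * reading statistics; foo_read_stats() is a hypothetical helper.
+ *
+ *	if (pm_runtime_get_if_active(dev, true) > 0) {
+ *		foo_read_stats(dev);
+ *		pm_runtime_put(dev);
+ *	}
+ */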
+
+/**
+ * __pm_runtime_set_status - Set runtime PM status of a device.
+ * @dev: Device to handle.
+ * @status: New runtime PM status of the device.
+ *
+ * If runtime PM of the device is disabled or its power.runtime_error field is
+ * different from zero, the status may be changed either to RPM_ACTIVE, or to
+ * RPM_SUSPENDED, as long as that reflects the actual state of the device.
+ * However, if the device has a parent and the parent is not active, and the
+ * parent's power.ignore_children flag is unset, the device's status cannot be
+ * set to RPM_ACTIVE, so -EBUSY is returned in that case.
+ *
+ * If successful, __pm_runtime_set_status() clears the power.runtime_error field
+ * and the device parent's counter of unsuspended children is modified to
+ * reflect the new status. If the new status is RPM_SUSPENDED, an idle
+ * notification request for the parent is submitted.
+ *
+ * If @dev has any suppliers (as reflected by device links to them), and @status
+ * is RPM_ACTIVE, they will be activated upfront and if the activation of one
+ * of them fails, the status of @dev will be changed to RPM_SUSPENDED (instead
+ * of the @status value) and the suppliers will be deactivated on exit. The
+ * error returned by the failing supplier activation will be returned in that
+ * case.
+ */
+int __pm_runtime_set_status(struct device *dev, unsigned int status)
+{
+ struct device *parent = dev->parent;
+ bool notify_parent = false;
+ int error = 0;
+
+ if (status != RPM_ACTIVE && status != RPM_SUSPENDED)
+ return -EINVAL;
+
+ spin_lock_irq(&dev->power.lock);
+
+ /*
+ * Prevent PM-runtime from being enabled for the device or return an
+ * error if it is enabled already and working.
+ */
+ if (dev->power.runtime_error || dev->power.disable_depth)
+ dev->power.disable_depth++;
+ else
+ error = -EAGAIN;
+
+ spin_unlock_irq(&dev->power.lock);
+
+ if (error)
+ return error;
+
+ /*
+ * If the new status is RPM_ACTIVE, the suppliers can be activated
+ * upfront regardless of the current status, because next time
+ * rpm_put_suppliers() runs, the rpm_active refcounts of the links
+ * involved will be dropped down to one anyway.
+ */
+ if (status == RPM_ACTIVE) {
+ int idx = device_links_read_lock();
+
+ error = rpm_get_suppliers(dev);
+ if (error)
+ status = RPM_SUSPENDED;
+
+ device_links_read_unlock(idx);
+ }
+
+ spin_lock_irq(&dev->power.lock);
+
+ if (dev->power.runtime_status == status || !parent)
+ goto out_set;
+
+ if (status == RPM_SUSPENDED) {
+ atomic_add_unless(&parent->power.child_count, -1, 0);
+ notify_parent = !parent->power.ignore_children;
+ } else {
+ spin_lock_nested(&parent->power.lock, SINGLE_DEPTH_NESTING);
+
+ /*
+ * It is invalid to put an active child under a parent that is
+ * not active, has runtime PM enabled and the
+ * 'power.ignore_children' flag unset.
+ */
+ if (!parent->power.disable_depth
+ && !parent->power.ignore_children
+ && parent->power.runtime_status != RPM_ACTIVE) {
+ dev_err(dev, "runtime PM trying to activate child device %s but parent (%s) is not active\n",
+ dev_name(dev),
+ dev_name(parent));
+ error = -EBUSY;
+ } else if (dev->power.runtime_status == RPM_SUSPENDED) {
+ atomic_inc(&parent->power.child_count);
+ }
+
+ spin_unlock(&parent->power.lock);
+
+ if (error) {
+ status = RPM_SUSPENDED;
+ goto out;
+ }
+ }
+
+ out_set:
+ __update_runtime_status(dev, status);
+ if (!error)
+ dev->power.runtime_error = 0;
+
+ out:
+ spin_unlock_irq(&dev->power.lock);
+
+ if (notify_parent)
+ pm_request_idle(parent);
+
+ if (status == RPM_SUSPENDED) {
+ int idx = device_links_read_lock();
+
+ rpm_put_suppliers(dev);
+
+ device_links_read_unlock(idx);
+ }
+
+ pm_runtime_enable(dev);
+
+ return error;
+}
+EXPORT_SYMBOL_GPL(__pm_runtime_set_status);
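+
+/*
+ * Illustrative sketch (not part of this file): a probe path that powers the
+ * hardware up by hand would typically tell the core about it before enabling
+ * runtime PM, so that the status matches the real device state;
+ * foo_hw_power_on() is a hypothetical helper.
+ *
+ *	foo_hw_power_on(priv);
+ *	pm_runtime_set_active(dev);
+ *	pm_runtime_enable(dev);
+ */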
+
+/**
+ * __pm_runtime_barrier - Cancel pending requests and wait for completions.
+ * @dev: Device to handle.
+ *
+ * Flush all pending requests for the device from pm_wq and wait for all
+ * runtime PM operations involving the device in progress to complete.
+ *
+ * Should be called under dev->power.lock with interrupts disabled.
+ */
+static void __pm_runtime_barrier(struct device *dev)
+{
+ pm_runtime_deactivate_timer(dev);
+
+ if (dev->power.request_pending) {
+ dev->power.request = RPM_REQ_NONE;
+ spin_unlock_irq(&dev->power.lock);
+
+ cancel_work_sync(&dev->power.work);
+
+ spin_lock_irq(&dev->power.lock);
+ dev->power.request_pending = false;
+ }
+
+ if (dev->power.runtime_status == RPM_SUSPENDING
+ || dev->power.runtime_status == RPM_RESUMING
+ || dev->power.idle_notification) {
+ DEFINE_WAIT(wait);
+
+ /* Suspend, wake-up or idle notification in progress. */
+ for (;;) {
+ prepare_to_wait(&dev->power.wait_queue, &wait,
+ TASK_UNINTERRUPTIBLE);
+ if (dev->power.runtime_status != RPM_SUSPENDING
+ && dev->power.runtime_status != RPM_RESUMING
+ && !dev->power.idle_notification)
+ break;
+ spin_unlock_irq(&dev->power.lock);
+
+ schedule();
+
+ spin_lock_irq(&dev->power.lock);
+ }
+ finish_wait(&dev->power.wait_queue, &wait);
+ }
+}
+
+/**
+ * pm_runtime_barrier - Flush pending requests and wait for completions.
+ * @dev: Device to handle.
+ *
+ * Prevent the device from being suspended by incrementing its usage counter and
+ * if there's a pending resume request for the device, wake the device up.
+ * Next, make sure that all pending requests for the device have been flushed
+ * from pm_wq and wait for all runtime PM operations involving the device in
+ * progress to complete.
+ *
+ * Return value:
+ * 1, if there was a resume request pending and the device had to be woken up,
+ * 0, otherwise
+ */
+int pm_runtime_barrier(struct device *dev)
+{
+ int retval = 0;
+
+ pm_runtime_get_noresume(dev);
+ spin_lock_irq(&dev->power.lock);
+
+ if (dev->power.request_pending
+ && dev->power.request == RPM_REQ_RESUME) {
+ rpm_resume(dev, 0);
+ retval = 1;
+ }
+
+ __pm_runtime_barrier(dev);
+
+ spin_unlock_irq(&dev->power.lock);
+ pm_runtime_put_noidle(dev);
+
+ return retval;
+}
+EXPORT_SYMBOL_GPL(pm_runtime_barrier);
+
+/**
+ * __pm_runtime_disable - Disable runtime PM of a device.
+ * @dev: Device to handle.
+ * @check_resume: If set, check if there's a resume request for the device.
+ *
+ * Increment power.disable_depth for the device and if it was zero previously,
+ * cancel all pending runtime PM requests for the device and wait for all
+ * operations in progress to complete. The device can be either active or
+ * suspended after its runtime PM has been disabled.
+ *
+ * If @check_resume is set and there's a resume request pending when
+ * __pm_runtime_disable() is called and power.disable_depth is zero, the
+ * function will wake up the device before disabling its runtime PM.
+ */
+void __pm_runtime_disable(struct device *dev, bool check_resume)
+{
+ spin_lock_irq(&dev->power.lock);
+
+ if (dev->power.disable_depth > 0) {
+ dev->power.disable_depth++;
+ goto out;
+ }
+
+ /*
+ * Wake up the device if there's a resume request pending, because that
+ * means there probably is some I/O to process and disabling runtime PM
+ * shouldn't prevent the device from processing the I/O.
+ */
+ if (check_resume && dev->power.request_pending
+ && dev->power.request == RPM_REQ_RESUME) {
+ /*
+ * Prevent suspends and idle notifications from being carried
+ * out after we have woken up the device.
+ */
+ pm_runtime_get_noresume(dev);
+
+ rpm_resume(dev, 0);
+
+ pm_runtime_put_noidle(dev);
+ }
+
+ /* Update time accounting before disabling PM-runtime. */
+ update_pm_runtime_accounting(dev);
+
+ if (!dev->power.disable_depth++)
+ __pm_runtime_barrier(dev);
+
+ out:
+ spin_unlock_irq(&dev->power.lock);
+}
+EXPORT_SYMBOL_GPL(__pm_runtime_disable);
+
+/**
+ * pm_runtime_enable - Enable runtime PM of a device.
+ * @dev: Device to handle.
+ */
+void pm_runtime_enable(struct device *dev)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&dev->power.lock, flags);
+
+ if (dev->power.disable_depth > 0) {
+ dev->power.disable_depth--;
+
+		/* About to enable runtime PM, set accounting_timestamp to now */
+ if (!dev->power.disable_depth)
+ dev->power.accounting_timestamp = ktime_get_mono_fast_ns();
+ } else {
+ dev_warn(dev, "Unbalanced %s!\n", __func__);
+ }
+
+ WARN(!dev->power.disable_depth &&
+ dev->power.runtime_status == RPM_SUSPENDED &&
+ !dev->power.ignore_children &&
+ atomic_read(&dev->power.child_count) > 0,
+ "Enabling runtime PM for inactive device (%s) with active children\n",
+ dev_name(dev));
+
+ spin_unlock_irqrestore(&dev->power.lock, flags);
+}
+EXPORT_SYMBOL_GPL(pm_runtime_enable);
+
+/**
+ * pm_runtime_forbid - Block runtime PM of a device.
+ * @dev: Device to handle.
+ *
+ * Increase the device's usage count and clear its power.runtime_auto flag,
+ * so that it cannot be suspended at run time until pm_runtime_allow() is called
+ * for it.
+ */
+void pm_runtime_forbid(struct device *dev)
+{
+ spin_lock_irq(&dev->power.lock);
+ if (!dev->power.runtime_auto)
+ goto out;
+
+ dev->power.runtime_auto = false;
+ atomic_inc(&dev->power.usage_count);
+ rpm_resume(dev, 0);
+
+ out:
+ spin_unlock_irq(&dev->power.lock);
+}
+EXPORT_SYMBOL_GPL(pm_runtime_forbid);
+
+/**
+ * pm_runtime_allow - Unblock runtime PM of a device.
+ * @dev: Device to handle.
+ *
+ * Decrease the device's usage count and set its power.runtime_auto flag.
+ */
+void pm_runtime_allow(struct device *dev)
+{
+ spin_lock_irq(&dev->power.lock);
+ if (dev->power.runtime_auto)
+ goto out;
+
+ dev->power.runtime_auto = true;
+ if (atomic_dec_and_test(&dev->power.usage_count))
+ rpm_idle(dev, RPM_AUTO | RPM_ASYNC);
+ else
+ trace_rpm_usage_rcuidle(dev, RPM_AUTO | RPM_ASYNC);
+
+ out:
+ spin_unlock_irq(&dev->power.lock);
+}
+EXPORT_SYMBOL_GPL(pm_runtime_allow);
+
+/**
+ * pm_runtime_no_callbacks - Ignore runtime PM callbacks for a device.
+ * @dev: Device to handle.
+ *
+ * Set the power.no_callbacks flag, which tells the PM core that this
+ * device is power-managed through its parent and has no runtime PM
+ * callbacks of its own. The runtime sysfs attributes will be removed.
+ */
+void pm_runtime_no_callbacks(struct device *dev)
+{
+ spin_lock_irq(&dev->power.lock);
+ dev->power.no_callbacks = 1;
+ spin_unlock_irq(&dev->power.lock);
+ if (device_is_registered(dev))
+ rpm_sysfs_remove(dev);
+}
+EXPORT_SYMBOL_GPL(pm_runtime_no_callbacks);
+
+/**
+ * pm_runtime_irq_safe - Leave interrupts disabled during callbacks.
+ * @dev: Device to handle
+ *
+ * Set the power.irq_safe flag, which tells the PM core that the
+ * ->runtime_suspend() and ->runtime_resume() callbacks for this device should
+ * always be invoked with the spinlock held and interrupts disabled. It also
+ * causes the parent's usage counter to be permanently incremented, preventing
+ * the parent from runtime suspending -- otherwise an irq-safe child might have
+ * to wait for a non-irq-safe parent.
+ */
+void pm_runtime_irq_safe(struct device *dev)
+{
+ if (dev->parent)
+ pm_runtime_get_sync(dev->parent);
+ spin_lock_irq(&dev->power.lock);
+ dev->power.irq_safe = 1;
+ spin_unlock_irq(&dev->power.lock);
+}
+EXPORT_SYMBOL_GPL(pm_runtime_irq_safe);
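+
+/*
+ * Illustrative sketch (not part of this file): once a driver has called
+ * pm_runtime_irq_safe(dev) in its probe routine, even the synchronous helpers
+ * may be used in atomic context, for example from an interrupt handler,
+ * because the callbacks then run under the spinlock with interrupts disabled
+ * rather than sleeping; foo_handle_event() is a hypothetical helper.
+ *
+ *	static irqreturn_t foo_irq(int irq, void *data)
+ *	{
+ *		struct device *dev = data;
+ *
+ *		pm_runtime_get_sync(dev);
+ *		foo_handle_event(dev);
+ *		pm_runtime_put(dev);
+ *		return IRQ_HANDLED;
+ *	}
+ */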
+
+/**
+ * update_autosuspend - Handle a change to a device's autosuspend settings.
+ * @dev: Device to handle.
+ * @old_delay: The former autosuspend_delay value.
+ * @old_use: The former use_autosuspend value.
+ *
+ * Prevent runtime suspend if the new delay is negative and use_autosuspend is
+ * set; otherwise allow it. Send an idle notification if suspends are allowed.
+ *
+ * This function must be called under dev->power.lock with interrupts disabled.
+ */
+static void update_autosuspend(struct device *dev, int old_delay, int old_use)
+{
+ int delay = dev->power.autosuspend_delay;
+
+ /* Should runtime suspend be prevented now? */
+ if (dev->power.use_autosuspend && delay < 0) {
+
+ /* If it used to be allowed then prevent it. */
+ if (!old_use || old_delay >= 0) {
+ atomic_inc(&dev->power.usage_count);
+ rpm_resume(dev, 0);
+ } else {
+ trace_rpm_usage_rcuidle(dev, 0);
+ }
+ }
+
+ /* Runtime suspend should be allowed now. */
+ else {
+
+ /* If it used to be prevented then allow it. */
+ if (old_use && old_delay < 0)
+ atomic_dec(&dev->power.usage_count);
+
+ /* Maybe we can autosuspend now. */
+ rpm_idle(dev, RPM_AUTO);
+ }
+}
+
+/**
+ * pm_runtime_set_autosuspend_delay - Set a device's autosuspend_delay value.
+ * @dev: Device to handle.
+ * @delay: Value of the new delay in milliseconds.
+ *
+ * Set the device's power.autosuspend_delay value. If it changes to negative
+ * and the power.use_autosuspend flag is set, prevent runtime suspends. If it
+ * changes the other way, allow runtime suspends.
+ */
+void pm_runtime_set_autosuspend_delay(struct device *dev, int delay)
+{
+ int old_delay, old_use;
+
+ spin_lock_irq(&dev->power.lock);
+ old_delay = dev->power.autosuspend_delay;
+ old_use = dev->power.use_autosuspend;
+ dev->power.autosuspend_delay = delay;
+ update_autosuspend(dev, old_delay, old_use);
+ spin_unlock_irq(&dev->power.lock);
+}
+EXPORT_SYMBOL_GPL(pm_runtime_set_autosuspend_delay);
+
+/**
+ * __pm_runtime_use_autosuspend - Set a device's use_autosuspend flag.
+ * @dev: Device to handle.
+ * @use: New value for use_autosuspend.
+ *
+ * Set the device's power.use_autosuspend flag, and allow or prevent runtime
+ * suspends as needed.
+ */
+void __pm_runtime_use_autosuspend(struct device *dev, bool use)
+{
+ int old_delay, old_use;
+
+ spin_lock_irq(&dev->power.lock);
+ old_delay = dev->power.autosuspend_delay;
+ old_use = dev->power.use_autosuspend;
+ dev->power.use_autosuspend = use;
+ update_autosuspend(dev, old_delay, old_use);
+ spin_unlock_irq(&dev->power.lock);
+}
+EXPORT_SYMBOL_GPL(__pm_runtime_use_autosuspend);
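+
+/*
+ * Illustrative sketch (not part of this file): a typical autosuspend setup in
+ * a driver's probe routine, assuming the hardware is already powered up at
+ * that point. The 100 ms delay is an arbitrary example value; it pairs with
+ * the mark_last_busy/put_autosuspend pattern after each burst of activity.
+ *
+ *	pm_runtime_set_autosuspend_delay(dev, 100);
+ *	pm_runtime_use_autosuspend(dev);
+ *	pm_runtime_set_active(dev);
+ *	pm_runtime_enable(dev);
+ */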
+
+/**
+ * pm_runtime_init - Initialize runtime PM fields in given device object.
+ * @dev: Device object to initialize.
+ */
+void pm_runtime_init(struct device *dev)
+{
+ dev->power.runtime_status = RPM_SUSPENDED;
+ dev->power.idle_notification = false;
+
+ dev->power.disable_depth = 1;
+ atomic_set(&dev->power.usage_count, 0);
+
+ dev->power.runtime_error = 0;
+
+ atomic_set(&dev->power.child_count, 0);
+ pm_suspend_ignore_children(dev, false);
+ dev->power.runtime_auto = true;
+
+ dev->power.request_pending = false;
+ dev->power.request = RPM_REQ_NONE;
+ dev->power.deferred_resume = false;
+ dev->power.needs_force_resume = 0;
+ INIT_WORK(&dev->power.work, pm_runtime_work);
+
+ dev->power.timer_expires = 0;
+ hrtimer_init(&dev->power.suspend_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
+ dev->power.suspend_timer.function = pm_suspend_timer_fn;
+
+ init_waitqueue_head(&dev->power.wait_queue);
+}
+
+/**
+ * pm_runtime_reinit - Re-initialize runtime PM fields in given device object.
+ * @dev: Device object to re-initialize.
+ */
+void pm_runtime_reinit(struct device *dev)
+{
+ if (!pm_runtime_enabled(dev)) {
+ if (dev->power.runtime_status == RPM_ACTIVE)
+ pm_runtime_set_suspended(dev);
+ if (dev->power.irq_safe) {
+ spin_lock_irq(&dev->power.lock);
+ dev->power.irq_safe = 0;
+ spin_unlock_irq(&dev->power.lock);
+ if (dev->parent)
+ pm_runtime_put(dev->parent);
+ }
+ }
+}
+
+/**
+ * pm_runtime_remove - Prepare for removing a device from device hierarchy.
+ * @dev: Device object being removed from device hierarchy.
+ */
+void pm_runtime_remove(struct device *dev)
+{
+ __pm_runtime_disable(dev, false);
+ pm_runtime_reinit(dev);
+}
+
+/**
+ * pm_runtime_get_suppliers - Resume and reference-count supplier devices.
+ * @dev: Consumer device.
+ */
+void pm_runtime_get_suppliers(struct device *dev)
+{
+ struct device_link *link;
+ int idx;
+
+ idx = device_links_read_lock();
+
+ list_for_each_entry_rcu(link, &dev->links.suppliers, c_node,
+ device_links_read_lock_held())
+ if (link->flags & DL_FLAG_PM_RUNTIME) {
+ link->supplier_preactivated = true;
+ pm_runtime_get_sync(link->supplier);
+ refcount_inc(&link->rpm_active);
+ }
+
+ device_links_read_unlock(idx);
+}
+
+/**
+ * pm_runtime_put_suppliers - Drop references to supplier devices.
+ * @dev: Consumer device.
+ */
+void pm_runtime_put_suppliers(struct device *dev)
+{
+ struct device_link *link;
+ unsigned long flags;
+ bool put;
+ int idx;
+
+ idx = device_links_read_lock();
+
+ list_for_each_entry_rcu(link, &dev->links.suppliers, c_node,
+ device_links_read_lock_held())
+ if (link->supplier_preactivated) {
+ link->supplier_preactivated = false;
+ spin_lock_irqsave(&dev->power.lock, flags);
+ put = pm_runtime_status_suspended(dev) &&
+ refcount_dec_not_one(&link->rpm_active);
+ spin_unlock_irqrestore(&dev->power.lock, flags);
+ if (put)
+ pm_runtime_put(link->supplier);
+ }
+
+ device_links_read_unlock(idx);
+}
+
+void pm_runtime_new_link(struct device *dev)
+{
+ spin_lock_irq(&dev->power.lock);
+ dev->power.links_count++;
+ spin_unlock_irq(&dev->power.lock);
+}
+
+static void pm_runtime_drop_link_count(struct device *dev)
+{
+ spin_lock_irq(&dev->power.lock);
+ WARN_ON(dev->power.links_count == 0);
+ dev->power.links_count--;
+ spin_unlock_irq(&dev->power.lock);
+}
+
+/**
+ * pm_runtime_drop_link - Prepare for device link removal.
+ * @link: Device link going away.
+ *
+ * Drop the link count of the consumer end of @link and decrement the supplier
+ * device's runtime PM usage counter as many times as needed to drop all of the
+ * runtime PM references to it held on behalf of the consumer.
+ */
+void pm_runtime_drop_link(struct device_link *link)
+{
+ if (!(link->flags & DL_FLAG_PM_RUNTIME))
+ return;
+
+ pm_runtime_drop_link_count(link->consumer);
+ pm_runtime_release_supplier(link);
+ pm_request_idle(link->supplier);
+}
+
+static bool pm_runtime_need_not_resume(struct device *dev)
+{
+ return atomic_read(&dev->power.usage_count) <= 1 &&
+ (atomic_read(&dev->power.child_count) == 0 ||
+ dev->power.ignore_children);
+}
+
+/**
+ * pm_runtime_force_suspend - Force a device into suspend state if needed.
+ * @dev: Device to suspend.
+ *
+ * Disable runtime PM so we can safely check the device's runtime PM status and
+ * if it is active, invoke its ->runtime_suspend callback to suspend it and
+ * change its runtime PM status field to RPM_SUSPENDED. Also, if the device's
+ * usage and children counters don't indicate that the device was in use before
+ * the system-wide transition under way, decrement its parent's children counter
+ * (if there is a parent). Keep runtime PM disabled to preserve the state
+ * unless we encounter errors.
+ *
+ * Typically, this function is invoked from a system suspend callback to make
+ * sure the device is put into a low-power state; it should only be used
+ * during system-wide PM transitions to sleep states. It assumes that the
+ * analogous pm_runtime_force_resume() will be used to resume the device.
+ */
+int pm_runtime_force_suspend(struct device *dev)
+{
+ int (*callback)(struct device *);
+ int ret;
+
+ pm_runtime_disable(dev);
+ if (pm_runtime_status_suspended(dev))
+ return 0;
+
+ callback = RPM_GET_CALLBACK(dev, runtime_suspend);
+
+ ret = callback ? callback(dev) : 0;
+ if (ret)
+ goto err;
+
+ /*
+ * If the device can stay in suspend after the system-wide transition
+ * to the working state that will follow, drop the children counter of
+ * its parent, but set its status to RPM_SUSPENDED anyway in case this
+ * function will be called again for it in the meantime.
+ */
+ if (pm_runtime_need_not_resume(dev)) {
+ pm_runtime_set_suspended(dev);
+ } else {
+ __update_runtime_status(dev, RPM_SUSPENDED);
+ dev->power.needs_force_resume = 1;
+ }
+
+ return 0;
+
+err:
+ pm_runtime_enable(dev);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(pm_runtime_force_suspend);
+
+/**
+ * pm_runtime_force_resume - Force a device into resume state if needed.
+ * @dev: Device to resume.
+ *
+ * Prior to invoking this function, the caller is expected to have brought the
+ * device into a low-power state by a call to pm_runtime_force_suspend(). Here
+ * we reverse those actions and bring the device back to full power if it is
+ * expected to be used on system resume; otherwise, we defer the resume so that
+ * it is managed via runtime PM.
+ *
+ * Typically this function may be invoked from a system resume callback.
+ */
+int pm_runtime_force_resume(struct device *dev)
+{
+ int (*callback)(struct device *);
+ int ret = 0;
+
+ if (!pm_runtime_status_suspended(dev) || !dev->power.needs_force_resume)
+ goto out;
+
+ /*
+ * The value of the parent's children counter is correct already, so
+ * just update the status of the device.
+ */
+ __update_runtime_status(dev, RPM_ACTIVE);
+
+ callback = RPM_GET_CALLBACK(dev, runtime_resume);
+
+ ret = callback ? callback(dev) : 0;
+ if (ret) {
+ pm_runtime_set_suspended(dev);
+ goto out;
+ }
+
+ pm_runtime_mark_last_busy(dev);
+out:
+ dev->power.needs_force_resume = 0;
+ pm_runtime_enable(dev);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(pm_runtime_force_resume);
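+
+/*
+ * Illustrative sketch (not part of this file): drivers whose runtime PM
+ * callbacks already put the device into the desired sleep state can reuse
+ * them for system-wide transitions by wiring these helpers into their
+ * dev_pm_ops; foo_runtime_suspend/foo_runtime_resume are hypothetical
+ * callbacks.
+ *
+ *	static const struct dev_pm_ops foo_pm_ops = {
+ *		SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
+ *					pm_runtime_force_resume)
+ *		SET_RUNTIME_PM_OPS(foo_runtime_suspend,
+ *				   foo_runtime_resume, NULL)
+ *	};
+ */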
diff --git a/drivers/base/power/sysfs.c b/drivers/base/power/sysfs.c
new file mode 100644
index 000000000..a1474fb67
--- /dev/null
+++ b/drivers/base/power/sysfs.c
@@ -0,0 +1,838 @@
+// SPDX-License-Identifier: GPL-2.0
+/* sysfs entries for device PM */
+#include <linux/device.h>
+#include <linux/kobject.h>
+#include <linux/string.h>
+#include <linux/export.h>
+#include <linux/pm_qos.h>
+#include <linux/pm_runtime.h>
+#include <linux/pm_wakeup.h>
+#include <linux/atomic.h>
+#include <linux/jiffies.h>
+#include "power.h"
+
+/*
+ * control - Report/change current runtime PM setting of the device
+ *
+ * Runtime power management of a device can be blocked with the help of
+ * this attribute. All devices have one of the following two values for
+ * the power/control file:
+ *
+ * + "auto\n" to allow the device to be power managed at run time;
+ * + "on\n" to prevent the device from being power managed at run time;
+ *
+ * The default for all devices is "auto", which means that devices may be
+ * subject to automatic power management, depending on their drivers.
+ * Changing this attribute to "on" prevents the driver from power managing
+ * the device at run time. Doing that while the device is suspended causes
+ * it to be woken up.
+ *
+ * wakeup - Report/change current wakeup option for device
+ *
+ * Some devices support "wakeup" events, which are hardware signals
+ * used to activate devices from suspended or low power states. Such
+ * devices have one of three values for the sysfs power/wakeup file:
+ *
+ * + "enabled\n" to issue the events;
+ * + "disabled\n" not to do so; or
+ * + "\n" for temporary or permanent inability to issue wakeup.
+ *
+ * (For example, unconfigured USB devices can't issue wakeups.)
+ *
+ * Familiar examples of devices that can issue wakeup events include
+ * keyboards and mice (both PS2 and USB styles), power buttons, modems,
+ * "Wake-On-LAN" Ethernet links, GPIO lines, and more. Some events
+ * will wake the entire system from a suspend state; others may just
+ * wake up the device (if the system as a whole is already active).
+ * Some wakeup events use normal IRQ lines; other use special out
+ * of band signaling.
+ *
+ * It is the responsibility of device drivers to enable (or disable)
+ * wakeup signaling as part of changing device power states, respecting
+ * the policy choices provided through the driver model.
+ *
+ * Devices may not be able to generate wakeup events from all power
+ * states. Also, the events may be ignored in some configurations;
+ * for example, they might need help from other devices that aren't
+ * active, or which may have wakeup disabled. Some drivers rely on
+ * wakeup events internally (unless they are disabled), keeping
+ * their hardware in low power modes whenever they're unused. This
+ * saves runtime power, without requiring system-wide sleep states.
+ *
+ * async - Report/change current async suspend setting for the device
+ *
+ * Asynchronous suspend and resume of the device during system-wide power
+ * state transitions can be enabled by writing "enabled" to this file.
+ * Analogously, if "disabled" is written to this file, the device will be
+ * suspended and resumed synchronously.
+ *
+ * All devices have one of the following two values for power/async:
+ *
+ * + "enabled\n" to permit the asynchronous suspend/resume of the device;
+ * + "disabled\n" to forbid it;
+ *
+ * NOTE: It generally is unsafe to permit the asynchronous suspend/resume
+ * of a device unless it is certain that all of the PM dependencies of the
+ * device are known to the PM core. However, for some devices this
+ * attribute is set to "enabled" by bus type code or device drivers and in
+ * those cases it should be safe to leave the default value.
+ *
+ * autosuspend_delay_ms - Report/change a device's autosuspend_delay value
+ *
+ * Some drivers don't want to carry out a runtime suspend as soon as a
+ * device becomes idle; they want it always to remain idle for some period
+ * of time before suspending it. This period is the autosuspend_delay
+ * value (expressed in milliseconds) and it can be controlled by the user.
+ * If the value is negative then the device will never be runtime
+ * suspended.
+ *
+ * NOTE: The autosuspend_delay_ms attribute and the autosuspend_delay
+ * value are used only if the driver calls pm_runtime_use_autosuspend().
+ *
+ * wakeup_count - Report the number of wakeup events related to the device
+ */
+
+const char power_group_name[] = "power";
+EXPORT_SYMBOL_GPL(power_group_name);
+
+static const char ctrl_auto[] = "auto";
+static const char ctrl_on[] = "on";
+
+static ssize_t control_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ return sysfs_emit(buf, "%s\n",
+ dev->power.runtime_auto ? ctrl_auto : ctrl_on);
+}
+
+static ssize_t control_store(struct device *dev, struct device_attribute *attr,
+			     const char *buf, size_t n)
+{
+ device_lock(dev);
+ if (sysfs_streq(buf, ctrl_auto))
+ pm_runtime_allow(dev);
+ else if (sysfs_streq(buf, ctrl_on))
+ pm_runtime_forbid(dev);
+ else
+ n = -EINVAL;
+ device_unlock(dev);
+ return n;
+}
+
+static DEVICE_ATTR_RW(control);
+
+static ssize_t runtime_active_time_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ u64 tmp = pm_runtime_active_time(dev);
+
+ do_div(tmp, NSEC_PER_MSEC);
+
+ return sysfs_emit(buf, "%llu\n", tmp);
+}
+
+static DEVICE_ATTR_RO(runtime_active_time);
+
+static ssize_t runtime_suspended_time_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ u64 tmp = pm_runtime_suspended_time(dev);
+
+ do_div(tmp, NSEC_PER_MSEC);
+
+ return sysfs_emit(buf, "%llu\n", tmp);
+}
+
+static DEVICE_ATTR_RO(runtime_suspended_time);
+
+static ssize_t runtime_status_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ const char *output;
+
+ if (dev->power.runtime_error) {
+ output = "error";
+ } else if (dev->power.disable_depth) {
+ output = "unsupported";
+ } else {
+ switch (dev->power.runtime_status) {
+ case RPM_SUSPENDED:
+ output = "suspended";
+ break;
+ case RPM_SUSPENDING:
+ output = "suspending";
+ break;
+ case RPM_RESUMING:
+ output = "resuming";
+ break;
+ case RPM_ACTIVE:
+ output = "active";
+ break;
+ default:
+ return -EIO;
+ }
+ }
+ return sysfs_emit(buf, "%s\n", output);
+}
+
+static DEVICE_ATTR_RO(runtime_status);
+
+static ssize_t autosuspend_delay_ms_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ if (!dev->power.use_autosuspend)
+ return -EIO;
+
+ return sysfs_emit(buf, "%d\n", dev->power.autosuspend_delay);
+}
+
+static ssize_t autosuspend_delay_ms_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t n)
+{
+ long delay;
+
+ if (!dev->power.use_autosuspend)
+ return -EIO;
+
+ if (kstrtol(buf, 10, &delay) != 0 || delay != (int) delay)
+ return -EINVAL;
+
+ device_lock(dev);
+ pm_runtime_set_autosuspend_delay(dev, delay);
+ device_unlock(dev);
+ return n;
+}
+
+static DEVICE_ATTR_RW(autosuspend_delay_ms);
+
+static ssize_t pm_qos_resume_latency_us_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ s32 value = dev_pm_qos_requested_resume_latency(dev);
+
+ if (value == 0)
+ return sysfs_emit(buf, "n/a\n");
+ if (value == PM_QOS_RESUME_LATENCY_NO_CONSTRAINT)
+ value = 0;
+
+ return sysfs_emit(buf, "%d\n", value);
+}
+
+static ssize_t pm_qos_resume_latency_us_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t n)
+{
+ s32 value;
+ int ret;
+
+ if (!kstrtos32(buf, 0, &value)) {
+ /*
+ * Prevent users from writing negative or "no constraint" values
+ * directly.
+ */
+ if (value < 0 || value == PM_QOS_RESUME_LATENCY_NO_CONSTRAINT)
+ return -EINVAL;
+
+ if (value == 0)
+ value = PM_QOS_RESUME_LATENCY_NO_CONSTRAINT;
+ } else if (sysfs_streq(buf, "n/a")) {
+ value = 0;
+ } else {
+ return -EINVAL;
+ }
+
+ ret = dev_pm_qos_update_request(dev->power.qos->resume_latency_req,
+ value);
+ return ret < 0 ? ret : n;
+}
+
+static DEVICE_ATTR_RW(pm_qos_resume_latency_us);
+
+static ssize_t pm_qos_latency_tolerance_us_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ s32 value = dev_pm_qos_get_user_latency_tolerance(dev);
+
+ if (value < 0)
+ return sysfs_emit(buf, "%s\n", "auto");
+ if (value == PM_QOS_LATENCY_ANY)
+ return sysfs_emit(buf, "%s\n", "any");
+
+ return sysfs_emit(buf, "%d\n", value);
+}
+
+static ssize_t pm_qos_latency_tolerance_us_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t n)
+{
+ s32 value;
+ int ret;
+
+ if (kstrtos32(buf, 0, &value) == 0) {
+ /* Users can't write negative values directly */
+ if (value < 0)
+ return -EINVAL;
+ } else {
+ if (sysfs_streq(buf, "auto"))
+ value = PM_QOS_LATENCY_TOLERANCE_NO_CONSTRAINT;
+ else if (sysfs_streq(buf, "any"))
+ value = PM_QOS_LATENCY_ANY;
+ else
+ return -EINVAL;
+ }
+ ret = dev_pm_qos_update_user_latency_tolerance(dev, value);
+ return ret < 0 ? ret : n;
+}
+
+static DEVICE_ATTR_RW(pm_qos_latency_tolerance_us);
+
+static ssize_t pm_qos_no_power_off_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ return sysfs_emit(buf, "%d\n", !!(dev_pm_qos_requested_flags(dev)
+ & PM_QOS_FLAG_NO_POWER_OFF));
+}
+
+static ssize_t pm_qos_no_power_off_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t n)
+{
+ int ret;
+
+ if (kstrtoint(buf, 0, &ret))
+ return -EINVAL;
+
+ if (ret != 0 && ret != 1)
+ return -EINVAL;
+
+ ret = dev_pm_qos_update_flags(dev, PM_QOS_FLAG_NO_POWER_OFF, ret);
+ return ret < 0 ? ret : n;
+}
+
+static DEVICE_ATTR_RW(pm_qos_no_power_off);
+
+#ifdef CONFIG_PM_SLEEP
+static const char _enabled[] = "enabled";
+static const char _disabled[] = "disabled";
+
+static ssize_t wakeup_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ return sysfs_emit(buf, "%s\n", device_can_wakeup(dev)
+ ? (device_may_wakeup(dev) ? _enabled : _disabled)
+ : "");
+}
+
+static ssize_t wakeup_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t n)
+{
+ if (!device_can_wakeup(dev))
+ return -EINVAL;
+
+ if (sysfs_streq(buf, _enabled))
+ device_set_wakeup_enable(dev, 1);
+ else if (sysfs_streq(buf, _disabled))
+ device_set_wakeup_enable(dev, 0);
+ else
+ return -EINVAL;
+ return n;
+}
+
+static DEVICE_ATTR_RW(wakeup);
+
+static ssize_t wakeup_count_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ unsigned long count;
+ bool enabled = false;
+
+ spin_lock_irq(&dev->power.lock);
+ if (dev->power.wakeup) {
+ count = dev->power.wakeup->wakeup_count;
+ enabled = true;
+ }
+ spin_unlock_irq(&dev->power.lock);
+
+ if (!enabled)
+ return sysfs_emit(buf, "\n");
+ return sysfs_emit(buf, "%lu\n", count);
+}
+
+static DEVICE_ATTR_RO(wakeup_count);
+
+static ssize_t wakeup_active_count_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ unsigned long count;
+ bool enabled = false;
+
+ spin_lock_irq(&dev->power.lock);
+ if (dev->power.wakeup) {
+ count = dev->power.wakeup->active_count;
+ enabled = true;
+ }
+ spin_unlock_irq(&dev->power.lock);
+
+ if (!enabled)
+ return sysfs_emit(buf, "\n");
+ return sysfs_emit(buf, "%lu\n", count);
+}
+
+static DEVICE_ATTR_RO(wakeup_active_count);
+
+static ssize_t wakeup_abort_count_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ unsigned long count;
+ bool enabled = false;
+
+ spin_lock_irq(&dev->power.lock);
+ if (dev->power.wakeup) {
+ count = dev->power.wakeup->wakeup_count;
+ enabled = true;
+ }
+ spin_unlock_irq(&dev->power.lock);
+
+ if (!enabled)
+ return sysfs_emit(buf, "\n");
+ return sysfs_emit(buf, "%lu\n", count);
+}
+
+static DEVICE_ATTR_RO(wakeup_abort_count);
+
+static ssize_t wakeup_expire_count_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ unsigned long count;
+ bool enabled = false;
+
+ spin_lock_irq(&dev->power.lock);
+ if (dev->power.wakeup) {
+ count = dev->power.wakeup->expire_count;
+ enabled = true;
+ }
+ spin_unlock_irq(&dev->power.lock);
+
+ if (!enabled)
+ return sysfs_emit(buf, "\n");
+ return sysfs_emit(buf, "%lu\n", count);
+}
+
+static DEVICE_ATTR_RO(wakeup_expire_count);
+
+static ssize_t wakeup_active_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ unsigned int active;
+ bool enabled = false;
+
+ spin_lock_irq(&dev->power.lock);
+ if (dev->power.wakeup) {
+ active = dev->power.wakeup->active;
+ enabled = true;
+ }
+ spin_unlock_irq(&dev->power.lock);
+
+ if (!enabled)
+ return sysfs_emit(buf, "\n");
+ return sysfs_emit(buf, "%u\n", active);
+}
+
+static DEVICE_ATTR_RO(wakeup_active);
+
+static ssize_t wakeup_total_time_ms_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ s64 msec;
+ bool enabled = false;
+
+ spin_lock_irq(&dev->power.lock);
+ if (dev->power.wakeup) {
+ msec = ktime_to_ms(dev->power.wakeup->total_time);
+ enabled = true;
+ }
+ spin_unlock_irq(&dev->power.lock);
+
+ if (!enabled)
+ return sysfs_emit(buf, "\n");
+ return sysfs_emit(buf, "%lld\n", msec);
+}
+
+static DEVICE_ATTR_RO(wakeup_total_time_ms);
+
+static ssize_t wakeup_max_time_ms_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ s64 msec;
+ bool enabled = false;
+
+ spin_lock_irq(&dev->power.lock);
+ if (dev->power.wakeup) {
+ msec = ktime_to_ms(dev->power.wakeup->max_time);
+ enabled = true;
+ }
+ spin_unlock_irq(&dev->power.lock);
+
+ if (!enabled)
+ return sysfs_emit(buf, "\n");
+ return sysfs_emit(buf, "%lld\n", msec);
+}
+
+static DEVICE_ATTR_RO(wakeup_max_time_ms);
+
+static ssize_t wakeup_last_time_ms_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ s64 msec;
+ bool enabled = false;
+
+ spin_lock_irq(&dev->power.lock);
+ if (dev->power.wakeup) {
+ msec = ktime_to_ms(dev->power.wakeup->last_time);
+ enabled = true;
+ }
+ spin_unlock_irq(&dev->power.lock);
+
+ if (!enabled)
+ return sysfs_emit(buf, "\n");
+ return sysfs_emit(buf, "%lld\n", msec);
+}
+
+static inline int dpm_sysfs_wakeup_change_owner(struct device *dev, kuid_t kuid,
+ kgid_t kgid)
+{
+ if (dev->power.wakeup && dev->power.wakeup->dev)
+ return device_change_owner(dev->power.wakeup->dev, kuid, kgid);
+ return 0;
+}
+
+static DEVICE_ATTR_RO(wakeup_last_time_ms);
+
+#ifdef CONFIG_PM_AUTOSLEEP
+static ssize_t wakeup_prevent_sleep_time_ms_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ s64 msec;
+ bool enabled = false;
+
+ spin_lock_irq(&dev->power.lock);
+ if (dev->power.wakeup) {
+ msec = ktime_to_ms(dev->power.wakeup->prevent_sleep_time);
+ enabled = true;
+ }
+ spin_unlock_irq(&dev->power.lock);
+
+ if (!enabled)
+ return sysfs_emit(buf, "\n");
+ return sysfs_emit(buf, "%lld\n", msec);
+}
+
+static DEVICE_ATTR_RO(wakeup_prevent_sleep_time_ms);
+#endif /* CONFIG_PM_AUTOSLEEP */
+#else /* CONFIG_PM_SLEEP */
+static inline int dpm_sysfs_wakeup_change_owner(struct device *dev, kuid_t kuid,
+ kgid_t kgid)
+{
+ return 0;
+}
+#endif
+
+#ifdef CONFIG_PM_ADVANCED_DEBUG
+static ssize_t runtime_usage_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ return sysfs_emit(buf, "%d\n", atomic_read(&dev->power.usage_count));
+}
+static DEVICE_ATTR_RO(runtime_usage);
+
+static ssize_t runtime_active_kids_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ return sysfs_emit(buf, "%d\n", dev->power.ignore_children ?
+ 0 : atomic_read(&dev->power.child_count));
+}
+static DEVICE_ATTR_RO(runtime_active_kids);
+
+static ssize_t runtime_enabled_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ const char *output;
+
+ if (dev->power.disable_depth && !dev->power.runtime_auto)
+ output = "disabled & forbidden";
+ else if (dev->power.disable_depth)
+ output = "disabled";
+ else if (!dev->power.runtime_auto)
+ output = "forbidden";
+ else
+ output = "enabled";
+
+ return sysfs_emit(buf, "%s\n", output);
+}
+static DEVICE_ATTR_RO(runtime_enabled);
+
+#ifdef CONFIG_PM_SLEEP
+static ssize_t async_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ return sysfs_emit(buf, "%s\n",
+ device_async_suspend_enabled(dev) ?
+ _enabled : _disabled);
+}
+
+static ssize_t async_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t n)
+{
+ if (sysfs_streq(buf, _enabled))
+ device_enable_async_suspend(dev);
+ else if (sysfs_streq(buf, _disabled))
+ device_disable_async_suspend(dev);
+ else
+ return -EINVAL;
+ return n;
+}
+
+static DEVICE_ATTR_RW(async);
+
+#endif /* CONFIG_PM_SLEEP */
+#endif /* CONFIG_PM_ADVANCED_DEBUG */
+
+static struct attribute *power_attrs[] = {
+#ifdef CONFIG_PM_ADVANCED_DEBUG
+#ifdef CONFIG_PM_SLEEP
+ &dev_attr_async.attr,
+#endif
+ &dev_attr_runtime_status.attr,
+ &dev_attr_runtime_usage.attr,
+ &dev_attr_runtime_active_kids.attr,
+ &dev_attr_runtime_enabled.attr,
+#endif /* CONFIG_PM_ADVANCED_DEBUG */
+ NULL,
+};
+static const struct attribute_group pm_attr_group = {
+ .name = power_group_name,
+ .attrs = power_attrs,
+};
+
+static struct attribute *wakeup_attrs[] = {
+#ifdef CONFIG_PM_SLEEP
+ &dev_attr_wakeup.attr,
+ &dev_attr_wakeup_count.attr,
+ &dev_attr_wakeup_active_count.attr,
+ &dev_attr_wakeup_abort_count.attr,
+ &dev_attr_wakeup_expire_count.attr,
+ &dev_attr_wakeup_active.attr,
+ &dev_attr_wakeup_total_time_ms.attr,
+ &dev_attr_wakeup_max_time_ms.attr,
+ &dev_attr_wakeup_last_time_ms.attr,
+#ifdef CONFIG_PM_AUTOSLEEP
+ &dev_attr_wakeup_prevent_sleep_time_ms.attr,
+#endif
+#endif
+ NULL,
+};
+static const struct attribute_group pm_wakeup_attr_group = {
+ .name = power_group_name,
+ .attrs = wakeup_attrs,
+};
+
+static struct attribute *runtime_attrs[] = {
+#ifndef CONFIG_PM_ADVANCED_DEBUG
+ &dev_attr_runtime_status.attr,
+#endif
+ &dev_attr_control.attr,
+ &dev_attr_runtime_suspended_time.attr,
+ &dev_attr_runtime_active_time.attr,
+ &dev_attr_autosuspend_delay_ms.attr,
+ NULL,
+};
+static const struct attribute_group pm_runtime_attr_group = {
+ .name = power_group_name,
+ .attrs = runtime_attrs,
+};
+
+static struct attribute *pm_qos_resume_latency_attrs[] = {
+ &dev_attr_pm_qos_resume_latency_us.attr,
+ NULL,
+};
+static const struct attribute_group pm_qos_resume_latency_attr_group = {
+ .name = power_group_name,
+ .attrs = pm_qos_resume_latency_attrs,
+};
+
+static struct attribute *pm_qos_latency_tolerance_attrs[] = {
+ &dev_attr_pm_qos_latency_tolerance_us.attr,
+ NULL,
+};
+static const struct attribute_group pm_qos_latency_tolerance_attr_group = {
+ .name = power_group_name,
+ .attrs = pm_qos_latency_tolerance_attrs,
+};
+
+static struct attribute *pm_qos_flags_attrs[] = {
+ &dev_attr_pm_qos_no_power_off.attr,
+ NULL,
+};
+static const struct attribute_group pm_qos_flags_attr_group = {
+ .name = power_group_name,
+ .attrs = pm_qos_flags_attrs,
+};
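+
+/*
+ * All of the groups above share power_group_name, so their attributes are
+ * merged into the device's "power" sysfs directory, e.g.
+ * /sys/devices/.../power/control or /sys/devices/.../power/wakeup_count.
+ */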
+
+int dpm_sysfs_add(struct device *dev)
+{
+ int rc;
+
+ /* No need to create PM sysfs if explicitly disabled. */
+ if (device_pm_not_required(dev))
+ return 0;
+
+ rc = sysfs_create_group(&dev->kobj, &pm_attr_group);
+ if (rc)
+ return rc;
+
+ if (!pm_runtime_has_no_callbacks(dev)) {
+ rc = sysfs_merge_group(&dev->kobj, &pm_runtime_attr_group);
+ if (rc)
+ goto err_out;
+ }
+ if (device_can_wakeup(dev)) {
+ rc = sysfs_merge_group(&dev->kobj, &pm_wakeup_attr_group);
+ if (rc)
+ goto err_runtime;
+ }
+ if (dev->power.set_latency_tolerance) {
+ rc = sysfs_merge_group(&dev->kobj,
+ &pm_qos_latency_tolerance_attr_group);
+ if (rc)
+ goto err_wakeup;
+ }
+ rc = pm_wakeup_source_sysfs_add(dev);
+ if (rc)
+ goto err_latency;
+ return 0;
+
+ err_latency:
+ sysfs_unmerge_group(&dev->kobj, &pm_qos_latency_tolerance_attr_group);
+ err_wakeup:
+ sysfs_unmerge_group(&dev->kobj, &pm_wakeup_attr_group);
+ err_runtime:
+ sysfs_unmerge_group(&dev->kobj, &pm_runtime_attr_group);
+ err_out:
+ sysfs_remove_group(&dev->kobj, &pm_attr_group);
+ return rc;
+}
+
+int dpm_sysfs_change_owner(struct device *dev, kuid_t kuid, kgid_t kgid)
+{
+ int rc;
+
+ if (device_pm_not_required(dev))
+ return 0;
+
+ rc = sysfs_group_change_owner(&dev->kobj, &pm_attr_group, kuid, kgid);
+ if (rc)
+ return rc;
+
+ if (!pm_runtime_has_no_callbacks(dev)) {
+ rc = sysfs_group_change_owner(
+ &dev->kobj, &pm_runtime_attr_group, kuid, kgid);
+ if (rc)
+ return rc;
+ }
+
+ if (device_can_wakeup(dev)) {
+ rc = sysfs_group_change_owner(&dev->kobj, &pm_wakeup_attr_group,
+ kuid, kgid);
+ if (rc)
+ return rc;
+
+ rc = dpm_sysfs_wakeup_change_owner(dev, kuid, kgid);
+ if (rc)
+ return rc;
+ }
+
+ if (dev->power.set_latency_tolerance) {
+ rc = sysfs_group_change_owner(
+ &dev->kobj, &pm_qos_latency_tolerance_attr_group, kuid,
+ kgid);
+ if (rc)
+ return rc;
+ }
+ return 0;
+}
+
+int wakeup_sysfs_add(struct device *dev)
+{
+ int ret = sysfs_merge_group(&dev->kobj, &pm_wakeup_attr_group);
+
+ if (!ret)
+ kobject_uevent(&dev->kobj, KOBJ_CHANGE);
+
+ return ret;
+}
+
+void wakeup_sysfs_remove(struct device *dev)
+{
+ sysfs_unmerge_group(&dev->kobj, &pm_wakeup_attr_group);
+ kobject_uevent(&dev->kobj, KOBJ_CHANGE);
+}
+
+int pm_qos_sysfs_add_resume_latency(struct device *dev)
+{
+ return sysfs_merge_group(&dev->kobj, &pm_qos_resume_latency_attr_group);
+}
+
+void pm_qos_sysfs_remove_resume_latency(struct device *dev)
+{
+ sysfs_unmerge_group(&dev->kobj, &pm_qos_resume_latency_attr_group);
+}
+
+int pm_qos_sysfs_add_flags(struct device *dev)
+{
+ return sysfs_merge_group(&dev->kobj, &pm_qos_flags_attr_group);
+}
+
+void pm_qos_sysfs_remove_flags(struct device *dev)
+{
+ sysfs_unmerge_group(&dev->kobj, &pm_qos_flags_attr_group);
+}
+
+int pm_qos_sysfs_add_latency_tolerance(struct device *dev)
+{
+ return sysfs_merge_group(&dev->kobj,
+ &pm_qos_latency_tolerance_attr_group);
+}
+
+void pm_qos_sysfs_remove_latency_tolerance(struct device *dev)
+{
+ sysfs_unmerge_group(&dev->kobj, &pm_qos_latency_tolerance_attr_group);
+}
+
+void rpm_sysfs_remove(struct device *dev)
+{
+ sysfs_unmerge_group(&dev->kobj, &pm_runtime_attr_group);
+}
+
+void dpm_sysfs_remove(struct device *dev)
+{
+ if (device_pm_not_required(dev))
+ return;
+ sysfs_unmerge_group(&dev->kobj, &pm_qos_latency_tolerance_attr_group);
+ dev_pm_qos_constraints_destroy(dev);
+ rpm_sysfs_remove(dev);
+ sysfs_unmerge_group(&dev->kobj, &pm_wakeup_attr_group);
+ sysfs_remove_group(&dev->kobj, &pm_attr_group);
+}
diff --git a/drivers/base/power/trace.c b/drivers/base/power/trace.c
new file mode 100644
index 000000000..72b7a9233
--- /dev/null
+++ b/drivers/base/power/trace.c
@@ -0,0 +1,307 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * drivers/base/power/trace.c
+ *
+ * Copyright (C) 2006 Linus Torvalds
+ *
+ * Trace facility for suspend/resume problems, when none of the
+ * devices may be working.
+ */
+#define pr_fmt(fmt) "PM: " fmt
+
+#include <linux/pm-trace.h>
+#include <linux/export.h>
+#include <linux/rtc.h>
+#include <linux/suspend.h>
+#include <linux/init.h>
+
+#include <linux/mc146818rtc.h>
+
+#include "power.h"
+
+/*
+ * Horrid, horrid, horrid.
+ *
+ * It turns out that the _only_ piece of hardware that actually
+ * keeps its value across a hard boot (and, more importantly, the
+ * POST init sequence) is literally the realtime clock.
+ *
+ * Never mind that an RTC chip has 114 bytes (and often a whole
+ * other bank of an additional 128 bytes) of nice SRAM that is
+ * _designed_ to keep data - the POST will clear it. So we literally
+ * can just use the few bytes of actual time data, which means that
+ * we're really limited.
+ *
+ * It means, for example, that we can't use the seconds at all
+ * (since the time between the hang and the boot might be more
+ * than a minute), and we'd better not depend on the low bits of
+ * the minutes either.
+ *
+ * There are the wday fields etc, but I wouldn't guarantee those
+ * are dependable either. And if the date isn't valid, either the
+ * hw or POST will do strange things.
+ *
+ * So we're left with:
+ * - year: 0-99
+ * - month: 0-11
+ * - day-of-month: 1-28
+ * - hour: 0-23
+ * - min: (0-19)*3
+ *
+ * Giving us a total range of 0-16128000 (0xf61800), ie less
+ * than 24 bits of actual data we can save across reboots.
+ *
+ * And if your box can't boot in less than three minutes,
+ * you're screwed.
+ *
+ * Now, almost 24 bits of data is pitifully small, so we need
+ * to be pretty dense if we want to use it for anything nice.
+ * What we do is that instead of saving off nice readable info,
+ * we save off _hashes_ of information that we can hopefully
+ * regenerate after the reboot.
+ *
+ * In particular, this means that we might be unlucky, and hit
+ * a case where we have a hash collision, and we end up not
+ * being able to tell for certain exactly which case happened.
+ * But that's hopefully unlikely.
+ *
+ * What we do is to take the bits we can fit, and split them
+ * into three parts (16*997*1009 = 16095568), and use the values
+ * for:
+ * - 0-15: user-settable
+ * - 0-996: file + line number
+ * - 0-1008: device
+ */
+#define USERHASH (16)
+#define FILEHASH (997)
+#define DEVHASH (1009)
+
+#define DEVSEED (7919)
+
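+/*
+ * Illustrative example of the packing above (arbitrary values): user = 3,
+ * file hash = 512 and device hash = 700 give
+ *
+ *	n = 3 + 16 * (512 + 997 * 700) = 11174595
+ *
+ * and late_resume_init() recovers them with the inverse operations:
+ * 11174595 % 16 = 3, (11174595 / 16) % 997 = 512, (11174595 / 16) / 997 = 700.
+ */
+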
+bool pm_trace_rtc_abused __read_mostly;
+EXPORT_SYMBOL_GPL(pm_trace_rtc_abused);
+
+static unsigned int dev_hash_value;
+
+static int set_magic_time(unsigned int user, unsigned int file, unsigned int device)
+{
+ unsigned int n = user + USERHASH*(file + FILEHASH*device);
+
+ // June 7th, 2006
+ static struct rtc_time time = {
+ .tm_sec = 0,
+ .tm_min = 0,
+ .tm_hour = 0,
+ .tm_mday = 7,
+ .tm_mon = 5, // June - counting from zero
+ .tm_year = 106,
+ .tm_wday = 3,
+ .tm_yday = 160,
+ .tm_isdst = 1
+ };
+
+ time.tm_year = (n % 100);
+ n /= 100;
+ time.tm_mon = (n % 12);
+ n /= 12;
+ time.tm_mday = (n % 28) + 1;
+ n /= 28;
+ time.tm_hour = (n % 24);
+ n /= 24;
+ time.tm_min = (n % 20) * 3;
+ n /= 20;
+ mc146818_set_time(&time);
+ pm_trace_rtc_abused = true;
+ return n ? -1 : 0;
+}
+
+static unsigned int read_magic_time(void)
+{
+ struct rtc_time time;
+ unsigned int val;
+
+ if (mc146818_get_time(&time) < 0) {
+ pr_err("Unable to read current time from RTC\n");
+ return 0;
+ }
+
+ pr_info("RTC time: %ptRt, date: %ptRd\n", &time, &time);
+ val = time.tm_year; /* 100 years */
+ if (val > 100)
+ val -= 100;
+ val += time.tm_mon * 100; /* 12 months */
+ val += (time.tm_mday-1) * 100 * 12; /* 28 month-days */
+ val += time.tm_hour * 100 * 12 * 28; /* 24 hours */
+ val += (time.tm_min / 3) * 100 * 12 * 28 * 24; /* 20 3-minute intervals */
+ return val;
+}
+
+/*
+ * This is just the sdbm hash function with a user-supplied
+ * seed and final size parameter.
+ */
+static unsigned int hash_string(unsigned int seed, const char *data, unsigned int mod)
+{
+ unsigned char c;
+ while ((c = *data++) != 0) {
+ seed = (seed << 16) + (seed << 6) - seed + c;
+ }
+ return seed % mod;
+}
+
+void set_trace_device(struct device *dev)
+{
+ dev_hash_value = hash_string(DEVSEED, dev_name(dev), DEVHASH);
+}
+EXPORT_SYMBOL(set_trace_device);
+
+/*
+ * We could just take the "tracedata" index into the .tracedata
+ * section instead. Generating a hash of the data gives us a
+ * chance to work across kernel versions, and perhaps more
+ * importantly it also gives us valid/invalid check (ie we will
+ * likely not give totally bogus reports - if the hash matches,
+ * it's not any guarantee, but it's a high _likelihood_ that
+ * the match is valid).
+ */
+void generate_pm_trace(const void *tracedata, unsigned int user)
+{
+ unsigned short lineno = *(unsigned short *)tracedata;
+ const char *file = *(const char **)(tracedata + 2);
+ unsigned int user_hash_value, file_hash_value;
+
+ if (!x86_platform.legacy.rtc)
+ return;
+
+ user_hash_value = user % USERHASH;
+ file_hash_value = hash_string(lineno, file, FILEHASH);
+ set_magic_time(user_hash_value, file_hash_value, dev_hash_value);
+}
+EXPORT_SYMBOL(generate_pm_trace);
+
+extern char __tracedata_start[], __tracedata_end[];
+static int show_file_hash(unsigned int value)
+{
+ int match;
+ char *tracedata;
+
+ match = 0;
+ for (tracedata = __tracedata_start ; tracedata < __tracedata_end ;
+ tracedata += 2 + sizeof(unsigned long)) {
+ unsigned short lineno = *(unsigned short *)tracedata;
+ const char *file = *(const char **)(tracedata + 2);
+ unsigned int hash = hash_string(lineno, file, FILEHASH);
+ if (hash != value)
+ continue;
+ pr_info(" hash matches %s:%u\n", file, lineno);
+ match++;
+ }
+ return match;
+}
+
+static int show_dev_hash(unsigned int value)
+{
+ int match = 0;
+ struct list_head *entry;
+
+ device_pm_lock();
+ entry = dpm_list.prev;
+ while (entry != &dpm_list) {
+ struct device * dev = to_device(entry);
+ unsigned int hash = hash_string(DEVSEED, dev_name(dev), DEVHASH);
+ if (hash == value) {
+ dev_info(dev, "hash matches\n");
+ match++;
+ }
+ entry = entry->prev;
+ }
+ device_pm_unlock();
+ return match;
+}
+
+static unsigned int hash_value_early_read;
+
+int show_trace_dev_match(char *buf, size_t size)
+{
+ unsigned int value = hash_value_early_read / (USERHASH * FILEHASH);
+ int ret = 0;
+ struct list_head *entry;
+
+ /*
+ * It's possible that multiple devices will match the hash and we can't
+ * tell which is the culprit, so it's best to output them all.
+ */
+ device_pm_lock();
+ entry = dpm_list.prev;
+ while (size && entry != &dpm_list) {
+ struct device *dev = to_device(entry);
+ unsigned int hash = hash_string(DEVSEED, dev_name(dev),
+ DEVHASH);
+ if (hash == value) {
+ int len = snprintf(buf, size, "%s\n",
+ dev_driver_string(dev));
+ if (len > size)
+ len = size;
+ buf += len;
+ ret += len;
+ size -= len;
+ }
+ entry = entry->prev;
+ }
+ device_pm_unlock();
+ return ret;
+}
+
+static int
+pm_trace_notify(struct notifier_block *nb, unsigned long mode, void *_unused)
+{
+ switch (mode) {
+ case PM_POST_HIBERNATION:
+ case PM_POST_SUSPEND:
+ if (pm_trace_rtc_abused) {
+ pm_trace_rtc_abused = false;
+ pr_warn("Possible incorrect RTC due to pm_trace, please use 'ntpdate' or 'rdate' to reset it.\n");
+ }
+ break;
+ default:
+ break;
+ }
+ return 0;
+}
+
+static struct notifier_block pm_trace_nb = {
+ .notifier_call = pm_trace_notify,
+};
+
+static int __init early_resume_init(void)
+{
+ if (!x86_platform.legacy.rtc)
+ return 0;
+
+ hash_value_early_read = read_magic_time();
+ register_pm_notifier(&pm_trace_nb);
+ return 0;
+}
+
+static int __init late_resume_init(void)
+{
+ unsigned int val = hash_value_early_read;
+ unsigned int user, file, dev;
+
+ if (!x86_platform.legacy.rtc)
+ return 0;
+
+ user = val % USERHASH;
+ val = val / USERHASH;
+ file = val % FILEHASH;
+ val = val / FILEHASH;
+ dev = val /* % DEVHASH */;
+
+ pr_info(" Magic number: %d:%d:%d\n", user, file, dev);
+ show_file_hash(file);
+ show_dev_hash(dev);
+ return 0;
+}
+
+core_initcall(early_resume_init);
+late_initcall(late_resume_init);
diff --git a/drivers/base/power/wakeirq.c b/drivers/base/power/wakeirq.c
new file mode 100644
index 000000000..aea690c64
--- /dev/null
+++ b/drivers/base/power/wakeirq.c
@@ -0,0 +1,412 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Device wakeirq helper functions */
+#include <linux/device.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/slab.h>
+#include <linux/pm_runtime.h>
+#include <linux/pm_wakeirq.h>
+
+#include "power.h"
+
+/**
+ * dev_pm_attach_wake_irq - Attach device interrupt as a wake IRQ
+ * @dev: Device entry
+ * @irq: Device wake-up capable interrupt
+ * @wirq: Wake irq specific data
+ *
+ * Internal function to attach either a device IO interrupt or a
+ * dedicated wake-up interrupt as a wake IRQ.
+ */
+static int dev_pm_attach_wake_irq(struct device *dev, int irq,
+ struct wake_irq *wirq)
+{
+ unsigned long flags;
+
+ if (!dev || !wirq)
+ return -EINVAL;
+
+ spin_lock_irqsave(&dev->power.lock, flags);
+ if (dev_WARN_ONCE(dev, dev->power.wakeirq,
+ "wake irq already initialized\n")) {
+ spin_unlock_irqrestore(&dev->power.lock, flags);
+ return -EEXIST;
+ }
+
+ dev->power.wakeirq = wirq;
+ device_wakeup_attach_irq(dev, wirq);
+
+ spin_unlock_irqrestore(&dev->power.lock, flags);
+ return 0;
+}
+
+/**
+ * dev_pm_set_wake_irq - Attach device IO interrupt as wake IRQ
+ * @dev: Device entry
+ * @irq: Device IO interrupt
+ *
+ * Attach a device IO interrupt as a wake IRQ. The wake IRQ gets
+ * automatically configured for wake-up from suspend based
+ * on the device specific sysfs wakeup entry. Typically called
+ * during driver probe after calling device_init_wakeup().
+ */
+int dev_pm_set_wake_irq(struct device *dev, int irq)
+{
+ struct wake_irq *wirq;
+ int err;
+
+ if (irq < 0)
+ return -EINVAL;
+
+ wirq = kzalloc(sizeof(*wirq), GFP_KERNEL);
+ if (!wirq)
+ return -ENOMEM;
+
+ wirq->dev = dev;
+ wirq->irq = irq;
+
+ err = dev_pm_attach_wake_irq(dev, irq, wirq);
+ if (err)
+ kfree(wirq);
+
+ return err;
+}
+EXPORT_SYMBOL_GPL(dev_pm_set_wake_irq);
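+
+/*
+ * A minimal usage sketch (hypothetical platform driver, error handling
+ * trimmed): during probe, after device_init_wakeup(&pdev->dev, true),
+ * the driver would do something like
+ *
+ *	irq = platform_get_irq(pdev, 0);
+ *	if (irq >= 0)
+ *		dev_pm_set_wake_irq(&pdev->dev, irq);
+ *
+ * and call dev_pm_clear_wake_irq() from its remove path.
+ */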
+
+/**
+ * dev_pm_clear_wake_irq - Detach a device IO interrupt wake IRQ
+ * @dev: Device entry
+ *
+ * Detach a device wake IRQ and free resources.
+ *
+ * Note that it's OK for drivers to call this without calling
+ * dev_pm_set_wake_irq() as not all driver instances may have
+ * a wake IRQ configured. This avoids adding wake IRQ specific
+ * checks into the drivers.
+ */
+void dev_pm_clear_wake_irq(struct device *dev)
+{
+ struct wake_irq *wirq = dev->power.wakeirq;
+ unsigned long flags;
+
+ if (!wirq)
+ return;
+
+ spin_lock_irqsave(&dev->power.lock, flags);
+ device_wakeup_detach_irq(dev);
+ dev->power.wakeirq = NULL;
+ spin_unlock_irqrestore(&dev->power.lock, flags);
+
+ if (wirq->status & WAKE_IRQ_DEDICATED_ALLOCATED) {
+ free_irq(wirq->irq, wirq);
+ wirq->status &= ~WAKE_IRQ_DEDICATED_MASK;
+ }
+ kfree(wirq->name);
+ kfree(wirq);
+}
+EXPORT_SYMBOL_GPL(dev_pm_clear_wake_irq);
+
+/**
+ * handle_threaded_wake_irq - Handler for dedicated wake-up interrupts
+ * @irq: Device specific dedicated wake-up interrupt
+ * @_wirq: Wake IRQ data
+ *
+ * Some devices have a separate wake-up interrupt in addition to the
+ * device IO interrupt. The wake-up interrupt signals that a device
+ * should be woken up from its idle state. This handler uses device
+ * specific pm_runtime functions to wake the device, and then it's
+ * up to the device to do whatever it needs to. Note that as the
+ * device may need to restore context and start up regulators, we
+ * use a threaded IRQ.
+ *
+ * Also note that we are not resending the lost device interrupts.
+ * We assume that the wake-up interrupt just needs to wake up the
+ * device, and then device's pm_runtime_resume() can deal with the
+ * situation.
+ */
+static irqreturn_t handle_threaded_wake_irq(int irq, void *_wirq)
+{
+ struct wake_irq *wirq = _wirq;
+ int res;
+
+ /* Maybe abort suspend? */
+ if (irqd_is_wakeup_set(irq_get_irq_data(irq))) {
+ pm_wakeup_event(wirq->dev, 0);
+
+ return IRQ_HANDLED;
+ }
+
+ /* We don't want RPM_ASYNC or RPM_NOWAIT here */
+ res = pm_runtime_resume(wirq->dev);
+ if (res < 0)
+ dev_warn(wirq->dev,
+ "wake IRQ with no resume: %i\n", res);
+
+ return IRQ_HANDLED;
+}
+
+static int __dev_pm_set_dedicated_wake_irq(struct device *dev, int irq, unsigned int flag)
+{
+ struct wake_irq *wirq;
+ int err;
+
+ if (irq < 0)
+ return -EINVAL;
+
+ wirq = kzalloc(sizeof(*wirq), GFP_KERNEL);
+ if (!wirq)
+ return -ENOMEM;
+
+ wirq->name = kasprintf(GFP_KERNEL, "%s:wakeup", dev_name(dev));
+ if (!wirq->name) {
+ err = -ENOMEM;
+ goto err_free;
+ }
+
+ wirq->dev = dev;
+ wirq->irq = irq;
+ irq_set_status_flags(irq, IRQ_NOAUTOEN);
+
+ /* Prevent deferred spurious wakeirqs with disable_irq_nosync() */
+ irq_set_status_flags(irq, IRQ_DISABLE_UNLAZY);
+
+ /*
+ * Consumer device may need to power up and restore state
+ * so we use a threaded irq.
+ */
+ err = request_threaded_irq(irq, NULL, handle_threaded_wake_irq,
+ IRQF_ONESHOT, wirq->name, wirq);
+ if (err)
+ goto err_free_name;
+
+ err = dev_pm_attach_wake_irq(dev, irq, wirq);
+ if (err)
+ goto err_free_irq;
+
+ wirq->status = WAKE_IRQ_DEDICATED_ALLOCATED | flag;
+
+ return err;
+
+err_free_irq:
+ free_irq(irq, wirq);
+err_free_name:
+ kfree(wirq->name);
+err_free:
+ kfree(wirq);
+
+ return err;
+}
+
+/**
+ * dev_pm_set_dedicated_wake_irq - Request a dedicated wake-up interrupt
+ * @dev: Device entry
+ * @irq: Device wake-up interrupt
+ *
+ * Unless your hardware has separate wake-up interrupts in addition
+ * to the device IO interrupts, you don't need this.
+ *
+ * Sets up a threaded interrupt handler for a device that has
+ * a dedicated wake-up interrupt in addition to the device IO
+ * interrupt.
+ *
+ * The interrupt starts disabled, and needs to be managed for
+ * the device by the bus code or the device driver using
+ * dev_pm_enable_wake_irq*() and dev_pm_disable_wake_irq*()
+ * functions.
+ */
+int dev_pm_set_dedicated_wake_irq(struct device *dev, int irq)
+{
+ return __dev_pm_set_dedicated_wake_irq(dev, irq, 0);
+}
+EXPORT_SYMBOL_GPL(dev_pm_set_dedicated_wake_irq);
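+
+/*
+ * A minimal, hypothetical sketch: a driver with a separate wake-up interrupt
+ * calls dev_pm_set_dedicated_wake_irq(dev, wakeirq) once at probe time and
+ * then either lets the PM runtime core manage the interrupt or manages it
+ * from its own runtime PM callbacks using dev_pm_enable_wake_irq() and
+ * dev_pm_disable_wake_irq().
+ */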
+
+/**
+ * dev_pm_set_dedicated_wake_irq_reverse - Request a dedicated wake-up interrupt
+ * with reverse enable ordering
+ * @dev: Device entry
+ * @irq: Device wake-up interrupt
+ *
+ * Unless your hardware has separate wake-up interrupts in addition
+ * to the device IO interrupts, you don't need this.
+ *
+ * Sets up a threaded interrupt handler for a device that has a dedicated
+ * wake-up interrupt in addition to the device IO interrupt. It sets
+ * the status of WAKE_IRQ_DEDICATED_REVERSE to tell rpm_suspend()
+ * to enable dedicated wake-up interrupt after running the runtime suspend
+ * callback for @dev.
+ *
+ * The interrupt starts disabled, and needs to be managed for
+ * the device by the bus code or the device driver using
+ * dev_pm_enable_wake_irq*() and dev_pm_disable_wake_irq*()
+ * functions.
+ */
+int dev_pm_set_dedicated_wake_irq_reverse(struct device *dev, int irq)
+{
+ return __dev_pm_set_dedicated_wake_irq(dev, irq, WAKE_IRQ_DEDICATED_REVERSE);
+}
+EXPORT_SYMBOL_GPL(dev_pm_set_dedicated_wake_irq_reverse);
+
+/**
+ * dev_pm_enable_wake_irq - Enable device wake-up interrupt
+ * @dev: Device
+ *
+ * Optionally called from the bus code or the device driver for
+ * runtime_resume() to override the PM runtime core managed wake-up
+ * interrupt handling to enable the wake-up interrupt.
+ *
+ * Note that for runtime_suspend() the wake-up interrupts
+ * should be unconditionally enabled unlike for suspend()
+ * that is conditional.
+ */
+void dev_pm_enable_wake_irq(struct device *dev)
+{
+ struct wake_irq *wirq = dev->power.wakeirq;
+
+ if (wirq && (wirq->status & WAKE_IRQ_DEDICATED_ALLOCATED))
+ enable_irq(wirq->irq);
+}
+EXPORT_SYMBOL_GPL(dev_pm_enable_wake_irq);
+
+/**
+ * dev_pm_disable_wake_irq - Disable device wake-up interrupt
+ * @dev: Device
+ *
+ * Optionally called from the bus code or the device driver for
+ * runtime_suspend() to override the PM runtime core managed wake-up
+ * interrupt handling to disable the wake-up interrupt.
+ */
+void dev_pm_disable_wake_irq(struct device *dev)
+{
+ struct wake_irq *wirq = dev->power.wakeirq;
+
+ if (wirq && (wirq->status & WAKE_IRQ_DEDICATED_ALLOCATED))
+ disable_irq_nosync(wirq->irq);
+}
+EXPORT_SYMBOL_GPL(dev_pm_disable_wake_irq);
+
+/**
+ * dev_pm_enable_wake_irq_check - Checks and enables wake-up interrupt
+ * @dev: Device
+ * @can_change_status: Can change wake-up interrupt status
+ *
+ * Enables wakeirq conditionally. We need to enable wake-up interrupt
+ * lazily on the first rpm_suspend(). This is needed as the consumer device
+ * starts in RPM_SUSPENDED state, and the first pm_runtime_get() would
+ * otherwise try to disable an already disabled wakeirq. The wake-up interrupt
+ * starts disabled with IRQ_NOAUTOEN set.
+ *
+ * Should be only called from rpm_suspend() and rpm_resume() path.
+ * Caller must hold &dev->power.lock to change wirq->status
+ */
+void dev_pm_enable_wake_irq_check(struct device *dev,
+ bool can_change_status)
+{
+ struct wake_irq *wirq = dev->power.wakeirq;
+
+ if (!wirq || !(wirq->status & WAKE_IRQ_DEDICATED_MASK))
+ return;
+
+ if (likely(wirq->status & WAKE_IRQ_DEDICATED_MANAGED)) {
+ goto enable;
+ } else if (can_change_status) {
+ wirq->status |= WAKE_IRQ_DEDICATED_MANAGED;
+ goto enable;
+ }
+
+ return;
+
+enable:
+ if (!can_change_status || !(wirq->status & WAKE_IRQ_DEDICATED_REVERSE)) {
+ enable_irq(wirq->irq);
+ wirq->status |= WAKE_IRQ_DEDICATED_ENABLED;
+ }
+}
+
+/**
+ * dev_pm_disable_wake_irq_check - Checks and disables wake-up interrupt
+ * @dev: Device
+ * @cond_disable: if set, also check WAKE_IRQ_DEDICATED_REVERSE
+ *
+ * Disables wake-up interrupt conditionally based on status.
+ * Should be only called from rpm_suspend() and rpm_resume() path.
+ */
+void dev_pm_disable_wake_irq_check(struct device *dev, bool cond_disable)
+{
+ struct wake_irq *wirq = dev->power.wakeirq;
+
+ if (!wirq || !(wirq->status & WAKE_IRQ_DEDICATED_MASK))
+ return;
+
+ if (cond_disable && (wirq->status & WAKE_IRQ_DEDICATED_REVERSE))
+ return;
+
+ if (wirq->status & WAKE_IRQ_DEDICATED_MANAGED) {
+ wirq->status &= ~WAKE_IRQ_DEDICATED_ENABLED;
+ disable_irq_nosync(wirq->irq);
+ }
+}
+
+/**
+ * dev_pm_enable_wake_irq_complete - enable wake IRQ not enabled before
+ * @dev: Device using the wake IRQ
+ *
+ * Enable wake IRQ conditionally based on status, mainly used when the wake
+ * IRQ is to be enabled after running ->runtime_suspend(), which depends on
+ * WAKE_IRQ_DEDICATED_REVERSE.
+ *
+ * Should be only called from rpm_suspend() path.
+ */
+void dev_pm_enable_wake_irq_complete(struct device *dev)
+{
+ struct wake_irq *wirq = dev->power.wakeirq;
+
+ if (!wirq || !(wirq->status & WAKE_IRQ_DEDICATED_MASK))
+ return;
+
+ if (wirq->status & WAKE_IRQ_DEDICATED_MANAGED &&
+ wirq->status & WAKE_IRQ_DEDICATED_REVERSE)
+ enable_irq(wirq->irq);
+}
+
+/**
+ * dev_pm_arm_wake_irq - Arm device wake-up
+ * @wirq: Device wake-up interrupt
+ *
+ * Sets up the wake-up event conditionally based on the
+ * device_may_wakeup().
+ */
+void dev_pm_arm_wake_irq(struct wake_irq *wirq)
+{
+ if (!wirq)
+ return;
+
+ if (device_may_wakeup(wirq->dev)) {
+ if (wirq->status & WAKE_IRQ_DEDICATED_ALLOCATED &&
+ !(wirq->status & WAKE_IRQ_DEDICATED_ENABLED))
+ enable_irq(wirq->irq);
+
+ enable_irq_wake(wirq->irq);
+ }
+}
+
+/**
+ * dev_pm_disarm_wake_irq - Disarm device wake-up
+ * @wirq: Device wake-up interrupt
+ *
+ * Clears up the wake-up event conditionally based on the
+ * device_may_wakeup().
+ */
+void dev_pm_disarm_wake_irq(struct wake_irq *wirq)
+{
+ if (!wirq)
+ return;
+
+ if (device_may_wakeup(wirq->dev)) {
+ disable_irq_wake(wirq->irq);
+
+ if (wirq->status & WAKE_IRQ_DEDICATED_ALLOCATED &&
+ !(wirq->status & WAKE_IRQ_DEDICATED_ENABLED))
+ disable_irq_nosync(wirq->irq);
+ }
+}
diff --git a/drivers/base/power/wakeup.c b/drivers/base/power/wakeup.c
new file mode 100644
index 000000000..8997e0227
--- /dev/null
+++ b/drivers/base/power/wakeup.c
@@ -0,0 +1,1214 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * drivers/base/power/wakeup.c - System wakeup events framework
+ *
+ * Copyright (c) 2010 Rafael J. Wysocki <rjw@sisk.pl>, Novell Inc.
+ */
+#define pr_fmt(fmt) "PM: " fmt
+
+#include <linux/device.h>
+#include <linux/slab.h>
+#include <linux/sched/signal.h>
+#include <linux/capability.h>
+#include <linux/export.h>
+#include <linux/suspend.h>
+#include <linux/seq_file.h>
+#include <linux/debugfs.h>
+#include <linux/pm_wakeirq.h>
+#include <trace/events/power.h>
+
+#include "power.h"
+
+#ifndef CONFIG_SUSPEND
+suspend_state_t pm_suspend_target_state;
+#define pm_suspend_target_state (PM_SUSPEND_ON)
+#endif
+
+#define list_for_each_entry_rcu_locked(pos, head, member) \
+ list_for_each_entry_rcu(pos, head, member, \
+ srcu_read_lock_held(&wakeup_srcu))
+/*
+ * If set, the suspend/hibernate code will abort transitions to a sleep state
+ * if wakeup events are registered during or immediately before the transition.
+ */
+bool events_check_enabled __read_mostly;
+
+/* First wakeup IRQ seen by the kernel in the last cycle. */
+static unsigned int wakeup_irq[2] __read_mostly;
+static DEFINE_RAW_SPINLOCK(wakeup_irq_lock);
+
+/* If greater than 0 and the system is suspending, terminate the suspend. */
+static atomic_t pm_abort_suspend __read_mostly;
+
+/*
+ * Combined counters of registered wakeup events and wakeup events in progress.
+ * They need to be modified together atomically, so it's better to use one
+ * atomic variable to hold them both.
+ */
+static atomic_t combined_event_count = ATOMIC_INIT(0);
+
+#define IN_PROGRESS_BITS (sizeof(int) * 4)
+#define MAX_IN_PROGRESS ((1 << IN_PROGRESS_BITS) - 1)
+
+static void split_counters(unsigned int *cnt, unsigned int *inpr)
+{
+ unsigned int comb = atomic_read(&combined_event_count);
+
+ *cnt = (comb >> IN_PROGRESS_BITS);
+ *inpr = comb & MAX_IN_PROGRESS;
+}
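+
+/*
+ * For example, with a 32-bit int IN_PROGRESS_BITS is 16, so the low 16 bits
+ * of combined_event_count hold the number of events in progress and the high
+ * 16 bits hold the number of registered events: comb == 0x00030002 splits
+ * into cnt == 3 and inpr == 2. Adding MAX_IN_PROGRESS (0xffff) to the
+ * combined value increments cnt and decrements inpr in one atomic operation,
+ * which is what wakeup_source_deactivate() relies on.
+ */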
+
+/* A preserved old value of the events counter. */
+static unsigned int saved_count;
+
+static DEFINE_RAW_SPINLOCK(events_lock);
+
+static void pm_wakeup_timer_fn(struct timer_list *t);
+
+static LIST_HEAD(wakeup_sources);
+
+static DECLARE_WAIT_QUEUE_HEAD(wakeup_count_wait_queue);
+
+DEFINE_STATIC_SRCU(wakeup_srcu);
+
+static struct wakeup_source deleted_ws = {
+ .name = "deleted",
+ .lock = __SPIN_LOCK_UNLOCKED(deleted_ws.lock),
+};
+
+static DEFINE_IDA(wakeup_ida);
+
+/**
+ * wakeup_source_create - Create a struct wakeup_source object.
+ * @name: Name of the new wakeup source.
+ */
+struct wakeup_source *wakeup_source_create(const char *name)
+{
+ struct wakeup_source *ws;
+ const char *ws_name;
+ int id;
+
+ ws = kzalloc(sizeof(*ws), GFP_KERNEL);
+ if (!ws)
+ goto err_ws;
+
+ ws_name = kstrdup_const(name, GFP_KERNEL);
+ if (!ws_name)
+ goto err_name;
+ ws->name = ws_name;
+
+ id = ida_alloc(&wakeup_ida, GFP_KERNEL);
+ if (id < 0)
+ goto err_id;
+ ws->id = id;
+
+ return ws;
+
+err_id:
+ kfree_const(ws->name);
+err_name:
+ kfree(ws);
+err_ws:
+ return NULL;
+}
+EXPORT_SYMBOL_GPL(wakeup_source_create);
+
+/*
+ * Record the statistics of a wakeup source being deleted into the dummy
+ * "deleted" wakeup source.
+ */
+static void wakeup_source_record(struct wakeup_source *ws)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&deleted_ws.lock, flags);
+
+ if (ws->event_count) {
+ deleted_ws.total_time =
+ ktime_add(deleted_ws.total_time, ws->total_time);
+ deleted_ws.prevent_sleep_time =
+ ktime_add(deleted_ws.prevent_sleep_time,
+ ws->prevent_sleep_time);
+ deleted_ws.max_time =
+ ktime_compare(deleted_ws.max_time, ws->max_time) > 0 ?
+ deleted_ws.max_time : ws->max_time;
+ deleted_ws.event_count += ws->event_count;
+ deleted_ws.active_count += ws->active_count;
+ deleted_ws.relax_count += ws->relax_count;
+ deleted_ws.expire_count += ws->expire_count;
+ deleted_ws.wakeup_count += ws->wakeup_count;
+ }
+
+ spin_unlock_irqrestore(&deleted_ws.lock, flags);
+}
+
+static void wakeup_source_free(struct wakeup_source *ws)
+{
+ ida_free(&wakeup_ida, ws->id);
+ kfree_const(ws->name);
+ kfree(ws);
+}
+
+/**
+ * wakeup_source_destroy - Destroy a struct wakeup_source object.
+ * @ws: Wakeup source to destroy.
+ *
+ * Use only for wakeup source objects created with wakeup_source_create().
+ */
+void wakeup_source_destroy(struct wakeup_source *ws)
+{
+ if (!ws)
+ return;
+
+ __pm_relax(ws);
+ wakeup_source_record(ws);
+ wakeup_source_free(ws);
+}
+EXPORT_SYMBOL_GPL(wakeup_source_destroy);
+
+/**
+ * wakeup_source_add - Add given object to the list of wakeup sources.
+ * @ws: Wakeup source object to add to the list.
+ */
+void wakeup_source_add(struct wakeup_source *ws)
+{
+ unsigned long flags;
+
+ if (WARN_ON(!ws))
+ return;
+
+ spin_lock_init(&ws->lock);
+ timer_setup(&ws->timer, pm_wakeup_timer_fn, 0);
+ ws->active = false;
+
+ raw_spin_lock_irqsave(&events_lock, flags);
+ list_add_rcu(&ws->entry, &wakeup_sources);
+ raw_spin_unlock_irqrestore(&events_lock, flags);
+}
+EXPORT_SYMBOL_GPL(wakeup_source_add);
+
+/**
+ * wakeup_source_remove - Remove given object from the wakeup sources list.
+ * @ws: Wakeup source object to remove from the list.
+ */
+void wakeup_source_remove(struct wakeup_source *ws)
+{
+ unsigned long flags;
+
+ if (WARN_ON(!ws))
+ return;
+
+ raw_spin_lock_irqsave(&events_lock, flags);
+ list_del_rcu(&ws->entry);
+ raw_spin_unlock_irqrestore(&events_lock, flags);
+ synchronize_srcu(&wakeup_srcu);
+
+ del_timer_sync(&ws->timer);
+ /*
+ * Clear timer.function to make wakeup_source_not_registered() treat
+ * this wakeup source as not registered.
+ */
+ ws->timer.function = NULL;
+}
+EXPORT_SYMBOL_GPL(wakeup_source_remove);
+
+/**
+ * wakeup_source_register - Create wakeup source and add it to the list.
+ * @dev: Device this wakeup source is associated with (or NULL if virtual).
+ * @name: Name of the wakeup source to register.
+ */
+struct wakeup_source *wakeup_source_register(struct device *dev,
+ const char *name)
+{
+ struct wakeup_source *ws;
+ int ret;
+
+ ws = wakeup_source_create(name);
+ if (ws) {
+ if (!dev || device_is_registered(dev)) {
+ ret = wakeup_source_sysfs_add(dev, ws);
+ if (ret) {
+ wakeup_source_free(ws);
+ return NULL;
+ }
+ }
+ wakeup_source_add(ws);
+ }
+ return ws;
+}
+EXPORT_SYMBOL_GPL(wakeup_source_register);
+
+/**
+ * wakeup_source_unregister - Remove wakeup source from the list and destroy it.
+ * @ws: Wakeup source object to unregister.
+ */
+void wakeup_source_unregister(struct wakeup_source *ws)
+{
+ if (ws) {
+ wakeup_source_remove(ws);
+ if (ws->dev)
+ wakeup_source_sysfs_remove(ws);
+
+ wakeup_source_destroy(ws);
+ }
+}
+EXPORT_SYMBOL_GPL(wakeup_source_unregister);
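+
+/*
+ * A minimal, hypothetical example of a virtual (device-less) wakeup source:
+ *
+ *	ws = wakeup_source_register(NULL, "my_events");
+ *	...
+ *	__pm_stay_awake(ws);
+ *	... process the event ...
+ *	__pm_relax(ws);
+ *	...
+ *	wakeup_source_unregister(ws);
+ */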
+
+/**
+ * wakeup_sources_read_lock - Lock wakeup source list for read.
+ *
+ * Returns an index of srcu lock for struct wakeup_srcu.
+ * This index must be passed to the matching wakeup_sources_read_unlock().
+ */
+int wakeup_sources_read_lock(void)
+{
+ return srcu_read_lock(&wakeup_srcu);
+}
+EXPORT_SYMBOL_GPL(wakeup_sources_read_lock);
+
+/**
+ * wakeup_sources_read_unlock - Unlock wakeup source list.
+ * @idx: return value from corresponding wakeup_sources_read_lock()
+ */
+void wakeup_sources_read_unlock(int idx)
+{
+ srcu_read_unlock(&wakeup_srcu, idx);
+}
+EXPORT_SYMBOL_GPL(wakeup_sources_read_unlock);
+
+/**
+ * wakeup_sources_walk_start - Begin a walk on wakeup source list
+ *
+ * Returns first object of the list of wakeup sources.
+ *
+ * Note that to be safe, wakeup sources list needs to be locked by calling
+ * wakeup_sources_read_lock() for this.
+ */
+struct wakeup_source *wakeup_sources_walk_start(void)
+{
+ struct list_head *ws_head = &wakeup_sources;
+
+ return list_entry_rcu(ws_head->next, struct wakeup_source, entry);
+}
+EXPORT_SYMBOL_GPL(wakeup_sources_walk_start);
+
+/**
+ * wakeup_sources_walk_next - Get next wakeup source from the list
+ * @ws: Previous wakeup source object
+ *
+ * Note that to be safe, wakeup sources list needs to be locked by calling
+ * wakeup_sources_read_lock() for this.
+ */
+struct wakeup_source *wakeup_sources_walk_next(struct wakeup_source *ws)
+{
+ struct list_head *ws_head = &wakeup_sources;
+
+ return list_next_or_null_rcu(ws_head, &ws->entry,
+ struct wakeup_source, entry);
+}
+EXPORT_SYMBOL_GPL(wakeup_sources_walk_next);
+
+/**
+ * device_wakeup_attach - Attach a wakeup source object to a device object.
+ * @dev: Device to handle.
+ * @ws: Wakeup source object to attach to @dev.
+ *
+ * This causes @dev to be treated as a wakeup device.
+ */
+static int device_wakeup_attach(struct device *dev, struct wakeup_source *ws)
+{
+ spin_lock_irq(&dev->power.lock);
+ if (dev->power.wakeup) {
+ spin_unlock_irq(&dev->power.lock);
+ return -EEXIST;
+ }
+ dev->power.wakeup = ws;
+ if (dev->power.wakeirq)
+ device_wakeup_attach_irq(dev, dev->power.wakeirq);
+ spin_unlock_irq(&dev->power.lock);
+ return 0;
+}
+
+/**
+ * device_wakeup_enable - Enable given device to be a wakeup source.
+ * @dev: Device to handle.
+ *
+ * Create a wakeup source object, register it and attach it to @dev.
+ */
+int device_wakeup_enable(struct device *dev)
+{
+ struct wakeup_source *ws;
+ int ret;
+
+ if (!dev || !dev->power.can_wakeup)
+ return -EINVAL;
+
+ if (pm_suspend_target_state != PM_SUSPEND_ON)
+ dev_dbg(dev, "Suspicious %s() during system transition!\n", __func__);
+
+ ws = wakeup_source_register(dev, dev_name(dev));
+ if (!ws)
+ return -ENOMEM;
+
+ ret = device_wakeup_attach(dev, ws);
+ if (ret)
+ wakeup_source_unregister(ws);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(device_wakeup_enable);
+
+/**
+ * device_wakeup_attach_irq - Attach a wakeirq to a wakeup source
+ * @dev: Device to handle
+ * @wakeirq: Device specific wakeirq entry
+ *
+ * Attach a device wakeirq to the wakeup source so the device
+ * wake IRQ can be configured automatically for suspend and
+ * resume.
+ *
+ * Call under the device's power.lock lock.
+ */
+void device_wakeup_attach_irq(struct device *dev,
+ struct wake_irq *wakeirq)
+{
+ struct wakeup_source *ws;
+
+ ws = dev->power.wakeup;
+ if (!ws)
+ return;
+
+ if (ws->wakeirq)
+ dev_err(dev, "Leftover wakeup IRQ found, overriding\n");
+
+ ws->wakeirq = wakeirq;
+}
+
+/**
+ * device_wakeup_detach_irq - Detach a wakeirq from a wakeup source
+ * @dev: Device to handle
+ *
+ * Removes a device wakeirq from the wakeup source.
+ *
+ * Call under the device's power.lock lock.
+ */
+void device_wakeup_detach_irq(struct device *dev)
+{
+ struct wakeup_source *ws;
+
+ ws = dev->power.wakeup;
+ if (ws)
+ ws->wakeirq = NULL;
+}
+
+/**
+ * device_wakeup_arm_wake_irqs - Arm all registered device wake IRQs.
+ *
+ * Iterates over the list of device wakeirqs to arm them.
+ */
+void device_wakeup_arm_wake_irqs(void)
+{
+ struct wakeup_source *ws;
+ int srcuidx;
+
+ srcuidx = srcu_read_lock(&wakeup_srcu);
+ list_for_each_entry_rcu_locked(ws, &wakeup_sources, entry)
+ dev_pm_arm_wake_irq(ws->wakeirq);
+ srcu_read_unlock(&wakeup_srcu, srcuidx);
+}
+
+/**
+ * device_wakeup_disarm_wake_irqs - Disarm all registered device wake IRQs.
+ *
+ * Iterates over the list of device wakeirqs to disarm them.
+ */
+void device_wakeup_disarm_wake_irqs(void)
+{
+ struct wakeup_source *ws;
+ int srcuidx;
+
+ srcuidx = srcu_read_lock(&wakeup_srcu);
+ list_for_each_entry_rcu_locked(ws, &wakeup_sources, entry)
+ dev_pm_disarm_wake_irq(ws->wakeirq);
+ srcu_read_unlock(&wakeup_srcu, srcuidx);
+}
+
+/**
+ * device_wakeup_detach - Detach a device's wakeup source object from it.
+ * @dev: Device to detach the wakeup source object from.
+ *
+ * After it returns, @dev will not be treated as a wakeup device any more.
+ */
+static struct wakeup_source *device_wakeup_detach(struct device *dev)
+{
+ struct wakeup_source *ws;
+
+ spin_lock_irq(&dev->power.lock);
+ ws = dev->power.wakeup;
+ dev->power.wakeup = NULL;
+ spin_unlock_irq(&dev->power.lock);
+ return ws;
+}
+
+/**
+ * device_wakeup_disable - Do not regard a device as a wakeup source any more.
+ * @dev: Device to handle.
+ *
+ * Detach the @dev's wakeup source object from it, unregister this wakeup source
+ * object and destroy it.
+ */
+int device_wakeup_disable(struct device *dev)
+{
+ struct wakeup_source *ws;
+
+ if (!dev || !dev->power.can_wakeup)
+ return -EINVAL;
+
+ ws = device_wakeup_detach(dev);
+ wakeup_source_unregister(ws);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(device_wakeup_disable);
+
+/**
+ * device_set_wakeup_capable - Set/reset device wakeup capability flag.
+ * @dev: Device to handle.
+ * @capable: Whether or not @dev is capable of waking up the system from sleep.
+ *
+ * If @capable is set, set the @dev's power.can_wakeup flag and add its
+ * wakeup-related attributes to sysfs. Otherwise, unset the @dev's
+ * power.can_wakeup flag and remove its wakeup-related attributes from sysfs.
+ *
+ * This function may sleep and it can't be called from any context where
+ * sleeping is not allowed.
+ */
+void device_set_wakeup_capable(struct device *dev, bool capable)
+{
+ if (!!dev->power.can_wakeup == !!capable)
+ return;
+
+ dev->power.can_wakeup = capable;
+ if (device_is_registered(dev) && !list_empty(&dev->power.entry)) {
+ if (capable) {
+ int ret = wakeup_sysfs_add(dev);
+
+ if (ret)
+ dev_info(dev, "Wakeup sysfs attributes not added\n");
+ } else {
+ wakeup_sysfs_remove(dev);
+ }
+ }
+}
+EXPORT_SYMBOL_GPL(device_set_wakeup_capable);
+
+/**
+ * device_init_wakeup - Device wakeup initialization.
+ * @dev: Device to handle.
+ * @enable: Whether or not to enable @dev as a wakeup device.
+ *
+ * By default, most devices should leave wakeup disabled. The exceptions are
+ * devices that everyone expects to be wakeup sources: keyboards, power buttons,
+ * possibly network interfaces, etc. Also, devices that don't generate their
+ * own wakeup requests but merely forward requests from one bus to another
+ * (like PCI bridges) should have wakeup enabled by default.
+ */
+int device_init_wakeup(struct device *dev, bool enable)
+{
+ int ret = 0;
+
+ if (!dev)
+ return -EINVAL;
+
+ if (enable) {
+ device_set_wakeup_capable(dev, true);
+ ret = device_wakeup_enable(dev);
+ } else {
+ device_wakeup_disable(dev);
+ device_set_wakeup_capable(dev, false);
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(device_init_wakeup);
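+
+/*
+ * A minimal, hypothetical sketch for a wakeup-capable input device driver:
+ * call device_init_wakeup(dev, true) at probe time and report wakeup events
+ * from the interrupt handler with, for example, pm_wakeup_event(dev, 0),
+ * leaving the wakeup on/off policy to user space via the device's
+ * power/wakeup sysfs attribute.
+ */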
+
+/**
+ * device_set_wakeup_enable - Enable or disable a device to wake up the system.
+ * @dev: Device to handle.
+ */
+int device_set_wakeup_enable(struct device *dev, bool enable)
+{
+ return enable ? device_wakeup_enable(dev) : device_wakeup_disable(dev);
+}
+EXPORT_SYMBOL_GPL(device_set_wakeup_enable);
+
+/**
+ * wakeup_source_not_registered - validate the given wakeup source.
+ * @ws: Wakeup source to be validated.
+ */
+static bool wakeup_source_not_registered(struct wakeup_source *ws)
+{
+ /*
+ * Use the timer struct to check whether the given source was initialized
+ * by wakeup_source_add().
+ */
+ return ws->timer.function != pm_wakeup_timer_fn;
+}
+
+/*
+ * The functions below use the observation that each wakeup event starts a
+ * period in which the system should not be suspended. The moment this period
+ * will end depends on how the wakeup event is going to be processed after being
+ * detected and all of the possible cases can be divided into two distinct
+ * groups.
+ *
+ * First, a wakeup event may be detected by the same functional unit that will
+ * carry out the entire processing of it and possibly will pass it to user space
+ * for further processing. In that case the functional unit that has detected
+ * the event may later "close" the "no suspend" period associated with it
+ * directly as soon as it has been dealt with. The pair of pm_stay_awake() and
+ * pm_relax(), balanced with each other, is supposed to be used in such
+ * situations.
+ *
+ * Second, a wakeup event may be detected by one functional unit and processed
+ * by another one. In that case the unit that has detected it cannot really
+ * "close" the "no suspend" period associated with it, unless it knows in
+ * advance what's going to happen to the event during processing. This
+ * knowledge, however, may not be available to it, so it can simply specify time
+ * to wait before the system can be suspended and pass it as the second
+ * argument of pm_wakeup_event().
+ *
+ * It is valid to call pm_relax() after pm_wakeup_event(), in which case the
+ * "no suspend" period will be ended either by the pm_relax(), or by the timer
+ * function executed when the timer expires, whichever comes first.
+ */
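+
+/*
+ * Illustrative (hypothetical) calls matching the two cases described above.
+ *
+ * Case one, the detecting code also finishes the processing:
+ *
+ *	pm_stay_awake(dev);
+ *	... handle the event ...
+ *	pm_relax(dev);
+ *
+ * Case two, processing is handed off elsewhere, so allow up to 100 ms for it:
+ *
+ *	pm_wakeup_event(dev, 100);
+ */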
+
+/**
+ * wakeup_source_activate - Mark given wakeup source as active.
+ * @ws: Wakeup source to handle.
+ *
+ * Update the @ws' statistics and, if @ws has just been activated, notify the PM
+ * core of the event by incrementing the counter of wakeup events being
+ * processed.
+ */
+static void wakeup_source_activate(struct wakeup_source *ws)
+{
+ unsigned int cec;
+
+ if (WARN_ONCE(wakeup_source_not_registered(ws),
+ "unregistered wakeup source\n"))
+ return;
+
+ ws->active = true;
+ ws->active_count++;
+ ws->last_time = ktime_get();
+ if (ws->autosleep_enabled)
+ ws->start_prevent_time = ws->last_time;
+
+ /* Increment the counter of events in progress. */
+ cec = atomic_inc_return(&combined_event_count);
+
+ trace_wakeup_source_activate(ws->name, cec);
+}
+
+/**
+ * wakeup_source_report_event - Report wakeup event using the given source.
+ * @ws: Wakeup source to report the event for.
+ * @hard: If set, abort suspends in progress and wake up from suspend-to-idle.
+ */
+static void wakeup_source_report_event(struct wakeup_source *ws, bool hard)
+{
+ ws->event_count++;
+ /* This is racy, but the counter is approximate anyway. */
+ if (events_check_enabled)
+ ws->wakeup_count++;
+
+ if (!ws->active)
+ wakeup_source_activate(ws);
+
+ if (hard)
+ pm_system_wakeup();
+}
+
+/**
+ * __pm_stay_awake - Notify the PM core of a wakeup event.
+ * @ws: Wakeup source object associated with the source of the event.
+ *
+ * It is safe to call this function from interrupt context.
+ */
+void __pm_stay_awake(struct wakeup_source *ws)
+{
+ unsigned long flags;
+
+ if (!ws)
+ return;
+
+ spin_lock_irqsave(&ws->lock, flags);
+
+ wakeup_source_report_event(ws, false);
+ del_timer(&ws->timer);
+ ws->timer_expires = 0;
+
+ spin_unlock_irqrestore(&ws->lock, flags);
+}
+EXPORT_SYMBOL_GPL(__pm_stay_awake);
+
+/**
+ * pm_stay_awake - Notify the PM core that a wakeup event is being processed.
+ * @dev: Device the wakeup event is related to.
+ *
+ * Notify the PM core of a wakeup event (signaled by @dev) by calling
+ * __pm_stay_awake for the @dev's wakeup source object.
+ *
+ * Call this function after detecting a wakeup event if pm_relax() is going
+ * to be called directly after processing the event (and possibly passing it to
+ * user space for further processing).
+ */
+void pm_stay_awake(struct device *dev)
+{
+ unsigned long flags;
+
+ if (!dev)
+ return;
+
+ spin_lock_irqsave(&dev->power.lock, flags);
+ __pm_stay_awake(dev->power.wakeup);
+ spin_unlock_irqrestore(&dev->power.lock, flags);
+}
+EXPORT_SYMBOL_GPL(pm_stay_awake);
+
+#ifdef CONFIG_PM_AUTOSLEEP
+static void update_prevent_sleep_time(struct wakeup_source *ws, ktime_t now)
+{
+ ktime_t delta = ktime_sub(now, ws->start_prevent_time);
+ ws->prevent_sleep_time = ktime_add(ws->prevent_sleep_time, delta);
+}
+#else
+static inline void update_prevent_sleep_time(struct wakeup_source *ws,
+ ktime_t now) {}
+#endif
+
+/**
+ * wakeup_source_deactivate - Mark given wakeup source as inactive.
+ * @ws: Wakeup source to handle.
+ *
+ * Update the @ws' statistics and notify the PM core that the wakeup source has
+ * become inactive by decrementing the counter of wakeup events being processed
+ * and incrementing the counter of registered wakeup events.
+ */
+static void wakeup_source_deactivate(struct wakeup_source *ws)
+{
+ unsigned int cnt, inpr, cec;
+ ktime_t duration;
+ ktime_t now;
+
+ ws->relax_count++;
+ /*
+ * __pm_relax() may be called directly or from a timer function.
+ * If it is called directly right after the timer function has been
+ * started, but before the timer function calls __pm_relax(), it is
+ * possible that __pm_stay_awake() will be called in the meantime and
+ * will set ws->active. Then, ws->active may be cleared immediately
+ * by the __pm_relax() called from the timer function, but in such a
+ * case ws->relax_count will be different from ws->active_count.
+ */
+ if (ws->relax_count != ws->active_count) {
+ ws->relax_count--;
+ return;
+ }
+
+ ws->active = false;
+
+ now = ktime_get();
+ duration = ktime_sub(now, ws->last_time);
+ ws->total_time = ktime_add(ws->total_time, duration);
+ if (ktime_to_ns(duration) > ktime_to_ns(ws->max_time))
+ ws->max_time = duration;
+
+ ws->last_time = now;
+ del_timer(&ws->timer);
+ ws->timer_expires = 0;
+
+ if (ws->autosleep_enabled)
+ update_prevent_sleep_time(ws, now);
+
+ /*
+ * Increment the counter of registered wakeup events and decrement the
+ * counter of wakeup events in progress simultaneously.
+ */
+ cec = atomic_add_return(MAX_IN_PROGRESS, &combined_event_count);
+ trace_wakeup_source_deactivate(ws->name, cec);
+
+ split_counters(&cnt, &inpr);
+ if (!inpr && waitqueue_active(&wakeup_count_wait_queue))
+ wake_up(&wakeup_count_wait_queue);
+}
+
+/**
+ * __pm_relax - Notify the PM core that processing of a wakeup event has ended.
+ * @ws: Wakeup source object associated with the source of the event.
+ *
+ * Call this function for wakeup events whose processing started with calling
+ * __pm_stay_awake().
+ *
+ * It is safe to call it from interrupt context.
+ */
+void __pm_relax(struct wakeup_source *ws)
+{
+ unsigned long flags;
+
+ if (!ws)
+ return;
+
+ spin_lock_irqsave(&ws->lock, flags);
+ if (ws->active)
+ wakeup_source_deactivate(ws);
+ spin_unlock_irqrestore(&ws->lock, flags);
+}
+EXPORT_SYMBOL_GPL(__pm_relax);
+
+/**
+ * pm_relax - Notify the PM core that processing of a wakeup event has ended.
+ * @dev: Device that signaled the event.
+ *
+ * Execute __pm_relax() for the @dev's wakeup source object.
+ */
+void pm_relax(struct device *dev)
+{
+ unsigned long flags;
+
+ if (!dev)
+ return;
+
+ spin_lock_irqsave(&dev->power.lock, flags);
+ __pm_relax(dev->power.wakeup);
+ spin_unlock_irqrestore(&dev->power.lock, flags);
+}
+EXPORT_SYMBOL_GPL(pm_relax);
+
+/**
+ * pm_wakeup_timer_fn - Delayed finalization of a wakeup event.
+ * @t: Timer of the wakeup source object associated with the event source.
+ *
+ * Call wakeup_source_deactivate() for the wakeup source associated with @t
+ * if it is currently active, its timer has not been canceled and the
+ * expiration time of the timer is not in the future.
+ */
+static void pm_wakeup_timer_fn(struct timer_list *t)
+{
+ struct wakeup_source *ws = from_timer(ws, t, timer);
+ unsigned long flags;
+
+ spin_lock_irqsave(&ws->lock, flags);
+
+ if (ws->active && ws->timer_expires
+ && time_after_eq(jiffies, ws->timer_expires)) {
+ wakeup_source_deactivate(ws);
+ ws->expire_count++;
+ }
+
+ spin_unlock_irqrestore(&ws->lock, flags);
+}
+
+/**
+ * pm_wakeup_ws_event - Notify the PM core of a wakeup event.
+ * @ws: Wakeup source object associated with the event source.
+ * @msec: Anticipated event processing time (in milliseconds).
+ * @hard: If set, abort suspends in progress and wake up from suspend-to-idle.
+ *
+ * Notify the PM core of a wakeup event whose source is @ws that will take
+ * approximately @msec milliseconds to be processed by the kernel. If @ws is
+ * not active, activate it. If @msec is nonzero, set up the @ws' timer to
+ * execute pm_wakeup_timer_fn() in the future.
+ *
+ * It is safe to call this function from interrupt context.
+ */
+void pm_wakeup_ws_event(struct wakeup_source *ws, unsigned int msec, bool hard)
+{
+ unsigned long flags;
+ unsigned long expires;
+
+ if (!ws)
+ return;
+
+ spin_lock_irqsave(&ws->lock, flags);
+
+ wakeup_source_report_event(ws, hard);
+
+ if (!msec) {
+ wakeup_source_deactivate(ws);
+ goto unlock;
+ }
+
+ expires = jiffies + msecs_to_jiffies(msec);
+ if (!expires)
+ expires = 1;
+
+ if (!ws->timer_expires || time_after(expires, ws->timer_expires)) {
+ mod_timer(&ws->timer, expires);
+ ws->timer_expires = expires;
+ }
+
+ unlock:
+ spin_unlock_irqrestore(&ws->lock, flags);
+}
+EXPORT_SYMBOL_GPL(pm_wakeup_ws_event);
+
+/**
+ * pm_wakeup_dev_event - Notify the PM core of a wakeup event.
+ * @dev: Device the wakeup event is related to.
+ * @msec: Anticipated event processing time (in milliseconds).
+ * @hard: If set, abort suspends in progress and wake up from suspend-to-idle.
+ *
+ * Call pm_wakeup_ws_event() for the @dev's wakeup source object.
+ */
+void pm_wakeup_dev_event(struct device *dev, unsigned int msec, bool hard)
+{
+ unsigned long flags;
+
+ if (!dev)
+ return;
+
+ spin_lock_irqsave(&dev->power.lock, flags);
+ pm_wakeup_ws_event(dev->power.wakeup, msec, hard);
+ spin_unlock_irqrestore(&dev->power.lock, flags);
+}
+EXPORT_SYMBOL_GPL(pm_wakeup_dev_event);
+
+void pm_print_active_wakeup_sources(void)
+{
+ struct wakeup_source *ws;
+ int srcuidx, active = 0;
+ struct wakeup_source *last_activity_ws = NULL;
+
+ srcuidx = srcu_read_lock(&wakeup_srcu);
+ list_for_each_entry_rcu_locked(ws, &wakeup_sources, entry) {
+ if (ws->active) {
+ pm_pr_dbg("active wakeup source: %s\n", ws->name);
+ active = 1;
+ } else if (!active &&
+ (!last_activity_ws ||
+ ktime_to_ns(ws->last_time) >
+ ktime_to_ns(last_activity_ws->last_time))) {
+ last_activity_ws = ws;
+ }
+ }
+
+ if (!active && last_activity_ws)
+ pm_pr_dbg("last active wakeup source: %s\n",
+ last_activity_ws->name);
+ srcu_read_unlock(&wakeup_srcu, srcuidx);
+}
+EXPORT_SYMBOL_GPL(pm_print_active_wakeup_sources);
+
+/**
+ * pm_wakeup_pending - Check if power transition in progress should be aborted.
+ *
+ * Compare the current number of registered wakeup events with its preserved
+ * value from the past and return true if new wakeup events have been registered
+ * since the old value was stored. Also return true if the current number of
+ * wakeup events being processed is different from zero.
+ */
+bool pm_wakeup_pending(void)
+{
+ unsigned long flags;
+ bool ret = false;
+
+ raw_spin_lock_irqsave(&events_lock, flags);
+ if (events_check_enabled) {
+ unsigned int cnt, inpr;
+
+ split_counters(&cnt, &inpr);
+ ret = (cnt != saved_count || inpr > 0);
+ events_check_enabled = !ret;
+ }
+ raw_spin_unlock_irqrestore(&events_lock, flags);
+
+ if (ret) {
+ pm_pr_dbg("Wakeup pending, aborting suspend\n");
+ pm_print_active_wakeup_sources();
+ }
+
+ return ret || atomic_read(&pm_abort_suspend) > 0;
+}
+
+void pm_system_wakeup(void)
+{
+ atomic_inc(&pm_abort_suspend);
+ s2idle_wake();
+}
+EXPORT_SYMBOL_GPL(pm_system_wakeup);
+
+void pm_system_cancel_wakeup(void)
+{
+ atomic_dec_if_positive(&pm_abort_suspend);
+}
+
+void pm_wakeup_clear(unsigned int irq_number)
+{
+ raw_spin_lock_irq(&wakeup_irq_lock);
+
+ if (irq_number && wakeup_irq[0] == irq_number)
+ wakeup_irq[0] = wakeup_irq[1];
+ else
+ wakeup_irq[0] = 0;
+
+ wakeup_irq[1] = 0;
+
+ raw_spin_unlock_irq(&wakeup_irq_lock);
+
+ if (!irq_number)
+ atomic_set(&pm_abort_suspend, 0);
+}
+
+void pm_system_irq_wakeup(unsigned int irq_number)
+{
+ unsigned long flags;
+
+ raw_spin_lock_irqsave(&wakeup_irq_lock, flags);
+
+ if (wakeup_irq[0] == 0)
+ wakeup_irq[0] = irq_number;
+ else if (wakeup_irq[1] == 0)
+ wakeup_irq[1] = irq_number;
+ else
+ irq_number = 0;
+
+ raw_spin_unlock_irqrestore(&wakeup_irq_lock, flags);
+
+ if (irq_number)
+ pm_system_wakeup();
+}
+
+unsigned int pm_wakeup_irq(void)
+{
+ return wakeup_irq[0];
+}
+
+/**
+ * pm_get_wakeup_count - Read the number of registered wakeup events.
+ * @count: Address to store the value at.
+ * @block: Whether or not to block.
+ *
+ * Store the number of registered wakeup events at the address in @count. If
+ * @block is set, block until the current number of wakeup events being
+ * processed is zero.
+ *
+ * Return 'false' if the current number of wakeup events being processed is
+ * nonzero. Otherwise return 'true'.
+ */
+bool pm_get_wakeup_count(unsigned int *count, bool block)
+{
+ unsigned int cnt, inpr;
+
+ if (block) {
+ DEFINE_WAIT(wait);
+
+ for (;;) {
+ prepare_to_wait(&wakeup_count_wait_queue, &wait,
+ TASK_INTERRUPTIBLE);
+ split_counters(&cnt, &inpr);
+ if (inpr == 0 || signal_pending(current))
+ break;
+ pm_print_active_wakeup_sources();
+ schedule();
+ }
+ finish_wait(&wakeup_count_wait_queue, &wait);
+ }
+
+ split_counters(&cnt, &inpr);
+ *count = cnt;
+ return !inpr;
+}
+
+/**
+ * pm_save_wakeup_count - Save the current number of registered wakeup events.
+ * @count: Value to compare with the current number of registered wakeup events.
+ *
+ * If @count is equal to the current number of registered wakeup events and the
+ * current number of wakeup events being processed is zero, store @count as the
+ * old number of registered wakeup events for pm_wakeup_pending(), enable
+ * wakeup events detection and return 'true'. Otherwise disable wakeup events
+ * detection and return 'false'.
+ */
+bool pm_save_wakeup_count(unsigned int count)
+{
+ unsigned int cnt, inpr;
+ unsigned long flags;
+
+ events_check_enabled = false;
+ raw_spin_lock_irqsave(&events_lock, flags);
+ split_counters(&cnt, &inpr);
+ if (cnt == count && inpr == 0) {
+ saved_count = count;
+ events_check_enabled = true;
+ }
+ raw_spin_unlock_irqrestore(&events_lock, flags);
+ return events_check_enabled;
+}
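+
+/*
+ * Editorial illustration (not part of the original patch): a sketch of how
+ * pm_get_wakeup_count() and pm_save_wakeup_count() pair up behind a
+ * /sys/power/wakeup_count style interface: the count is read first and then
+ * written back unchanged to arm the check done by pm_wakeup_pending().
+ */
+#if 0	/* sketch only */
+static int example_arm_wakeup_check(void)
+{
+	unsigned int count;
+
+	if (!pm_get_wakeup_count(&count, false))
+		return -EBUSY;	/* wakeup events are still being processed */
+
+	if (!pm_save_wakeup_count(count))
+		return -EINVAL;	/* new wakeup events arrived in the meantime */
+
+	return 0;		/* events_check_enabled is now set */
+}
+#endif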
+
+#ifdef CONFIG_PM_AUTOSLEEP
+/**
+ * pm_wakep_autosleep_enabled - Modify autosleep_enabled for all wakeup sources.
+ * @set: Whether to set or to clear the autosleep_enabled flags.
+ */
+void pm_wakep_autosleep_enabled(bool set)
+{
+ struct wakeup_source *ws;
+ ktime_t now = ktime_get();
+ int srcuidx;
+
+ srcuidx = srcu_read_lock(&wakeup_srcu);
+ list_for_each_entry_rcu_locked(ws, &wakeup_sources, entry) {
+ spin_lock_irq(&ws->lock);
+ if (ws->autosleep_enabled != set) {
+ ws->autosleep_enabled = set;
+ if (ws->active) {
+ if (set)
+ ws->start_prevent_time = now;
+ else
+ update_prevent_sleep_time(ws, now);
+ }
+ }
+ spin_unlock_irq(&ws->lock);
+ }
+ srcu_read_unlock(&wakeup_srcu, srcuidx);
+}
+#endif /* CONFIG_PM_AUTOSLEEP */
+
+/**
+ * print_wakeup_source_stats - Print wakeup source statistics information.
+ * @m: seq_file to print the statistics into.
+ * @ws: Wakeup source object to print the statistics for.
+ */
+static int print_wakeup_source_stats(struct seq_file *m,
+ struct wakeup_source *ws)
+{
+ unsigned long flags;
+ ktime_t total_time;
+ ktime_t max_time;
+ unsigned long active_count;
+ ktime_t active_time;
+ ktime_t prevent_sleep_time;
+
+ spin_lock_irqsave(&ws->lock, flags);
+
+ total_time = ws->total_time;
+ max_time = ws->max_time;
+ prevent_sleep_time = ws->prevent_sleep_time;
+ active_count = ws->active_count;
+ if (ws->active) {
+ ktime_t now = ktime_get();
+
+ active_time = ktime_sub(now, ws->last_time);
+ total_time = ktime_add(total_time, active_time);
+ if (active_time > max_time)
+ max_time = active_time;
+
+ if (ws->autosleep_enabled)
+ prevent_sleep_time = ktime_add(prevent_sleep_time,
+ ktime_sub(now, ws->start_prevent_time));
+ } else {
+ active_time = 0;
+ }
+
+ seq_printf(m, "%-12s\t%lu\t\t%lu\t\t%lu\t\t%lu\t\t%lld\t\t%lld\t\t%lld\t\t%lld\t\t%lld\n",
+ ws->name, active_count, ws->event_count,
+ ws->wakeup_count, ws->expire_count,
+ ktime_to_ms(active_time), ktime_to_ms(total_time),
+ ktime_to_ms(max_time), ktime_to_ms(ws->last_time),
+ ktime_to_ms(prevent_sleep_time));
+
+ spin_unlock_irqrestore(&ws->lock, flags);
+
+ return 0;
+}
+
+static void *wakeup_sources_stats_seq_start(struct seq_file *m,
+ loff_t *pos)
+{
+ struct wakeup_source *ws;
+ loff_t n = *pos;
+ int *srcuidx = m->private;
+
+ if (n == 0) {
+ seq_puts(m, "name\t\tactive_count\tevent_count\twakeup_count\t"
+ "expire_count\tactive_since\ttotal_time\tmax_time\t"
+ "last_change\tprevent_suspend_time\n");
+ }
+
+ *srcuidx = srcu_read_lock(&wakeup_srcu);
+ list_for_each_entry_rcu_locked(ws, &wakeup_sources, entry) {
+ if (n-- <= 0)
+ return ws;
+ }
+
+ return NULL;
+}
+
+static void *wakeup_sources_stats_seq_next(struct seq_file *m,
+ void *v, loff_t *pos)
+{
+ struct wakeup_source *ws = v;
+ struct wakeup_source *next_ws = NULL;
+
+ ++(*pos);
+
+ list_for_each_entry_continue_rcu(ws, &wakeup_sources, entry) {
+ next_ws = ws;
+ break;
+ }
+
+ if (!next_ws)
+ print_wakeup_source_stats(m, &deleted_ws);
+
+ return next_ws;
+}
+
+static void wakeup_sources_stats_seq_stop(struct seq_file *m, void *v)
+{
+ int *srcuidx = m->private;
+
+ srcu_read_unlock(&wakeup_srcu, *srcuidx);
+}
+
+/**
+ * wakeup_sources_stats_seq_show - Print the statistics for one wakeup source.
+ * @m: seq_file to print the statistics into.
+ * @v: The wakeup_source object for the current iteration.
+ */
+static int wakeup_sources_stats_seq_show(struct seq_file *m, void *v)
+{
+ struct wakeup_source *ws = v;
+
+ print_wakeup_source_stats(m, ws);
+
+ return 0;
+}
+
+static const struct seq_operations wakeup_sources_stats_seq_ops = {
+ .start = wakeup_sources_stats_seq_start,
+ .next = wakeup_sources_stats_seq_next,
+ .stop = wakeup_sources_stats_seq_stop,
+ .show = wakeup_sources_stats_seq_show,
+};
+
+static int wakeup_sources_stats_open(struct inode *inode, struct file *file)
+{
+ return seq_open_private(file, &wakeup_sources_stats_seq_ops, sizeof(int));
+}
+
+static const struct file_operations wakeup_sources_stats_fops = {
+ .owner = THIS_MODULE,
+ .open = wakeup_sources_stats_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = seq_release_private,
+};
+
+static int __init wakeup_sources_debugfs_init(void)
+{
+ debugfs_create_file("wakeup_sources", S_IRUGO, NULL, NULL,
+ &wakeup_sources_stats_fops);
+ return 0;
+}
+
+postcore_initcall(wakeup_sources_debugfs_init);
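+
+/*
+ * Editorial note: with debugfs mounted at the usual location, the file
+ * created above appears as /sys/kernel/debug/wakeup_sources and prints one
+ * row per wakeup source under the header emitted by
+ * wakeup_sources_stats_seq_start():
+ *
+ *   name  active_count  event_count  wakeup_count  expire_count
+ *   active_since  total_time  max_time  last_change  prevent_suspend_time
+ */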
diff --git a/drivers/base/power/wakeup_stats.c b/drivers/base/power/wakeup_stats.c
new file mode 100644
index 000000000..d638259b8
--- /dev/null
+++ b/drivers/base/power/wakeup_stats.c
@@ -0,0 +1,217 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Wakeup statistics in sysfs
+ *
+ * Copyright (c) 2019 Linux Foundation
+ * Copyright (c) 2019 Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+ * Copyright (c) 2019 Google Inc.
+ */
+
+#include <linux/device.h>
+#include <linux/idr.h>
+#include <linux/init.h>
+#include <linux/kdev_t.h>
+#include <linux/kernel.h>
+#include <linux/kobject.h>
+#include <linux/slab.h>
+#include <linux/timekeeping.h>
+
+#include "power.h"
+
+static struct class *wakeup_class;
+
+#define wakeup_attr(_name) \
+static ssize_t _name##_show(struct device *dev, \
+ struct device_attribute *attr, char *buf) \
+{ \
+ struct wakeup_source *ws = dev_get_drvdata(dev); \
+ \
+ return sysfs_emit(buf, "%lu\n", ws->_name); \
+} \
+static DEVICE_ATTR_RO(_name)
+
+wakeup_attr(active_count);
+wakeup_attr(event_count);
+wakeup_attr(wakeup_count);
+wakeup_attr(expire_count);
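+
+/*
+ * Editorial illustration (not part of the original patch): for reference,
+ * wakeup_attr(active_count) above expands to roughly the following.
+ */
+#if 0	/* sketch only */
+static ssize_t active_count_show(struct device *dev,
+				 struct device_attribute *attr, char *buf)
+{
+	struct wakeup_source *ws = dev_get_drvdata(dev);
+
+	return sysfs_emit(buf, "%lu\n", ws->active_count);
+}
+static DEVICE_ATTR_RO(active_count);	/* defines dev_attr_active_count */
+#endif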
+
+static ssize_t active_time_ms_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct wakeup_source *ws = dev_get_drvdata(dev);
+ ktime_t active_time =
+ ws->active ? ktime_sub(ktime_get(), ws->last_time) : 0;
+
+ return sysfs_emit(buf, "%lld\n", ktime_to_ms(active_time));
+}
+static DEVICE_ATTR_RO(active_time_ms);
+
+static ssize_t total_time_ms_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct wakeup_source *ws = dev_get_drvdata(dev);
+ ktime_t active_time;
+ ktime_t total_time = ws->total_time;
+
+ if (ws->active) {
+ active_time = ktime_sub(ktime_get(), ws->last_time);
+ total_time = ktime_add(total_time, active_time);
+ }
+
+ return sysfs_emit(buf, "%lld\n", ktime_to_ms(total_time));
+}
+static DEVICE_ATTR_RO(total_time_ms);
+
+static ssize_t max_time_ms_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct wakeup_source *ws = dev_get_drvdata(dev);
+ ktime_t active_time;
+ ktime_t max_time = ws->max_time;
+
+ if (ws->active) {
+ active_time = ktime_sub(ktime_get(), ws->last_time);
+ if (active_time > max_time)
+ max_time = active_time;
+ }
+
+ return sysfs_emit(buf, "%lld\n", ktime_to_ms(max_time));
+}
+static DEVICE_ATTR_RO(max_time_ms);
+
+static ssize_t last_change_ms_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct wakeup_source *ws = dev_get_drvdata(dev);
+
+ return sysfs_emit(buf, "%lld\n", ktime_to_ms(ws->last_time));
+}
+static DEVICE_ATTR_RO(last_change_ms);
+
+static ssize_t name_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct wakeup_source *ws = dev_get_drvdata(dev);
+
+ return sysfs_emit(buf, "%s\n", ws->name);
+}
+static DEVICE_ATTR_RO(name);
+
+static ssize_t prevent_suspend_time_ms_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct wakeup_source *ws = dev_get_drvdata(dev);
+ ktime_t prevent_sleep_time = ws->prevent_sleep_time;
+
+ if (ws->active && ws->autosleep_enabled) {
+ prevent_sleep_time = ktime_add(prevent_sleep_time,
+ ktime_sub(ktime_get(), ws->start_prevent_time));
+ }
+
+ return sysfs_emit(buf, "%lld\n", ktime_to_ms(prevent_sleep_time));
+}
+static DEVICE_ATTR_RO(prevent_suspend_time_ms);
+
+static struct attribute *wakeup_source_attrs[] = {
+ &dev_attr_name.attr,
+ &dev_attr_active_count.attr,
+ &dev_attr_event_count.attr,
+ &dev_attr_wakeup_count.attr,
+ &dev_attr_expire_count.attr,
+ &dev_attr_active_time_ms.attr,
+ &dev_attr_total_time_ms.attr,
+ &dev_attr_max_time_ms.attr,
+ &dev_attr_last_change_ms.attr,
+ &dev_attr_prevent_suspend_time_ms.attr,
+ NULL,
+};
+ATTRIBUTE_GROUPS(wakeup_source);
+
+static void device_create_release(struct device *dev)
+{
+ kfree(dev);
+}
+
+static struct device *wakeup_source_device_create(struct device *parent,
+ struct wakeup_source *ws)
+{
+ struct device *dev = NULL;
+ int retval = -ENODEV;
+
+ dev = kzalloc(sizeof(*dev), GFP_KERNEL);
+ if (!dev) {
+ retval = -ENOMEM;
+ goto error;
+ }
+
+ device_initialize(dev);
+ dev->devt = MKDEV(0, 0);
+ dev->class = wakeup_class;
+ dev->parent = parent;
+ dev->groups = wakeup_source_groups;
+ dev->release = device_create_release;
+ dev_set_drvdata(dev, ws);
+ device_set_pm_not_required(dev);
+
+ retval = kobject_set_name(&dev->kobj, "wakeup%d", ws->id);
+ if (retval)
+ goto error;
+
+ retval = device_add(dev);
+ if (retval)
+ goto error;
+
+ return dev;
+
+error:
+ put_device(dev);
+ return ERR_PTR(retval);
+}
+
+/**
+ * wakeup_source_sysfs_add - Add wakeup_source attributes to sysfs.
+ * @parent: Device the given wakeup source is associated with (or NULL if virtual).
+ * @ws: Wakeup source to be added in sysfs.
+ */
+int wakeup_source_sysfs_add(struct device *parent, struct wakeup_source *ws)
+{
+ struct device *dev;
+
+ dev = wakeup_source_device_create(parent, ws);
+ if (IS_ERR(dev))
+ return PTR_ERR(dev);
+ ws->dev = dev;
+
+ return 0;
+}
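+
+/*
+ * Editorial note: with the "wakeup" class and "wakeup%d" device name used
+ * above, each registered wakeup source shows up as
+ * /sys/class/wakeup/wakeup<id>/ holding the read-only attributes name,
+ * active_count, event_count, wakeup_count, expire_count, active_time_ms,
+ * total_time_ms, max_time_ms, last_change_ms and prevent_suspend_time_ms.
+ * A hypothetical caller reaches this path via wakeup_source_register(),
+ * which invokes wakeup_source_sysfs_add() for the new source.
+ */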
+
+/**
+ * pm_wakeup_source_sysfs_add - Add wakeup_source attributes to sysfs
+ * for a device if they're missing.
+ * @parent: Device the given wakeup source is associated with.
+ */
+int pm_wakeup_source_sysfs_add(struct device *parent)
+{
+ if (!parent->power.wakeup || parent->power.wakeup->dev)
+ return 0;
+
+ return wakeup_source_sysfs_add(parent, parent->power.wakeup);
+}
+
+/**
+ * wakeup_source_sysfs_remove - Remove wakeup_source attributes from sysfs.
+ * @ws: Wakeup source to be removed from sysfs.
+ */
+void wakeup_source_sysfs_remove(struct wakeup_source *ws)
+{
+ device_unregister(ws->dev);
+}
+
+static int __init wakeup_sources_sysfs_init(void)
+{
+ wakeup_class = class_create(THIS_MODULE, "wakeup");
+
+ return PTR_ERR_OR_ZERO(wakeup_class);
+}
+postcore_initcall(wakeup_sources_sysfs_init);