author     Daniel Baumann <daniel.baumann@progress-linux.org>  2024-08-07 13:11:27 +0000
committer  Daniel Baumann <daniel.baumann@progress-linux.org>  2024-08-07 13:11:27 +0000
commit     34996e42f82bfd60bc2c191e5cae3c6ab233ec6c (patch)
tree       62db60558cbf089714b48daeabca82bf2b20b20e /drivers/base
parent     Adding debian version 6.8.12-1. (diff)
Merging upstream version 6.9.7.
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'drivers/base')
-rw-r--r--  drivers/base/base.h                  |  11
-rw-r--r--  drivers/base/bus.c                   |   9
-rw-r--r--  drivers/base/cacheinfo.c             |  50
-rw-r--r--  drivers/base/component.c             |   4
-rw-r--r--  drivers/base/core.c                  |  75
-rw-r--r--  drivers/base/cpu.c                   |   8
-rw-r--r--  drivers/base/dd.c                    |  32
-rw-r--r--  drivers/base/firmware_loader/main.c  |  16
-rw-r--r--  drivers/base/memory.c                |  23
-rw-r--r--  drivers/base/module.c                |  42
-rw-r--r--  drivers/base/node.c                  |   1
-rw-r--r--  drivers/base/platform-msi.c          | 125
-rw-r--r--  drivers/base/power/common.c          | 134
-rw-r--r--  drivers/base/power/main.c            | 267
-rw-r--r--  drivers/base/power/runtime.c         |  36
-rw-r--r--  drivers/base/property.c              |  67
-rw-r--r--  drivers/base/regmap/internal.h       |   1
-rw-r--r--  drivers/base/regmap/regcache-flat.c  |   2
-rw-r--r--  drivers/base/regmap/regcache.c       |   4
-rw-r--r--  drivers/base/regmap/regmap-kunit.c   |  66
-rw-r--r--  drivers/base/regmap/regmap.c         |  10
-rw-r--r--  drivers/base/swnode.c                |  13
22 files changed, 703 insertions(+), 293 deletions(-)
diff --git a/drivers/base/base.h b/drivers/base/base.h index eb4c0ace92..db4f910e8e 100644 --- a/drivers/base/base.h +++ b/drivers/base/base.h @@ -192,11 +192,14 @@ extern struct kset *devices_kset; void devices_kset_move_last(struct device *dev); #if defined(CONFIG_MODULES) && defined(CONFIG_SYSFS) -void module_add_driver(struct module *mod, struct device_driver *drv); +int module_add_driver(struct module *mod, struct device_driver *drv); void module_remove_driver(struct device_driver *drv); #else -static inline void module_add_driver(struct module *mod, - struct device_driver *drv) { } +static inline int module_add_driver(struct module *mod, + struct device_driver *drv) +{ + return 0; +} static inline void module_remove_driver(struct device_driver *drv) { } #endif @@ -207,7 +210,7 @@ static inline int devtmpfs_init(void) { return 0; } #endif #ifdef CONFIG_BLOCK -extern struct class block_class; +extern const struct class block_class; static inline bool is_blockdev(struct device *dev) { return dev->class == &block_class; diff --git a/drivers/base/bus.c b/drivers/base/bus.c index daee55c9b2..ffea0728b8 100644 --- a/drivers/base/bus.c +++ b/drivers/base/bus.c @@ -674,7 +674,12 @@ int bus_add_driver(struct device_driver *drv) if (error) goto out_del_list; } - module_add_driver(drv->owner, drv); + error = module_add_driver(drv->owner, drv); + if (error) { + printk(KERN_ERR "%s: failed to create module links for %s\n", + __func__, drv->name); + goto out_detach; + } error = driver_create_file(drv, &driver_attr_uevent); if (error) { @@ -699,6 +704,8 @@ int bus_add_driver(struct device_driver *drv) return 0; +out_detach: + driver_detach(drv); out_del_list: klist_del(&priv->knode_bus); out_unregister: diff --git a/drivers/base/cacheinfo.c b/drivers/base/cacheinfo.c index f1e79263fe..23b8cba4a2 100644 --- a/drivers/base/cacheinfo.c +++ b/drivers/base/cacheinfo.c @@ -898,6 +898,37 @@ err: return rc; } +static unsigned int cpu_map_shared_cache(bool online, unsigned int cpu, + cpumask_t **map) +{ + struct cacheinfo *llc, *sib_llc; + unsigned int sibling; + + if (!last_level_cache_is_valid(cpu)) + return 0; + + llc = per_cpu_cacheinfo_idx(cpu, cache_leaves(cpu) - 1); + + if (llc->type != CACHE_TYPE_DATA && llc->type != CACHE_TYPE_UNIFIED) + return 0; + + if (online) { + *map = &llc->shared_cpu_map; + return cpumask_weight(*map); + } + + /* shared_cpu_map of offlined CPU will be cleared, so use sibling map */ + for_each_cpu(sibling, &llc->shared_cpu_map) { + if (sibling == cpu || !last_level_cache_is_valid(sibling)) + continue; + sib_llc = per_cpu_cacheinfo_idx(sibling, cache_leaves(sibling) - 1); + *map = &sib_llc->shared_cpu_map; + return cpumask_weight(*map); + } + + return 0; +} + /* * Calculate the size of the per-CPU data cache slice. 
This can be * used to estimate the size of the data cache slice that can be used @@ -929,28 +960,31 @@ static void update_per_cpu_data_slice_size_cpu(unsigned int cpu) ci->per_cpu_data_slice_size = llc->size / nr_shared; } -static void update_per_cpu_data_slice_size(bool cpu_online, unsigned int cpu) +static void update_per_cpu_data_slice_size(bool cpu_online, unsigned int cpu, + cpumask_t *cpu_map) { unsigned int icpu; - for_each_online_cpu(icpu) { + for_each_cpu(icpu, cpu_map) { if (!cpu_online && icpu == cpu) continue; update_per_cpu_data_slice_size_cpu(icpu); + setup_pcp_cacheinfo(icpu); } } static int cacheinfo_cpu_online(unsigned int cpu) { int rc = detect_cache_attributes(cpu); + cpumask_t *cpu_map; if (rc) return rc; rc = cache_add_dev(cpu); if (rc) goto err; - update_per_cpu_data_slice_size(true, cpu); - setup_pcp_cacheinfo(); + if (cpu_map_shared_cache(true, cpu, &cpu_map)) + update_per_cpu_data_slice_size(true, cpu, cpu_map); return 0; err: free_cache_attributes(cpu); @@ -959,12 +993,16 @@ err: static int cacheinfo_cpu_pre_down(unsigned int cpu) { + cpumask_t *cpu_map; + unsigned int nr_shared; + + nr_shared = cpu_map_shared_cache(false, cpu, &cpu_map); if (cpumask_test_and_clear_cpu(cpu, &cache_dev_map)) cpu_cache_sysfs_exit(cpu); free_cache_attributes(cpu); - update_per_cpu_data_slice_size(false, cpu); - setup_pcp_cacheinfo(); + if (nr_shared > 1) + update_per_cpu_data_slice_size(false, cpu, cpu_map); return 0; } diff --git a/drivers/base/component.c b/drivers/base/component.c index 7dbf14a1d9..741497324d 100644 --- a/drivers/base/component.c +++ b/drivers/base/component.c @@ -751,7 +751,7 @@ static int __component_add(struct device *dev, const struct component_ops *ops, * component_bind_all(). See also &struct component_ops. * * @subcomponent must be nonzero and is used to differentiate between multiple - * components registerd on the same device @dev. These components are match + * components registered on the same device @dev. These components are match * using component_match_add_typed(). * * The component needs to be unregistered at driver unload/disconnect by @@ -781,7 +781,7 @@ EXPORT_SYMBOL_GPL(component_add_typed); * The component needs to be unregistered at driver unload/disconnect by * calling component_del(). * - * See also component_add_typed() for a variant that allows multipled different + * See also component_add_typed() for a variant that allows multiple different * components on the same device. 
*/ int component_add(struct device *dev, const struct component_ops *ops) diff --git a/drivers/base/core.c b/drivers/base/core.c index 7f39c813ce..8e3bd230b1 100644 --- a/drivers/base/core.c +++ b/drivers/base/core.c @@ -93,12 +93,13 @@ static int __fwnode_link_add(struct fwnode_handle *con, return 0; } -int fwnode_link_add(struct fwnode_handle *con, struct fwnode_handle *sup) +int fwnode_link_add(struct fwnode_handle *con, struct fwnode_handle *sup, + u8 flags) { int ret; mutex_lock(&fwnode_link_lock); - ret = __fwnode_link_add(con, sup, 0); + ret = __fwnode_link_add(con, sup, flags); mutex_unlock(&fwnode_link_lock); return ret; } @@ -1026,7 +1027,8 @@ static struct fwnode_handle *fwnode_links_check_suppliers( return NULL; list_for_each_entry(link, &fwnode->suppliers, c_hook) - if (!(link->flags & FWLINK_FLAG_CYCLE)) + if (!(link->flags & + (FWLINK_FLAG_CYCLE | FWLINK_FLAG_IGNORE))) return link->supplier; return NULL; @@ -1886,6 +1888,7 @@ static void fw_devlink_unblock_consumers(struct device *dev) device_links_write_unlock(); } +#define get_dev_from_fwnode(fwnode) get_device((fwnode)->dev) static bool fwnode_init_without_drv(struct fwnode_handle *fwnode) { @@ -1917,6 +1920,63 @@ static bool fwnode_ancestor_init_without_drv(struct fwnode_handle *fwnode) } /** + * fwnode_is_ancestor_of - Test if @ancestor is ancestor of @child + * @ancestor: Firmware which is tested for being an ancestor + * @child: Firmware which is tested for being the child + * + * A node is considered an ancestor of itself too. + * + * Return: true if @ancestor is an ancestor of @child. Otherwise, returns false. + */ +static bool fwnode_is_ancestor_of(const struct fwnode_handle *ancestor, + const struct fwnode_handle *child) +{ + struct fwnode_handle *parent; + + if (IS_ERR_OR_NULL(ancestor)) + return false; + + if (child == ancestor) + return true; + + fwnode_for_each_parent_node(child, parent) { + if (parent == ancestor) { + fwnode_handle_put(parent); + return true; + } + } + return false; +} + +/** + * fwnode_get_next_parent_dev - Find device of closest ancestor fwnode + * @fwnode: firmware node + * + * Given a firmware node (@fwnode), this function finds its closest ancestor + * firmware node that has a corresponding struct device and returns that struct + * device. + * + * The caller is responsible for calling put_device() on the returned device + * pointer. + * + * Return: a pointer to the device of the @fwnode's closest ancestor. + */ +static struct device *fwnode_get_next_parent_dev(const struct fwnode_handle *fwnode) +{ + struct fwnode_handle *parent; + struct device *dev; + + fwnode_for_each_parent_node(fwnode, parent) { + dev = get_dev_from_fwnode(parent); + if (dev) { + fwnode_handle_put(parent); + return dev; + } + } + return NULL; +} + +/** * __fw_devlink_relax_cycles - Relax and mark dependency cycles. * @con: Potential consumer device. * @sup_handle: Potential supplier's fwnode. 
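The core.c hunks above move fwnode_is_ancestor_of() and fwnode_get_next_parent_dev() into the driver core and give fwnode_link_add() a flags argument, with FWLINK_FLAG_IGNORE links excluded from the supplier check in fwnode_links_check_suppliers(). A minimal caller-side sketch, not taken from the patch — the helper name and the "optional supplier" policy are invented; only fwnode_link_add() and FWLINK_FLAG_IGNORE come from the hunks above:

```c
/*
 * Illustrative sketch: recording a consumer/supplier fwnode link with
 * the new flags argument. Links flagged FWLINK_FLAG_IGNORE are skipped
 * both when checking for missing suppliers and when relaxing dependency
 * cycles, so they never defer the consumer's probe.
 */
#include <linux/fwnode.h>

static int example_record_supplier(struct fwnode_handle *con,
				   struct fwnode_handle *sup,
				   bool optional)
{
	/* Before this patch, the internal flags were hardcoded to 0. */
	return fwnode_link_add(con, sup, optional ? FWLINK_FLAG_IGNORE : 0);
}
```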
@@ -1977,6 +2037,9 @@ static bool __fw_devlink_relax_cycles(struct device *con, } list_for_each_entry(link, &sup_handle->suppliers, c_hook) { + if (link->flags & FWLINK_FLAG_IGNORE) + continue; + if (__fw_devlink_relax_cycles(con, link->supplier)) { __fwnode_link_cycle(link); ret = true; @@ -2055,6 +2118,9 @@ static int fw_devlink_create_devlink(struct device *con, int ret = 0; u32 flags; + if (link->flags & FWLINK_FLAG_IGNORE) + return 0; + if (con->fwnode == link->consumer) flags = fw_devlink_get_flags(link->flags); else @@ -2672,8 +2738,11 @@ static ssize_t uevent_show(struct device *dev, struct device_attribute *attr, if (!env) return -ENOMEM; + /* Synchronize with really_probe() */ + device_lock(dev); /* let the kset specific function add its keys */ retval = kset->uevent_ops->uevent(&dev->kobj, env); + device_unlock(dev); if (retval) goto out; diff --git a/drivers/base/cpu.c b/drivers/base/cpu.c index 0b33e81f9c..56fba44ba3 100644 --- a/drivers/base/cpu.c +++ b/drivers/base/cpu.c @@ -144,7 +144,7 @@ static DEVICE_ATTR(release, S_IWUSR, NULL, cpu_release_store); #endif /* CONFIG_ARCH_CPU_PROBE_RELEASE */ #endif /* CONFIG_HOTPLUG_CPU */ -#ifdef CONFIG_KEXEC_CORE +#ifdef CONFIG_CRASH_DUMP #include <linux/kexec.h> static ssize_t crash_notes_show(struct device *dev, @@ -189,14 +189,14 @@ static const struct attribute_group crash_note_cpu_attr_group = { #endif static const struct attribute_group *common_cpu_attr_groups[] = { -#ifdef CONFIG_KEXEC_CORE +#ifdef CONFIG_CRASH_DUMP &crash_note_cpu_attr_group, #endif NULL }; static const struct attribute_group *hotplugable_cpu_attr_groups[] = { -#ifdef CONFIG_KEXEC_CORE +#ifdef CONFIG_CRASH_DUMP &crash_note_cpu_attr_group, #endif NULL @@ -366,7 +366,7 @@ static int cpu_uevent(const struct device *dev, struct kobj_uevent_env *env) } #endif -struct bus_type cpu_subsys = { +const struct bus_type cpu_subsys = { .name = "cpu", .dev_name = "cpu", .match = cpu_subsys_match, diff --git a/drivers/base/dd.c b/drivers/base/dd.c index 85152537db..83d352394f 100644 --- a/drivers/base/dd.c +++ b/drivers/base/dd.c @@ -313,7 +313,7 @@ static void deferred_probe_timeout_work_func(struct work_struct *work) mutex_lock(&deferred_probe_mutex); list_for_each_entry(p, &deferred_probe_pending_list, deferred_probe) - dev_info(p->device, "deferred probe pending: %s", p->deferred_probe_reason ?: "(reason unknown)\n"); + dev_warn(p->device, "deferred probe pending: %s", p->deferred_probe_reason ?: "(reason unknown)\n"); mutex_unlock(&deferred_probe_mutex); fw_devlink_probing_done(); @@ -397,13 +397,12 @@ bool device_is_bound(struct device *dev) static void driver_bound(struct device *dev) { if (device_is_bound(dev)) { - pr_warn("%s: device %s already bound\n", - __func__, kobject_name(&dev->kobj)); + dev_warn(dev, "%s: device already bound\n", __func__); return; } - pr_debug("driver: '%s': %s: bound to device '%s'\n", dev->driver->name, - __func__, dev_name(dev)); + dev_dbg(dev, "driver: '%s': %s: bound to device\n", dev->driver->name, + __func__); klist_add_tail(&dev->p->knode_driver, &dev->driver->p->klist_devices); device_links_driver_bound(dev); @@ -587,13 +586,13 @@ static int call_driver_probe(struct device *dev, struct device_driver *drv) break; case -ENODEV: case -ENXIO: - pr_debug("%s: probe of %s rejects match %d\n", - drv->name, dev_name(dev), ret); + dev_dbg(dev, "probe with driver %s rejects match %d\n", + drv->name, ret); break; default: /* driver matched but the probe failed */ - pr_warn("%s: probe of %s failed with error %d\n", - drv->name, dev_name(dev), 
ret); + dev_err(dev, "probe with driver %s failed with error %d\n", + drv->name, ret); break; } @@ -620,8 +619,8 @@ static int really_probe(struct device *dev, struct device_driver *drv) if (link_ret == -EPROBE_DEFER) return link_ret; - pr_debug("bus: '%s': %s: probing driver %s with device %s\n", - drv->bus->name, __func__, drv->name, dev_name(dev)); + dev_dbg(dev, "bus: '%s': %s: probing driver %s with device\n", + drv->bus->name, __func__, drv->name); if (!list_empty(&dev->devres_head)) { dev_crit(dev, "Resources present before probing\n"); ret = -EBUSY; @@ -644,8 +643,7 @@ re_probe: ret = driver_sysfs_add(dev); if (ret) { - pr_err("%s: driver_sysfs_add(%s) failed\n", - __func__, dev_name(dev)); + dev_err(dev, "%s: driver_sysfs_add failed\n", __func__); goto sysfs_failed; } @@ -706,8 +704,8 @@ re_probe: dev->pm_domain->sync(dev); driver_bound(dev); - pr_debug("bus: '%s': %s: bound device %s to driver %s\n", - drv->bus->name, __func__, dev_name(dev), drv->name); + dev_dbg(dev, "bus: '%s': %s: bound device to driver %s\n", + drv->bus->name, __func__, drv->name); goto done; dev_sysfs_state_synced_failed: @@ -786,8 +784,8 @@ static int __driver_probe_device(struct device_driver *drv, struct device *dev) return -EBUSY; dev->can_match = true; - pr_debug("bus: '%s': %s: matched device %s with driver %s\n", - drv->bus->name, __func__, dev_name(dev), drv->name); + dev_dbg(dev, "bus: '%s': %s: matched device with driver %s\n", + drv->bus->name, __func__, drv->name); pm_runtime_get_suppliers(dev); if (dev->parent) diff --git a/drivers/base/firmware_loader/main.c b/drivers/base/firmware_loader/main.c index ea28102d42..da8ca01d01 100644 --- a/drivers/base/firmware_loader/main.c +++ b/drivers/base/firmware_loader/main.c @@ -551,12 +551,16 @@ fw_get_filesystem_firmware(struct device *device, struct fw_priv *fw_priv, file_size_ptr, READING_FIRMWARE); if (rc < 0) { - if (rc != -ENOENT) - dev_warn(device, "loading %s failed with error %d\n", - path, rc); - else - dev_dbg(device, "loading %s failed for no such file or directory.\n", - path); + if (!(fw_priv->opt_flags & FW_OPT_NO_WARN)) { + if (rc != -ENOENT) + dev_warn(device, + "loading %s failed with error %d\n", + path, rc); + else + dev_dbg(device, + "loading %s failed for no such file or directory.\n", + path); + } continue; } size = rc; diff --git a/drivers/base/memory.c b/drivers/base/memory.c index 14f964a771..c0436f46cf 100644 --- a/drivers/base/memory.c +++ b/drivers/base/memory.c @@ -188,6 +188,7 @@ static int memory_block_online(struct memory_block *mem) unsigned long start_pfn = section_nr_to_pfn(mem->start_section_nr); unsigned long nr_pages = PAGES_PER_SECTION * sections_per_block; unsigned long nr_vmemmap_pages = 0; + struct memory_notify arg; struct zone *zone; int ret; @@ -207,9 +208,19 @@ static int memory_block_online(struct memory_block *mem) if (mem->altmap) nr_vmemmap_pages = mem->altmap->free; + arg.altmap_start_pfn = start_pfn; + arg.altmap_nr_pages = nr_vmemmap_pages; + arg.start_pfn = start_pfn + nr_vmemmap_pages; + arg.nr_pages = nr_pages - nr_vmemmap_pages; mem_hotplug_begin(); + ret = memory_notify(MEM_PREPARE_ONLINE, &arg); + ret = notifier_to_errno(ret); + if (ret) + goto out_notifier; + if (nr_vmemmap_pages) { - ret = mhp_init_memmap_on_memory(start_pfn, nr_vmemmap_pages, zone); + ret = mhp_init_memmap_on_memory(start_pfn, nr_vmemmap_pages, + zone, mem->altmap->inaccessible); if (ret) goto out; } @@ -231,7 +242,11 @@ static int memory_block_online(struct memory_block *mem) nr_vmemmap_pages); mem->zone = zone; + 
mem_hotplug_done(); + return ret; out: + memory_notify(MEM_FINISH_OFFLINE, &arg); +out_notifier: mem_hotplug_done(); return ret; } @@ -244,6 +259,7 @@ static int memory_block_offline(struct memory_block *mem) unsigned long start_pfn = section_nr_to_pfn(mem->start_section_nr); unsigned long nr_pages = PAGES_PER_SECTION * sections_per_block; unsigned long nr_vmemmap_pages = 0; + struct memory_notify arg; int ret; if (!mem->zone) @@ -275,6 +291,11 @@ static int memory_block_offline(struct memory_block *mem) mhp_deinit_memmap_on_memory(start_pfn, nr_vmemmap_pages); mem->zone = NULL; + arg.altmap_start_pfn = start_pfn; + arg.altmap_nr_pages = nr_vmemmap_pages; + arg.start_pfn = start_pfn + nr_vmemmap_pages; + arg.nr_pages = nr_pages - nr_vmemmap_pages; + memory_notify(MEM_FINISH_OFFLINE, &arg); out: mem_hotplug_done(); return ret; diff --git a/drivers/base/module.c b/drivers/base/module.c index 46ad4d6367..a1b55da071 100644 --- a/drivers/base/module.c +++ b/drivers/base/module.c @@ -30,14 +30,14 @@ static void module_create_drivers_dir(struct module_kobject *mk) mutex_unlock(&drivers_dir_mutex); } -void module_add_driver(struct module *mod, struct device_driver *drv) +int module_add_driver(struct module *mod, struct device_driver *drv) { char *driver_name; - int no_warn; struct module_kobject *mk = NULL; + int ret; if (!drv) - return; + return 0; if (mod) mk = &mod->mkobj; @@ -56,17 +56,37 @@ void module_add_driver(struct module *mod, struct device_driver *drv) } if (!mk) - return; + return 0; + + ret = sysfs_create_link(&drv->p->kobj, &mk->kobj, "module"); + if (ret) + return ret; - /* Don't check return codes; these calls are idempotent */ - no_warn = sysfs_create_link(&drv->p->kobj, &mk->kobj, "module"); driver_name = make_driver_name(drv); - if (driver_name) { - module_create_drivers_dir(mk); - no_warn = sysfs_create_link(mk->drivers_dir, &drv->p->kobj, - driver_name); - kfree(driver_name); + if (!driver_name) { + ret = -ENOMEM; + goto out; + } + + module_create_drivers_dir(mk); + if (!mk->drivers_dir) { + ret = -EINVAL; + goto out; } + + ret = sysfs_create_link(mk->drivers_dir, &drv->p->kobj, driver_name); + if (ret) + goto out; + + kfree(driver_name); + + return 0; +out: + sysfs_remove_link(&drv->p->kobj, "module"); + sysfs_remove_link(mk->drivers_dir, driver_name); + kfree(driver_name); + + return ret; } void module_remove_driver(struct device_driver *drv) diff --git a/drivers/base/node.c b/drivers/base/node.c index a73b0c9a40..eb72580288 100644 --- a/drivers/base/node.c +++ b/drivers/base/node.c @@ -215,6 +215,7 @@ void node_set_perf_attrs(unsigned int nid, struct access_coordinate *coord, } } } +EXPORT_SYMBOL_GPL(node_set_perf_attrs); /** * struct node_cache_info - Internal tracking for memory node caches diff --git a/drivers/base/platform-msi.c b/drivers/base/platform-msi.c index f37ad34c80..11f5fdf65b 100644 --- a/drivers/base/platform-msi.c +++ b/drivers/base/platform-msi.c @@ -13,6 +13,8 @@ #include <linux/msi.h> #include <linux/slab.h> +/* Begin of removal area. Once everything is converted over. Cleanup the includes too! 
*/ + #define DEV_ID_SHIFT 21 #define MAX_DEV_MSIS (1 << (32 - DEV_ID_SHIFT)) @@ -172,8 +174,8 @@ static int platform_msi_alloc_priv_data(struct device *dev, unsigned int nvec, if (!datap) return -ENOMEM; - datap->devid = ida_simple_get(&platform_msi_devid_ida, - 0, 1 << DEV_ID_SHIFT, GFP_KERNEL); + datap->devid = ida_alloc_max(&platform_msi_devid_ida, + (1 << DEV_ID_SHIFT) - 1, GFP_KERNEL); if (datap->devid < 0) { err = datap->devid; kfree(datap); @@ -191,7 +193,7 @@ static void platform_msi_free_priv_data(struct device *dev) struct platform_msi_priv_data *data = dev->msi.data->platform_data; dev->msi.data->platform_data = NULL; - ida_simple_remove(&platform_msi_devid_ida, data->devid); + ida_free(&platform_msi_devid_ida, data->devid); kfree(data); } @@ -204,8 +206,8 @@ static void platform_msi_free_priv_data(struct device *dev) * Returns: * Zero for success, or an error code in case of failure */ -int platform_msi_domain_alloc_irqs(struct device *dev, unsigned int nvec, - irq_write_msi_msg_t write_msi_msg) +static int platform_msi_domain_alloc_irqs(struct device *dev, unsigned int nvec, + irq_write_msi_msg_t write_msi_msg) { int err; @@ -219,18 +221,6 @@ int platform_msi_domain_alloc_irqs(struct device *dev, unsigned int nvec, return err; } -EXPORT_SYMBOL_GPL(platform_msi_domain_alloc_irqs); - -/** - * platform_msi_domain_free_irqs - Free MSI interrupts for @dev - * @dev: The device for which to free interrupts - */ -void platform_msi_domain_free_irqs(struct device *dev) -{ - msi_domain_free_irqs_all(dev, MSI_DEFAULT_DOMAIN); - platform_msi_free_priv_data(dev); -} -EXPORT_SYMBOL_GPL(platform_msi_domain_free_irqs); /** * platform_msi_get_host_data - Query the private data associated with @@ -350,3 +340,104 @@ int platform_msi_device_domain_alloc(struct irq_domain *domain, unsigned int vir return msi_domain_populate_irqs(domain->parent, dev, virq, nr_irqs, &data->arg); } + +/* End of removal area */ + +/* Real per device domain interfaces */ + +/* + * This indirection can go when platform_device_msi_init_and_alloc_irqs() + * is switched to a proper irq_chip::irq_write_msi_msg() callback. Keep it + * simple for now. + */ +static void platform_msi_write_msi_msg(struct irq_data *d, struct msi_msg *msg) +{ + irq_write_msi_msg_t cb = d->chip_data; + + cb(irq_data_get_msi_desc(d), msg); +} + +static void platform_msi_set_desc_byindex(msi_alloc_info_t *arg, struct msi_desc *desc) +{ + arg->desc = desc; + arg->hwirq = desc->msi_index; +} + +static const struct msi_domain_template platform_msi_template = { + .chip = { + .name = "pMSI", + .irq_mask = irq_chip_mask_parent, + .irq_unmask = irq_chip_unmask_parent, + .irq_write_msi_msg = platform_msi_write_msi_msg, + /* The rest is filled in by the platform MSI parent */ + }, + + .ops = { + .set_desc = platform_msi_set_desc_byindex, + }, + + .info = { + .bus_token = DOMAIN_BUS_DEVICE_MSI, + }, +}; + +/** + * platform_device_msi_init_and_alloc_irqs - Initialize platform device MSI + * and allocate interrupts for @dev + * @dev: The device for which to allocate interrupts + * @nvec: The number of interrupts to allocate + * @write_msi_msg: Callback to write an interrupt message for @dev + * + * Returns: + * Zero for success, or an error code in case of failure + * + * This creates a MSI domain on @dev which has @dev->msi.domain as + * parent. The parent domain sets up the new domain. The domain has + * a fixed size of @nvec. The domain is managed by devres and will + * be removed when the device is removed. 
+ * + * Note: For migration purposes this falls back to the original platform_msi code + * up to the point where all platforms have been converted to the MSI + * parent model. + */ +int platform_device_msi_init_and_alloc_irqs(struct device *dev, unsigned int nvec, + irq_write_msi_msg_t write_msi_msg) +{ + struct irq_domain *domain = dev->msi.domain; + + if (!domain || !write_msi_msg) + return -EINVAL; + + /* Migration support. Will go away once everything is converted */ + if (!irq_domain_is_msi_parent(domain)) + return platform_msi_domain_alloc_irqs(dev, nvec, write_msi_msg); + + /* + * @write_msi_msg is stored in the resulting msi_domain_info::data. + * The underlying domain creation mechanism will assign that + * callback to the resulting irq chip. + */ + if (!msi_create_device_irq_domain(dev, MSI_DEFAULT_DOMAIN, + &platform_msi_template, + nvec, NULL, write_msi_msg)) + return -ENODEV; + + return msi_domain_alloc_irqs_range(dev, MSI_DEFAULT_DOMAIN, 0, nvec - 1); +} +EXPORT_SYMBOL_GPL(platform_device_msi_init_and_alloc_irqs); + +/** + * platform_device_msi_free_irqs_all - Free all interrupts for @dev + * @dev: The device for which to free interrupts + */ +void platform_device_msi_free_irqs_all(struct device *dev) +{ + struct irq_domain *domain = dev->msi.domain; + + msi_domain_free_irqs_all(dev, MSI_DEFAULT_DOMAIN); + + /* Migration support. Will go away once everything is converted */ + if (!irq_domain_is_msi_parent(domain)) + platform_msi_free_priv_data(dev); +} +EXPORT_SYMBOL_GPL(platform_device_msi_free_irqs_all); diff --git a/drivers/base/power/common.c b/drivers/base/power/common.c index 44ec20918a..327d168dd3 100644 --- a/drivers/base/power/common.c +++ b/drivers/base/power/common.c @@ -168,6 +168,115 @@ struct device *dev_pm_domain_attach_by_name(struct device *dev, EXPORT_SYMBOL_GPL(dev_pm_domain_attach_by_name); /** + * dev_pm_domain_attach_list - Associate a device with its PM domains. + * @dev: The device used to lookup the PM domains for. + * @data: The data used for attaching to the PM domains. + * @list: An out-parameter with an allocated list of attached PM domains. + * + * This function helps to attach a device to its multiple PM domains. The + * caller, which is typically a driver's probe function, may provide a list of + * names for the PM domains that we should try to attach the device to, but it + * may also provide an empty list, in case the attach should be done for all of + * the available PM domains. + * + * Callers must ensure proper synchronization of this function with power + * management callbacks. + * + * Returns the number of attached PM domains or a negative error code in case of + * a failure. Note that, to detach the list of PM domains, the driver shall call + * dev_pm_domain_detach_list(), typically during the remove phase. + */ +int dev_pm_domain_attach_list(struct device *dev, + const struct dev_pm_domain_attach_data *data, + struct dev_pm_domain_list **list) +{ + struct device_node *np = dev->of_node; + struct dev_pm_domain_list *pds; + struct device *pd_dev = NULL; + int ret, i, num_pds = 0; + bool by_id = true; + u32 pd_flags = data ? data->pd_flags : 0; + u32 link_flags = pd_flags & PD_FLAG_NO_DEV_LINK ? 0 : + DL_FLAG_STATELESS | DL_FLAG_PM_RUNTIME; + + if (dev->pm_domain) + return -EEXIST; + + /* For now this is limited to OF based platforms. 
*/ + if (!np) + return 0; + + if (data && data->pd_names) { + num_pds = data->num_pd_names; + by_id = false; + } else { + num_pds = of_count_phandle_with_args(np, "power-domains", + "#power-domain-cells"); + } + + if (num_pds <= 0) + return 0; + + pds = devm_kzalloc(dev, sizeof(*pds), GFP_KERNEL); + if (!pds) + return -ENOMEM; + + pds->pd_devs = devm_kcalloc(dev, num_pds, sizeof(*pds->pd_devs), + GFP_KERNEL); + if (!pds->pd_devs) + return -ENOMEM; + + pds->pd_links = devm_kcalloc(dev, num_pds, sizeof(*pds->pd_links), + GFP_KERNEL); + if (!pds->pd_links) + return -ENOMEM; + + if (link_flags && pd_flags & PD_FLAG_DEV_LINK_ON) + link_flags |= DL_FLAG_RPM_ACTIVE; + + for (i = 0; i < num_pds; i++) { + if (by_id) + pd_dev = dev_pm_domain_attach_by_id(dev, i); + else + pd_dev = dev_pm_domain_attach_by_name(dev, + data->pd_names[i]); + if (IS_ERR_OR_NULL(pd_dev)) { + ret = pd_dev ? PTR_ERR(pd_dev) : -ENODEV; + goto err_attach; + } + + if (link_flags) { + struct device_link *link; + + link = device_link_add(dev, pd_dev, link_flags); + if (!link) { + ret = -ENODEV; + goto err_link; + } + + pds->pd_links[i] = link; + } + + pds->pd_devs[i] = pd_dev; + } + + pds->num_pds = num_pds; + *list = pds; + return num_pds; + +err_link: + dev_pm_domain_detach(pd_dev, true); +err_attach: + while (--i >= 0) { + if (pds->pd_links[i]) + device_link_del(pds->pd_links[i]); + dev_pm_domain_detach(pds->pd_devs[i], true); + } + return ret; +} +EXPORT_SYMBOL_GPL(dev_pm_domain_attach_list); + +/** * dev_pm_domain_detach - Detach a device from its PM domain. * @dev: Device to detach. * @power_off: Used to indicate whether we should power off the device. @@ -188,6 +297,31 @@ void dev_pm_domain_detach(struct device *dev, bool power_off) EXPORT_SYMBOL_GPL(dev_pm_domain_detach); /** + * dev_pm_domain_detach_list - Detach a list of PM domains. + * @list: The list of PM domains to detach. + * + * This function reverse the actions from dev_pm_domain_attach_list(). + * Typically it should be invoked during the remove phase from drivers. + * + * Callers must ensure proper synchronization of this function with power + * management callbacks. + */ +void dev_pm_domain_detach_list(struct dev_pm_domain_list *list) +{ + int i; + + if (!list) + return; + + for (i = 0; i < list->num_pds; i++) { + if (list->pd_links[i]) + device_link_del(list->pd_links[i]); + dev_pm_domain_detach(list->pd_devs[i], true); + } +} +EXPORT_SYMBOL_GPL(dev_pm_domain_detach_list); + +/** * dev_pm_domain_start - Start the device through its PM domain. * @dev: Device to start. 
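As a usage illustration of the dev_pm_domain_attach_list()/dev_pm_domain_detach_list() pair introduced above (not part of the patch), a consumer driver's probe and remove paths might look like the following sketch. The domain names, private struct, and function names are invented; the API, the return convention (number of attached domains or a negative errno), and PD_FLAG_DEV_LINK_ON are the ones added in this hunk:

```c
/* Hedged sketch of a multi-PM-domain consumer using the new helpers. */
#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/pm_domain.h>

struct example_priv {
	struct dev_pm_domain_list *pd_list;
};

static int example_probe(struct device *dev, struct example_priv *priv)
{
	static const char * const pd_names[] = { "perf", "mem" };
	const struct dev_pm_domain_attach_data pd_data = {
		.pd_names = pd_names,
		.num_pd_names = ARRAY_SIZE(pd_names),
		/* Create device links and leave them RPM-active on attach. */
		.pd_flags = PD_FLAG_DEV_LINK_ON,
	};
	int ret;

	ret = dev_pm_domain_attach_list(dev, &pd_data, &priv->pd_list);
	if (ret < 0)
		return ret;

	/* ret is the number of attached domains; 0 means none were found. */
	return 0;
}

static void example_remove(struct example_priv *priv)
{
	/* Reverses the attach, dropping device links and powering off. */
	dev_pm_domain_detach_list(priv->pd_list);
}
```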
* diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c index fadcd0379d..5679f966f6 100644 --- a/drivers/base/power/main.c +++ b/drivers/base/power/main.c @@ -60,7 +60,6 @@ static LIST_HEAD(dpm_suspended_list); static LIST_HEAD(dpm_late_early_list); static LIST_HEAD(dpm_noirq_list); -struct suspend_stats suspend_stats; static DEFINE_MUTEX(dpm_list_mtx); static pm_message_t pm_transition; @@ -578,6 +577,35 @@ bool dev_pm_skip_resume(struct device *dev) return !dev->power.must_resume; } +static bool is_async(struct device *dev) +{ + return dev->power.async_suspend && pm_async_enabled + && !pm_trace_is_enabled(); +} + +static bool dpm_async_fn(struct device *dev, async_func_t func) +{ + reinit_completion(&dev->power.completion); + + if (is_async(dev)) { + dev->power.async_in_progress = true; + + get_device(dev); + + if (async_schedule_dev_nocall(func, dev)) + return true; + + put_device(dev); + } + /* + * Because async_schedule_dev_nocall() above has returned false or it + * has not been called at all, func() is not running and it is safe to + * update the async_in_progress flag without extra synchronization. + */ + dev->power.async_in_progress = false; + return false; +} + /** * device_resume_noirq - Execute a "noirq resume" callback for given device. * @dev: Device to handle. @@ -657,42 +685,12 @@ Out: TRACE_RESUME(error); if (error) { - suspend_stats.failed_resume_noirq++; - dpm_save_failed_step(SUSPEND_RESUME_NOIRQ); + async_error = error; dpm_save_failed_dev(dev_name(dev)); pm_dev_err(dev, state, async ? " async noirq" : " noirq", error); } } -static bool is_async(struct device *dev) -{ - return dev->power.async_suspend && pm_async_enabled - && !pm_trace_is_enabled(); -} - -static bool dpm_async_fn(struct device *dev, async_func_t func) -{ - reinit_completion(&dev->power.completion); - - if (is_async(dev)) { - dev->power.async_in_progress = true; - - get_device(dev); - - if (async_schedule_dev_nocall(func, dev)) - return true; - - put_device(dev); - } - /* - * Because async_schedule_dev_nocall() above has returned false or it - * has not been called at all, func() is not running and it is safe to - * update the async_in_progress flag without extra synchronization. - */ - dev->power.async_in_progress = false; - return false; -} - static void async_resume_noirq(void *data, async_cookie_t cookie) { struct device *dev = data; @@ -707,9 +705,12 @@ static void dpm_noirq_resume_devices(pm_message_t state) ktime_t starttime = ktime_get(); trace_suspend_resume(TPS("dpm_resume_noirq"), state.event, true); - mutex_lock(&dpm_list_mtx); + + async_error = 0; pm_transition = state; + mutex_lock(&dpm_list_mtx); + /* * Trigger the resume of "async" devices upfront so they don't have to * wait for the "non-async" ones they don't depend on. @@ -736,6 +737,9 @@ static void dpm_noirq_resume_devices(pm_message_t state) mutex_unlock(&dpm_list_mtx); async_synchronize_full(); dpm_show_time(starttime, state, 0, "noirq"); + if (async_error) + dpm_save_failed_step(SUSPEND_RESUME_NOIRQ); + trace_suspend_resume(TPS("dpm_resume_noirq"), state.event, false); } @@ -817,8 +821,7 @@ Out: complete_all(&dev->power.completion); if (error) { - suspend_stats.failed_resume_early++; - dpm_save_failed_step(SUSPEND_RESUME_EARLY); + async_error = error; dpm_save_failed_dev(dev_name(dev)); pm_dev_err(dev, state, async ? 
" async early" : " early", error); } @@ -842,9 +845,12 @@ void dpm_resume_early(pm_message_t state) ktime_t starttime = ktime_get(); trace_suspend_resume(TPS("dpm_resume_early"), state.event, true); - mutex_lock(&dpm_list_mtx); + + async_error = 0; pm_transition = state; + mutex_lock(&dpm_list_mtx); + /* * Trigger the resume of "async" devices upfront so they don't have to * wait for the "non-async" ones they don't depend on. @@ -871,6 +877,9 @@ void dpm_resume_early(pm_message_t state) mutex_unlock(&dpm_list_mtx); async_synchronize_full(); dpm_show_time(starttime, state, 0, "early"); + if (async_error) + dpm_save_failed_step(SUSPEND_RESUME_EARLY); + trace_suspend_resume(TPS("dpm_resume_early"), state.event, false); } @@ -974,8 +983,7 @@ static void device_resume(struct device *dev, pm_message_t state, bool async) TRACE_RESUME(error); if (error) { - suspend_stats.failed_resume++; - dpm_save_failed_step(SUSPEND_RESUME); + async_error = error; dpm_save_failed_dev(dev_name(dev)); pm_dev_err(dev, state, async ? " async" : "", error); } @@ -1004,10 +1012,11 @@ void dpm_resume(pm_message_t state) trace_suspend_resume(TPS("dpm_resume"), state.event, true); might_sleep(); - mutex_lock(&dpm_list_mtx); pm_transition = state; async_error = 0; + mutex_lock(&dpm_list_mtx); + /* * Trigger the resume of "async" devices upfront so they don't have to * wait for the "non-async" ones they don't depend on. @@ -1017,29 +1026,25 @@ void dpm_resume(pm_message_t state) while (!list_empty(&dpm_suspended_list)) { dev = to_device(dpm_suspended_list.next); - - get_device(dev); + list_move_tail(&dev->power.entry, &dpm_prepared_list); if (!dev->power.async_in_progress) { + get_device(dev); + mutex_unlock(&dpm_list_mtx); device_resume(dev, state, false); + put_device(dev); + mutex_lock(&dpm_list_mtx); } - - if (!list_empty(&dev->power.entry)) - list_move_tail(&dev->power.entry, &dpm_prepared_list); - - mutex_unlock(&dpm_list_mtx); - - put_device(dev); - - mutex_lock(&dpm_list_mtx); } mutex_unlock(&dpm_list_mtx); async_synchronize_full(); dpm_show_time(starttime, state, 0, NULL); + if (async_error) + dpm_save_failed_step(SUSPEND_RESUME); cpufreq_resume(); devfreq_resume(); @@ -1187,7 +1192,7 @@ static void dpm_superior_set_must_resume(struct device *dev) } /** - * __device_suspend_noirq - Execute a "noirq suspend" callback for given device. + * device_suspend_noirq - Execute a "noirq suspend" callback for given device. * @dev: Device to handle. * @state: PM transition of the system being carried out. * @async: If true, the device is being suspended asynchronously. @@ -1195,7 +1200,7 @@ static void dpm_superior_set_must_resume(struct device *dev) * The driver of @dev will not receive interrupts while this function is being * executed. */ -static int __device_suspend_noirq(struct device *dev, pm_message_t state, bool async) +static int device_suspend_noirq(struct device *dev, pm_message_t state, bool async) { pm_callback_t callback = NULL; const char *info = NULL; @@ -1240,6 +1245,8 @@ Run: error = dpm_run_callback(callback, dev, state, info); if (error) { async_error = error; + dpm_save_failed_dev(dev_name(dev)); + pm_dev_err(dev, state, async ? 
" async noirq" : " noirq", error); goto Complete; } @@ -1269,54 +1276,37 @@ Complete: static void async_suspend_noirq(void *data, async_cookie_t cookie) { struct device *dev = data; - int error; - - error = __device_suspend_noirq(dev, pm_transition, true); - if (error) { - dpm_save_failed_dev(dev_name(dev)); - pm_dev_err(dev, pm_transition, " async", error); - } + device_suspend_noirq(dev, pm_transition, true); put_device(dev); } -static int device_suspend_noirq(struct device *dev) -{ - if (dpm_async_fn(dev, async_suspend_noirq)) - return 0; - - return __device_suspend_noirq(dev, pm_transition, false); -} - static int dpm_noirq_suspend_devices(pm_message_t state) { ktime_t starttime = ktime_get(); int error = 0; trace_suspend_resume(TPS("dpm_suspend_noirq"), state.event, true); - mutex_lock(&dpm_list_mtx); + pm_transition = state; async_error = 0; + mutex_lock(&dpm_list_mtx); + while (!list_empty(&dpm_late_early_list)) { struct device *dev = to_device(dpm_late_early_list.prev); - get_device(dev); - mutex_unlock(&dpm_list_mtx); - - error = device_suspend_noirq(dev); + list_move(&dev->power.entry, &dpm_noirq_list); - mutex_lock(&dpm_list_mtx); + if (dpm_async_fn(dev, async_suspend_noirq)) + continue; - if (error) { - pm_dev_err(dev, state, " noirq", error); - dpm_save_failed_dev(dev_name(dev)); - } else if (!list_empty(&dev->power.entry)) { - list_move(&dev->power.entry, &dpm_noirq_list); - } + get_device(dev); mutex_unlock(&dpm_list_mtx); + error = device_suspend_noirq(dev, state, false); + put_device(dev); mutex_lock(&dpm_list_mtx); @@ -1324,15 +1314,16 @@ static int dpm_noirq_suspend_devices(pm_message_t state) if (error || async_error) break; } + mutex_unlock(&dpm_list_mtx); + async_synchronize_full(); if (!error) error = async_error; - if (error) { - suspend_stats.failed_suspend_noirq++; + if (error) dpm_save_failed_step(SUSPEND_SUSPEND_NOIRQ); - } + dpm_show_time(starttime, state, error, "noirq"); trace_suspend_resume(TPS("dpm_suspend_noirq"), state.event, false); return error; @@ -1375,14 +1366,14 @@ static void dpm_propagate_wakeup_to_parent(struct device *dev) } /** - * __device_suspend_late - Execute a "late suspend" callback for given device. + * device_suspend_late - Execute a "late suspend" callback for given device. * @dev: Device to handle. * @state: PM transition of the system being carried out. * @async: If true, the device is being suspended asynchronously. * * Runtime PM is disabled for @dev while this function is being executed. */ -static int __device_suspend_late(struct device *dev, pm_message_t state, bool async) +static int device_suspend_late(struct device *dev, pm_message_t state, bool async) { pm_callback_t callback = NULL; const char *info = NULL; @@ -1434,6 +1425,8 @@ Run: error = dpm_run_callback(callback, dev, state, info); if (error) { async_error = error; + dpm_save_failed_dev(dev_name(dev)); + pm_dev_err(dev, state, async ? 
" async late" : " late", error); goto Complete; } dpm_propagate_wakeup_to_parent(dev); @@ -1450,24 +1443,11 @@ Complete: static void async_suspend_late(void *data, async_cookie_t cookie) { struct device *dev = data; - int error; - error = __device_suspend_late(dev, pm_transition, true); - if (error) { - dpm_save_failed_dev(dev_name(dev)); - pm_dev_err(dev, pm_transition, " async", error); - } + device_suspend_late(dev, pm_transition, true); put_device(dev); } -static int device_suspend_late(struct device *dev) -{ - if (dpm_async_fn(dev, async_suspend_late)) - return 0; - - return __device_suspend_late(dev, pm_transition, false); -} - /** * dpm_suspend_late - Execute "late suspend" callbacks for all devices. * @state: PM transition of the system being carried out. @@ -1478,32 +1458,28 @@ int dpm_suspend_late(pm_message_t state) int error = 0; trace_suspend_resume(TPS("dpm_suspend_late"), state.event, true); - wake_up_all_idle_cpus(); - mutex_lock(&dpm_list_mtx); + pm_transition = state; async_error = 0; - while (!list_empty(&dpm_suspended_list)) { - struct device *dev = to_device(dpm_suspended_list.prev); - - get_device(dev); + wake_up_all_idle_cpus(); - mutex_unlock(&dpm_list_mtx); + mutex_lock(&dpm_list_mtx); - error = device_suspend_late(dev); + while (!list_empty(&dpm_suspended_list)) { + struct device *dev = to_device(dpm_suspended_list.prev); - mutex_lock(&dpm_list_mtx); + list_move(&dev->power.entry, &dpm_late_early_list); - if (!list_empty(&dev->power.entry)) - list_move(&dev->power.entry, &dpm_late_early_list); + if (dpm_async_fn(dev, async_suspend_late)) + continue; - if (error) { - pm_dev_err(dev, state, " late", error); - dpm_save_failed_dev(dev_name(dev)); - } + get_device(dev); mutex_unlock(&dpm_list_mtx); + error = device_suspend_late(dev, state, false); + put_device(dev); mutex_lock(&dpm_list_mtx); @@ -1511,12 +1487,14 @@ int dpm_suspend_late(pm_message_t state) if (error || async_error) break; } + mutex_unlock(&dpm_list_mtx); + async_synchronize_full(); if (!error) error = async_error; + if (error) { - suspend_stats.failed_suspend_late++; dpm_save_failed_step(SUSPEND_SUSPEND_LATE); dpm_resume_early(resume_event(state)); } @@ -1597,12 +1575,12 @@ static void dpm_clear_superiors_direct_complete(struct device *dev) } /** - * __device_suspend - Execute "suspend" callbacks for given device. + * device_suspend - Execute "suspend" callbacks for given device. * @dev: Device to handle. * @state: PM transition of the system being carried out. * @async: If true, the device is being suspended asynchronously. */ -static int __device_suspend(struct device *dev, pm_message_t state, bool async) +static int device_suspend(struct device *dev, pm_message_t state, bool async) { pm_callback_t callback = NULL; const char *info = NULL; @@ -1716,8 +1694,11 @@ static int __device_suspend(struct device *dev, pm_message_t state, bool async) dpm_watchdog_clear(&wd); Complete: - if (error) + if (error) { async_error = error; + dpm_save_failed_dev(dev_name(dev)); + pm_dev_err(dev, state, async ? 
" async" : "", error); + } complete_all(&dev->power.completion); TRACE_SUSPEND(error); @@ -1727,25 +1708,11 @@ static int __device_suspend(struct device *dev, pm_message_t state, bool async) static void async_suspend(void *data, async_cookie_t cookie) { struct device *dev = data; - int error; - - error = __device_suspend(dev, pm_transition, true); - if (error) { - dpm_save_failed_dev(dev_name(dev)); - pm_dev_err(dev, pm_transition, " async", error); - } + device_suspend(dev, pm_transition, true); put_device(dev); } -static int device_suspend(struct device *dev) -{ - if (dpm_async_fn(dev, async_suspend)) - return 0; - - return __device_suspend(dev, pm_transition, false); -} - /** * dpm_suspend - Execute "suspend" callbacks for all non-sysdev devices. * @state: PM transition of the system being carried out. @@ -1761,29 +1728,25 @@ int dpm_suspend(pm_message_t state) devfreq_suspend(); cpufreq_suspend(); - mutex_lock(&dpm_list_mtx); pm_transition = state; async_error = 0; - while (!list_empty(&dpm_prepared_list)) { - struct device *dev = to_device(dpm_prepared_list.prev); - get_device(dev); + mutex_lock(&dpm_list_mtx); - mutex_unlock(&dpm_list_mtx); + while (!list_empty(&dpm_prepared_list)) { + struct device *dev = to_device(dpm_prepared_list.prev); - error = device_suspend(dev); + list_move(&dev->power.entry, &dpm_suspended_list); - mutex_lock(&dpm_list_mtx); + if (dpm_async_fn(dev, async_suspend)) + continue; - if (error) { - pm_dev_err(dev, state, "", error); - dpm_save_failed_dev(dev_name(dev)); - } else if (!list_empty(&dev->power.entry)) { - list_move(&dev->power.entry, &dpm_suspended_list); - } + get_device(dev); mutex_unlock(&dpm_list_mtx); + error = device_suspend(dev, state, false); + put_device(dev); mutex_lock(&dpm_list_mtx); @@ -1791,14 +1754,16 @@ int dpm_suspend(pm_message_t state) if (error || async_error) break; } + mutex_unlock(&dpm_list_mtx); + async_synchronize_full(); if (!error) error = async_error; - if (error) { - suspend_stats.failed_suspend++; + + if (error) dpm_save_failed_step(SUSPEND_SUSPEND); - } + dpm_show_time(starttime, state, error, NULL); trace_suspend_resume(TPS("dpm_suspend"), state.event, false); return error; @@ -1949,11 +1914,11 @@ int dpm_suspend_start(pm_message_t state) int error; error = dpm_prepare(state); - if (error) { - suspend_stats.failed_prepare++; + if (error) dpm_save_failed_step(SUSPEND_PREPARE); - } else + else error = dpm_suspend(state); + dpm_show_time(starttime, state, error, "start"); return error; } diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c index 05793c9fbb..2ee4584148 100644 --- a/drivers/base/power/runtime.c +++ b/drivers/base/power/runtime.c @@ -94,6 +94,7 @@ static void update_pm_runtime_accounting(struct device *dev) static void __update_runtime_status(struct device *dev, enum rpm_status status) { update_pm_runtime_accounting(dev); + trace_rpm_status(dev, status); dev->power.runtime_status = status; } @@ -1176,7 +1177,7 @@ int __pm_runtime_resume(struct device *dev, int rpmflags) EXPORT_SYMBOL_GPL(__pm_runtime_resume); /** - * pm_runtime_get_if_active - Conditionally bump up device usage counter. + * pm_runtime_get_conditional - Conditionally bump up device usage counter. * @dev: Device to handle. * @ign_usage_count: Whether or not to look at the current usage counter value. * @@ -1197,7 +1198,7 @@ EXPORT_SYMBOL_GPL(__pm_runtime_resume); * The caller is responsible for decrementing the runtime PM usage counter of * @dev after this function has returned a positive value for it. 
*/ -int pm_runtime_get_if_active(struct device *dev, bool ign_usage_count) +static int pm_runtime_get_conditional(struct device *dev, bool ign_usage_count) { unsigned long flags; int retval; @@ -1218,9 +1219,40 @@ int pm_runtime_get_if_active(struct device *dev, bool ign_usage_count) return retval; } + +/** + * pm_runtime_get_if_active - Bump up runtime PM usage counter if the device is + * in active state + * @dev: Target device. + * + * Increment the runtime PM usage counter of @dev if its runtime PM status is + * %RPM_ACTIVE, in which case it returns 1. If the device is in a different + * state, 0 is returned. -EINVAL is returned if runtime PM is disabled for the + * device, in which case also the usage_count will remain unmodified. + */ +int pm_runtime_get_if_active(struct device *dev) +{ + return pm_runtime_get_conditional(dev, true); +} EXPORT_SYMBOL_GPL(pm_runtime_get_if_active); /** + * pm_runtime_get_if_in_use - Conditionally bump up runtime PM usage counter. + * @dev: Target device. + * + * Increment the runtime PM usage counter of @dev if its runtime PM status is + * %RPM_ACTIVE and its runtime PM usage counter is greater than 0, in which case + * it returns 1. If the device is in a different state or its usage_count is 0, + * 0 is returned. -EINVAL is returned if runtime PM is disabled for the device, + * in which case also the usage_count will remain unmodified. + */ +int pm_runtime_get_if_in_use(struct device *dev) +{ + return pm_runtime_get_conditional(dev, false); +} +EXPORT_SYMBOL_GPL(pm_runtime_get_if_in_use); + +/** * __pm_runtime_set_status - Set runtime PM status of a device. * @dev: Device to handle. * @status: New runtime PM status of the device. diff --git a/drivers/base/property.c b/drivers/base/property.c index a1b01ab420..7324a704a9 100644 --- a/drivers/base/property.c +++ b/drivers/base/property.c @@ -7,15 +7,16 @@ * Mika Westerberg <mika.westerberg@linux.intel.com> */ -#include <linux/acpi.h> +#include <linux/device.h> +#include <linux/err.h> #include <linux/export.h> -#include <linux/kernel.h> +#include <linux/kconfig.h> #include <linux/of.h> -#include <linux/of_address.h> -#include <linux/of_graph.h> -#include <linux/of_irq.h> #include <linux/property.h> #include <linux/phy.h> +#include <linux/slab.h> +#include <linux/string.h> +#include <linux/types.h> struct fwnode_handle *__dev_fwnode(struct device *dev) { @@ -700,34 +701,6 @@ struct fwnode_handle *fwnode_get_next_parent(struct fwnode_handle *fwnode) EXPORT_SYMBOL_GPL(fwnode_get_next_parent); /** - * fwnode_get_next_parent_dev - Find device of closest ancestor fwnode - * @fwnode: firmware node - * - * Given a firmware node (@fwnode), this function finds its closest ancestor - * firmware node that has a corresponding struct device and returns that struct - * device. - * - * The caller is responsible for calling put_device() on the returned device - * pointer. - * - * Return: a pointer to the device of the @fwnode's closest ancestor. 
- */ -struct device *fwnode_get_next_parent_dev(const struct fwnode_handle *fwnode) -{ - struct fwnode_handle *parent; - struct device *dev; - - fwnode_for_each_parent_node(fwnode, parent) { - dev = get_dev_from_fwnode(parent); - if (dev) { - fwnode_handle_put(parent); - return dev; - } - } - return NULL; -} - -/** * fwnode_count_parents - Return the number of parents a node has * @fwnode: The node the parents of which are to be counted * @@ -774,34 +747,6 @@ struct fwnode_handle *fwnode_get_nth_parent(struct fwnode_handle *fwnode, EXPORT_SYMBOL_GPL(fwnode_get_nth_parent); /** - * fwnode_is_ancestor_of - Test if @ancestor is ancestor of @child - * @ancestor: Firmware which is tested for being an ancestor - * @child: Firmware which is tested for being the child - * - * A node is considered an ancestor of itself too. - * - * Return: true if @ancestor is an ancestor of @child. Otherwise, returns false. - */ -bool fwnode_is_ancestor_of(const struct fwnode_handle *ancestor, const struct fwnode_handle *child) -{ - struct fwnode_handle *parent; - - if (IS_ERR_OR_NULL(ancestor)) - return false; - - if (child == ancestor) - return true; - - fwnode_for_each_parent_node(child, parent) { - if (parent == ancestor) { - fwnode_handle_put(parent); - return true; - } - } - return false; -} - -/** * fwnode_get_next_child_node - Return the next child node handle for a node * @fwnode: Firmware node to find the next child node for. * @child: Handle to one of the node's child nodes or a %NULL handle. diff --git a/drivers/base/regmap/internal.h b/drivers/base/regmap/internal.h index 583dd5d7d4..bcdb25bec7 100644 --- a/drivers/base/regmap/internal.h +++ b/drivers/base/regmap/internal.h @@ -93,6 +93,7 @@ struct regmap { #endif unsigned int max_register; + bool max_register_is_set; bool (*writeable_reg)(struct device *dev, unsigned int reg); bool (*readable_reg)(struct device *dev, unsigned int reg); bool (*volatile_reg)(struct device *dev, unsigned int reg); diff --git a/drivers/base/regmap/regcache-flat.c b/drivers/base/regmap/regcache-flat.c index b7e4b24641..9b17c77dec 100644 --- a/drivers/base/regmap/regcache-flat.c +++ b/drivers/base/regmap/regcache-flat.c @@ -23,7 +23,7 @@ static int regcache_flat_init(struct regmap *map) int i; unsigned int *cache; - if (!map || map->reg_stride_order < 0 || !map->max_register) + if (!map || map->reg_stride_order < 0 || !map->max_register_is_set) return -EINVAL; map->cache = kcalloc(regcache_flat_get_index(map, map->max_register) diff --git a/drivers/base/regmap/regcache.c b/drivers/base/regmap/regcache.c index ac63a73ccd..2e41cb12b8 100644 --- a/drivers/base/regmap/regcache.c +++ b/drivers/base/regmap/regcache.c @@ -187,8 +187,10 @@ int regcache_init(struct regmap *map, const struct regmap_config *config) return 0; } - if (!map->max_register && map->num_reg_defaults_raw) + if (!map->max_register_is_set && map->num_reg_defaults_raw) { map->max_register = (map->num_reg_defaults_raw - 1) * map->reg_stride; + map->max_register_is_set = true; + } if (map->cache_ops->init) { dev_dbg(map->dev, "Initializing %s cache\n", diff --git a/drivers/base/regmap/regmap-kunit.c b/drivers/base/regmap/regmap-kunit.c index 0d957c5f1b..bb2ab6129f 100644 --- a/drivers/base/regmap/regmap-kunit.c +++ b/drivers/base/regmap/regmap-kunit.c @@ -1341,6 +1341,71 @@ static void raw_sync(struct kunit *test) regmap_exit(map); } +static void raw_ranges(struct kunit *test) +{ + struct raw_test_types *t = (struct raw_test_types *)test->param_value; + struct regmap *map; + struct regmap_config config; + struct 
regmap_ram_data *data; + unsigned int val; + int i; + + config = raw_regmap_config; + config.volatile_reg = test_range_all_volatile; + config.ranges = &test_range; + config.num_ranges = 1; + config.max_register = test_range.range_max; + + map = gen_raw_regmap(&config, t, &data); + KUNIT_ASSERT_FALSE(test, IS_ERR(map)); + if (IS_ERR(map)) + return; + + /* Reset the page to a non-zero value to trigger a change */ + KUNIT_EXPECT_EQ(test, 0, regmap_write(map, test_range.selector_reg, + test_range.range_max)); + + /* Check we set the page and use the window for writes */ + data->written[test_range.selector_reg] = false; + data->written[test_range.window_start] = false; + KUNIT_EXPECT_EQ(test, 0, regmap_write(map, test_range.range_min, 0)); + KUNIT_EXPECT_TRUE(test, data->written[test_range.selector_reg]); + KUNIT_EXPECT_TRUE(test, data->written[test_range.window_start]); + + data->written[test_range.selector_reg] = false; + data->written[test_range.window_start] = false; + KUNIT_EXPECT_EQ(test, 0, regmap_write(map, + test_range.range_min + + test_range.window_len, + 0)); + KUNIT_EXPECT_TRUE(test, data->written[test_range.selector_reg]); + KUNIT_EXPECT_TRUE(test, data->written[test_range.window_start]); + + /* Same for reads */ + data->written[test_range.selector_reg] = false; + data->read[test_range.window_start] = false; + KUNIT_EXPECT_EQ(test, 0, regmap_read(map, test_range.range_min, &val)); + KUNIT_EXPECT_TRUE(test, data->written[test_range.selector_reg]); + KUNIT_EXPECT_TRUE(test, data->read[test_range.window_start]); + + data->written[test_range.selector_reg] = false; + data->read[test_range.window_start] = false; + KUNIT_EXPECT_EQ(test, 0, regmap_read(map, + test_range.range_min + + test_range.window_len, + &val)); + KUNIT_EXPECT_TRUE(test, data->written[test_range.selector_reg]); + KUNIT_EXPECT_TRUE(test, data->read[test_range.window_start]); + + /* No physical access triggered in the virtual range */ + for (i = test_range.range_min; i < test_range.range_max; i++) { + KUNIT_EXPECT_FALSE(test, data->read[i]); + KUNIT_EXPECT_FALSE(test, data->written[i]); + } + + regmap_exit(map); +} + static struct kunit_case regmap_test_cases[] = { KUNIT_CASE_PARAM(basic_read_write, regcache_types_gen_params), KUNIT_CASE_PARAM(bulk_write, regcache_types_gen_params), @@ -1368,6 +1433,7 @@ static struct kunit_case regmap_test_cases[] = { KUNIT_CASE_PARAM(raw_write, raw_test_types_gen_params), KUNIT_CASE_PARAM(raw_noinc_write, raw_test_types_gen_params), KUNIT_CASE_PARAM(raw_sync, raw_test_cache_types_gen_params), + KUNIT_CASE_PARAM(raw_ranges, raw_test_cache_types_gen_params), {} }; diff --git a/drivers/base/regmap/regmap.c b/drivers/base/regmap/regmap.c index 4ae399c30f..0a34dd3c4f 100644 --- a/drivers/base/regmap/regmap.c +++ b/drivers/base/regmap/regmap.c @@ -89,7 +89,7 @@ EXPORT_SYMBOL_GPL(regmap_check_range_table); bool regmap_writeable(struct regmap *map, unsigned int reg) { - if (map->max_register && reg > map->max_register) + if (map->max_register_is_set && reg > map->max_register) return false; if (map->writeable_reg) @@ -112,7 +112,7 @@ bool regmap_cached(struct regmap *map, unsigned int reg) if (!map->cache_ops) return false; - if (map->max_register && reg > map->max_register) + if (map->max_register_is_set && reg > map->max_register) return false; map->lock(map->lock_arg); @@ -129,7 +129,7 @@ bool regmap_readable(struct regmap *map, unsigned int reg) if (!map->reg_read) return false; - if (map->max_register && reg > map->max_register) + if (map->max_register_is_set && reg > map->max_register) 
return false; if (map->format.format_write) @@ -787,6 +787,7 @@ struct regmap *__regmap_init(struct device *dev, map->bus = bus; map->bus_context = bus_context; map->max_register = config->max_register; + map->max_register_is_set = map->max_register ?: config->max_register_is_0; map->wr_table = config->wr_table; map->rd_table = config->rd_table; map->volatile_table = config->volatile_table; @@ -1412,6 +1413,7 @@ int regmap_reinit_cache(struct regmap *map, const struct regmap_config *config) regmap_debugfs_exit(map); map->max_register = config->max_register; + map->max_register_is_set = map->max_register ?: config->max_register_is_0; map->writeable_reg = config->writeable_reg; map->readable_reg = config->readable_reg; map->volatile_reg = config->volatile_reg; @@ -3420,7 +3422,7 @@ EXPORT_SYMBOL_GPL(regmap_get_val_bytes); */ int regmap_get_max_register(struct regmap *map) { - return map->max_register ? map->max_register : -EINVAL; + return map->max_register_is_set ? map->max_register : -EINVAL; } EXPORT_SYMBOL_GPL(regmap_get_max_register); diff --git a/drivers/base/swnode.c b/drivers/base/swnode.c index 36512fb75a..eb6eb25b34 100644 --- a/drivers/base/swnode.c +++ b/drivers/base/swnode.c @@ -6,10 +6,21 @@ * Author: Heikki Krogerus <heikki.krogerus@linux.intel.com> */ +#include <linux/container_of.h> #include <linux/device.h> -#include <linux/kernel.h> +#include <linux/err.h> +#include <linux/export.h> +#include <linux/idr.h> +#include <linux/init.h> +#include <linux/kobject.h> +#include <linux/kstrtox.h> +#include <linux/list.h> #include <linux/property.h> #include <linux/slab.h> +#include <linux/spinlock.h> +#include <linux/string.h> +#include <linux/sysfs.h> +#include <linux/types.h> #include "base.h" |
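One way to read the regmap changes at the end of this diff: max_register alone could not distinguish "never configured" from "the only valid register is 0", so the patch tracks an explicit max_register_is_set bit and lets a config opt in via the new max_register_is_0 flag. A hedged sketch of a config that relies on this — the device details are invented for illustration, while max_register_is_0 is the field consumed by __regmap_init() above:

```c
/*
 * Sketch: a hypothetical single-register device. With max_register == 0
 * alone, the range checks and the flat cache would have treated the
 * limit as unset; max_register_is_0 makes the zero limit explicit.
 */
#include <linux/regmap.h>

static const struct regmap_config example_single_reg_config = {
	.reg_bits = 8,
	.val_bits = 8,
	.max_register = 0,		/* only register 0 exists */
	.max_register_is_0 = true,	/* distinguish from "no limit set" */
	.cache_type = REGCACHE_FLAT,
};
```

Before this change, regmap_get_max_register() and regcache_flat_init() interpreted a zero max_register as "not configured"; the explicit flag removes that ambiguity without changing behavior for existing users.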